Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/video.c16
-rw-r--r--drivers/amba/bus.c348
-rw-r--r--drivers/ata/sata_fsl.c25
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/block/rbd.c361
-rw-r--r--drivers/block/smart1,2.h2
-rw-r--r--drivers/bluetooth/btusb.c4
-rw-r--r--drivers/char/Kconfig8
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/hw_random/nomadik-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/mem.c5
-rw-r--r--drivers/char/msm_smd_pkt.c466
-rw-r--r--drivers/char/mwave/Makefile4
-rw-r--r--drivers/char/mwave/README2
-rw-r--r--drivers/cpufreq/cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c123
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c122
-rw-r--r--drivers/cpuidle/sysfs.c2
-rw-r--r--drivers/dca/dca-core.c6
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/Makefile9
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/coh901318.c4
-rw-r--r--drivers/dma/dmatest.c14
-rw-r--r--drivers/dma/dw_dmac.c103
-rw-r--r--drivers/dma/dw_dmac_regs.h12
-rw-r--r--drivers/dma/fsldma.c551
-rw-r--r--drivers/dma/fsldma.h6
-rw-r--r--drivers/dma/mxs-dma.c724
-rw-r--r--drivers/dma/pch_dma.c35
-rw-r--r--drivers/dma/pl330.c2
-rw-r--r--drivers/dma/shdma.c2
-rw-r--r--drivers/dma/ste_dma40.c1402
-rw-r--r--drivers/dma/ste_dma40_ll.c218
-rw-r--r--drivers/dma/ste_dma40_ll.h66
-rw-r--r--drivers/dma/timb_dma.c5
-rw-r--r--drivers/edac/Kconfig10
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/amd64_edac.c1442
-rw-r--r--drivers/edac/amd64_edac.h369
-rw-r--r--drivers/edac/amd64_edac_inj.c8
-rw-r--r--drivers/edac/edac_mc_sysfs.c26
-rw-r--r--drivers/edac/i7300_edac.c2
-rw-r--r--drivers/edac/i82975x_edac.c69
-rw-r--r--drivers/edac/mce_amd.c8
-rw-r--r--drivers/edac/mce_amd.h28
-rw-r--r--drivers/edac/tile_edac.c254
-rw-r--r--drivers/firewire/Kconfig3
-rw-r--r--drivers/firewire/core-card.c21
-rw-r--r--drivers/firewire/core-cdev.c54
-rw-r--r--drivers/firewire/core-device.c22
-rw-r--r--drivers/firewire/core-iso.c23
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core.h3
-rw-r--r--drivers/firewire/ohci.c85
-rw-r--r--drivers/firewire/sbp2.c11
-rw-r--r--drivers/firmware/Kconfig12
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/dcdbas.c4
-rw-r--r--drivers/firmware/sigma.c115
-rw-r--r--drivers/gpio/74x164.c2
-rw-r--r--drivers/gpio/Kconfig19
-rw-r--r--drivers/gpio/cs5535-gpio.c6
-rw-r--r--drivers/gpio/janz-ttl.c3
-rw-r--r--drivers/gpio/langwell_gpio.c43
-rw-r--r--drivers/gpio/mc33880.c2
-rw-r--r--drivers/gpio/mcp23s08.c191
-rw-r--r--drivers/gpio/pca953x.c13
-rw-r--r--drivers/gpio/pl061.c2
-rw-r--r--drivers/gpio/rdc321x-gpio.c3
-rw-r--r--drivers/gpio/sch_gpio.c57
-rw-r--r--drivers/gpio/sx150x.c54
-rw-r--r--drivers/gpio/timbgpio.c6
-rw-r--r--drivers/gpu/drm/drm_sman.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c6
-rw-r--r--drivers/gpu/drm/radeon/Kconfig1
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c2
-rw-r--r--drivers/gpu/drm/radeon/r100.c10
-rw-r--r--drivers/gpu/drm/radeon/r300.c4
-rw-r--r--drivers/gpu/drm/radeon/r420.c4
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c257
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/rs400.c4
-rw-r--r--drivers/gpu/drm/radeon/rs600.c4
-rw-r--r--drivers/gpu/drm/radeon/rs690.c4
-rw-r--r--drivers/gpu/drm/radeon/rv515.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/hid/Kconfig67
-rw-r--r--drivers/hid/Makefile10
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-axff.c31
-rw-r--r--drivers/hid/hid-core.c49
-rw-r--r--drivers/hid/hid-dr.c (renamed from drivers/hid/hid-drff.c)117
-rw-r--r--drivers/hid/hid-egalax.c279
-rw-r--r--drivers/hid/hid-gyration.c5
-rw-r--r--drivers/hid/hid-ids.h16
-rw-r--r--drivers/hid/hid-input.c35
-rw-r--r--drivers/hid/hid-keytouch.c66
-rw-r--r--drivers/hid/hid-lcpower.c70
-rw-r--r--drivers/hid/hid-lg.c2
-rw-r--r--drivers/hid/hid-magicmouse.c4
-rw-r--r--drivers/hid/hid-multitouch.c76
-rw-r--r--drivers/hid/hid-ntrig.c519
-rw-r--r--drivers/hid/hid-ortek.c6
-rw-r--r--drivers/hid/hid-picolcd.c1
-rw-r--r--drivers/hid/hid-roccat-arvo.c450
-rw-r--r--drivers/hid/hid-roccat-arvo.h98
-rw-r--r--drivers/hid/hid-roccat-common.c62
-rw-r--r--drivers/hid/hid-roccat-common.h23
-rw-r--r--drivers/hid/hid-roccat-kone.c156
-rw-r--r--drivers/hid/hid-roccat-koneplus.c167
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c715
-rw-r--r--drivers/hid/hid-roccat-kovaplus.h157
-rw-r--r--drivers/hid/hid-roccat-pyra.c174
-rw-r--r--drivers/hid/hid-roccat.c53
-rw-r--r--drivers/hid/hid-roccat.h32
-rw-r--r--drivers/hid/hid-sony.c20
-rw-r--r--drivers/hid/hidraw.c112
-rw-r--r--drivers/hid/usbhid/hid-core.c35
-rw-r--r--drivers/hwmon/Kconfig187
-rw-r--r--drivers/hwmon/Makefile20
-rw-r--r--drivers/hwmon/abituguru.c4
-rw-r--r--drivers/hwmon/abituguru3.c4
-rw-r--r--drivers/hwmon/ads1015.c337
-rw-r--r--drivers/hwmon/f71882fg.c522
-rw-r--r--drivers/hwmon/jz4740-hwmon.c4
-rw-r--r--drivers/hwmon/lineage-pem.c586
-rw-r--r--drivers/hwmon/lm75.c66
-rw-r--r--drivers/hwmon/lm85.c136
-rw-r--r--drivers/hwmon/ltc4151.c256
-rw-r--r--drivers/hwmon/max16064.c91
-rw-r--r--drivers/hwmon/max34440.c199
-rw-r--r--drivers/hwmon/max6639.c653
-rw-r--r--drivers/hwmon/max8688.c158
-rw-r--r--drivers/hwmon/pmbus.c203
-rw-r--r--drivers/hwmon/pmbus.h313
-rw-r--r--drivers/hwmon/pmbus_core.c1658
-rw-r--r--drivers/hwmon/sch5627.c858
-rw-r--r--drivers/hwmon/sht15.c6
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c157
-rw-r--r--drivers/hwmon/w83627ehf.c1351
-rw-r--r--drivers/hwspinlock/Kconfig22
-rw-r--r--drivers/hwspinlock/Makefile6
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c548
-rw-r--r--drivers/hwspinlock/hwspinlock_internal.h61
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c231
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/busses/Kconfig54
-rw-r--r--drivers/i2c/busses/Makefile4
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c535
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c161
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-mxs.c412
-rw-r--r--drivers/i2c/busses/i2c-ocores.c3
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c176
-rw-r--r--drivers/i2c/busses/i2c-pxa.c115
-rw-r--r--drivers/i2c/busses/i2c-tegra.c700
-rw-r--r--drivers/i2c/busses/i2c-xiic.c3
-rw-r--r--drivers/i2c/i2c-boardinfo.c2
-rw-r--r--drivers/i2c/i2c-core.c28
-rw-r--r--drivers/i2c/i2c-dev.c60
-rw-r--r--drivers/ide/Makefile2
-rw-r--r--drivers/ieee802154/Makefile2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c6
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c26
-rw-r--r--drivers/input/Kconfig10
-rw-r--r--drivers/input/Makefile2
-rw-r--r--drivers/input/evdev.c10
-rw-r--r--drivers/input/input-polldev.c4
-rw-r--r--drivers/input/input.c55
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/lm8323.c15
-rw-r--r--drivers/input/keyboard/max7359_keypad.c17
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c57
-rw-r--r--drivers/input/keyboard/omap4-keypad.c74
-rw-r--r--drivers/input/keyboard/qt1070.c276
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c22
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c28
-rw-r--r--drivers/input/misc/88pm860x_onkey.c2
-rw-r--r--drivers/input/misc/Kconfig13
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ad714x-i2c.c17
-rw-r--r--drivers/input/misc/ad714x-spi.c17
-rw-r--r--drivers/input/misc/adxl34x-i2c.c16
-rw-r--r--drivers/input/misc/adxl34x-spi.c20
-rw-r--r--drivers/input/misc/ati_remote2.c4
-rw-r--r--drivers/input/misc/twl4030-vibra.c3
-rw-r--r--drivers/input/misc/uinput.c48
-rw-r--r--drivers/input/misc/xen-kbdfront.c (renamed from drivers/input/xen-kbdfront.c)83
-rw-r--r--drivers/input/mouse/bcm5974.c28
-rw-r--r--drivers/input/mouse/synaptics_i2c.c16
-rw-r--r--drivers/input/serio/ambakmi.c3
-rw-r--r--drivers/input/sparse-keymap.c4
-rw-r--r--drivers/input/tablet/wacom_sys.c12
-rw-r--r--drivers/input/tablet/wacom_wac.c247
-rw-r--r--drivers/input/tablet/wacom_wac.h6
-rw-r--r--drivers/input/touchscreen/Kconfig45
-rw-r--r--drivers/input/touchscreen/Makefile4
-rw-r--r--drivers/input/touchscreen/ad7877.c19
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c17
-rw-r--r--drivers/input/touchscreen/ads7846.c16
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c1211
-rw-r--r--drivers/input/touchscreen/qt602240_ts.c1406
-rw-r--r--drivers/input/touchscreen/tsc2005.c756
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c368
-rw-r--r--drivers/isdn/hisax/Makefile2
-rw-r--r--drivers/isdn/mISDN/hwchannel.c6
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-88pm860x.c60
-rw-r--r--drivers/leds/leds-bd2802.c47
-rw-r--r--drivers/leds/leds-lm3530.c378
-rw-r--r--drivers/leds/leds-lp5521.c14
-rw-r--r--drivers/leds/leds-lp5523.c20
-rw-r--r--drivers/leds/leds-mc13783.c7
-rw-r--r--drivers/leds/leds-net5501.c2
-rw-r--r--drivers/macintosh/rack-meter.c2
-rw-r--r--drivers/macintosh/via-pmu-backlight.c1
-rw-r--r--drivers/md/bitmap.c6
-rw-r--r--drivers/md/dm-log.c8
-rw-r--r--drivers/md/dm-mpath.c22
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-remote.c109
-rw-r--r--drivers/media/radio/radio-timb.c3
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/rc/rc-main.c4
-rw-r--r--drivers/media/video/timblogiw.c3
-rw-r--r--drivers/memstick/Makefile4
-rw-r--r--drivers/memstick/core/Makefile4
-rw-r--r--drivers/memstick/host/Makefile4
-rw-r--r--drivers/message/fusion/Makefile2
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_ioc.h1
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptsas.c7
-rw-r--r--drivers/message/i2o/i2o_config.c3
-rw-r--r--drivers/mfd/88pm860x-core.c567
-rw-r--r--drivers/mfd/88pm860x-i2c.c103
-rw-r--r--drivers/mfd/Kconfig51
-rw-r--r--drivers/mfd/Makefile7
-rw-r--r--drivers/mfd/ab3100-core.c10
-rw-r--r--drivers/mfd/ab3550-core.c12
-rw-r--r--drivers/mfd/ab8500-core.c56
-rw-r--r--drivers/mfd/ab8500-debugfs.c6
-rw-r--r--drivers/mfd/ab8500-gpadc.c614
-rw-r--r--drivers/mfd/ab8500-sysctrl.c80
-rw-r--r--drivers/mfd/adp5520.c15
-rw-r--r--drivers/mfd/asic3.c10
-rw-r--r--drivers/mfd/cs5535-mfd.c37
-rw-r--r--drivers/mfd/davinci_voicecodec.c4
-rw-r--r--drivers/mfd/htc-pasic3.c7
-rw-r--r--drivers/mfd/janz-cmodio.c3
-rw-r--r--drivers/mfd/jz4740-adc.c4
-rw-r--r--drivers/mfd/lpc_sch.c7
-rw-r--r--drivers/mfd/max8997.c427
-rw-r--r--drivers/mfd/max8998.c4
-rw-r--r--drivers/mfd/mc13xxx-core.c21
-rw-r--r--drivers/mfd/mfd-core.c135
-rw-r--r--drivers/mfd/pcf50633-core.c24
-rw-r--r--drivers/mfd/rdc321x-southbridge.c4
-rw-r--r--drivers/mfd/sh_mobile_sdhi.c4
-rw-r--r--drivers/mfd/sm501.c134
-rw-r--r--drivers/mfd/t7l66xb.c13
-rw-r--r--drivers/mfd/tc6387xb.c7
-rw-r--r--drivers/mfd/tc6393xb.c25
-rw-r--r--drivers/mfd/ti-ssp.c476
-rw-r--r--drivers/mfd/timberdale.c81
-rw-r--r--drivers/mfd/tps6105x.c246
-rw-r--r--drivers/mfd/tps6586x.c26
-rw-r--r--drivers/mfd/twl-core.c8
-rw-r--r--drivers/mfd/twl4030-codec.c6
-rw-r--r--drivers/mfd/twl4030-madc.c802
-rw-r--r--drivers/mfd/ucb1x00-ts.c5
-rw-r--r--drivers/mfd/vx855.c1
-rw-r--r--drivers/mfd/wl1273-core.c8
-rw-r--r--drivers/mfd/wm831x-i2c.c18
-rw-r--r--drivers/mfd/wm831x-irq.c44
-rw-r--r--drivers/mfd/wm831x-spi.c24
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/mfd/wm8994-core.c77
-rw-r--r--drivers/mfd/wm8994-irq.c14
-rw-r--r--drivers/misc/Kconfig19
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/apds9802als.c17
-rw-r--r--drivers/misc/atmel_tclib.c4
-rw-r--r--drivers/misc/bh1780gli.c19
-rw-r--r--drivers/misc/bmp085.c6
-rw-r--r--drivers/misc/cb710/Makefile4
-rw-r--r--drivers/misc/ep93xx_pwm.c6
-rw-r--r--drivers/misc/hmc6352.c4
-rw-r--r--drivers/misc/lis3lv02d/Kconfig37
-rw-r--r--drivers/misc/lis3lv02d/Makefile7
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c (renamed from drivers/hwmon/lis3lv02d.c)3
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h (renamed from drivers/hwmon/lis3lv02d.h)0
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c (renamed from drivers/hwmon/lis3lv02d_i2c.c)2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c (renamed from drivers/hwmon/lis3lv02d_spi.c)19
-rw-r--r--drivers/misc/pch_phub.c1
-rw-r--r--drivers/misc/sgi-gru/Makefile4
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c908
-rw-r--r--drivers/mmc/card/Kconfig3
-rw-r--r--drivers/mmc/card/block.c1
-rw-r--r--drivers/mmc/card/mmc_test.c271
-rw-r--r--drivers/mmc/core/Makefile3
-rw-r--r--drivers/mmc/core/core.c26
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/host.c5
-rw-r--r--drivers/mmc/core/mmc.c86
-rw-r--r--drivers/mmc/core/quirks.c84
-rw-r--r--drivers/mmc/core/sd.c1
-rw-r--r--drivers/mmc/core/sdio.c9
-rw-r--r--drivers/mmc/host/Kconfig13
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c19
-rw-r--r--drivers/mmc/host/cb710-mmc.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c83
-rw-r--r--drivers/mmc/host/dw_mmc.h2
-rw-r--r--drivers/mmc/host/mmci.c348
-rw-r--r--drivers/mmc/host/mmci.h15
-rw-r--r--drivers/mmc/host/msm_sdcc.c48
-rw-r--r--drivers/mmc/host/msm_sdcc.h1
-rw-r--r--drivers/mmc/host/mxcmmc.c181
-rw-r--r--drivers/mmc/host/mxs-mmc.c874
-rw-r--r--drivers/mmc/host/of_mmc_spi.c26
-rw-r--r--drivers/mmc/host/omap.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c43
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c134
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h1
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c3
-rw-r--r--drivers/mmc/host/sdhci-pci.c12
-rw-r--r--drivers/mmc/host/sdhci-s3c.c3
-rw-r--r--drivers/mmc/host/sdhci-tegra.c6
-rw-r--r--drivers/mmc/host/sh_mmcif.c62
-rw-r--r--drivers/mmc/host/tmio_mmc.c130
-rw-r--r--drivers/mmc/host/via-sdmmc.c3
-rw-r--r--drivers/mtd/nand/Kconfig19
-rw-r--r--drivers/mtd/nand/mxc_nand.c5
-rw-r--r--drivers/mtd/nand/omap2.c367
-rw-r--r--drivers/mtd/nand/tmio_nand.c11
-rw-r--r--drivers/mtd/onenand/Kconfig2
-rw-r--r--drivers/mtd/onenand/omap2.c36
-rw-r--r--drivers/mtd/ubi/Kconfig8
-rw-r--r--drivers/mtd/ubi/Kconfig.debug73
-rw-r--r--drivers/mtd/ubi/build.c34
-rw-r--r--drivers/mtd/ubi/debug.c14
-rw-r--r--drivers/mtd/ubi/debug.h131
-rw-r--r--drivers/mtd/ubi/io.c145
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/mtd/ubi/scan.c95
-rw-r--r--drivers/mtd/ubi/scan.h2
-rw-r--r--drivers/mtd/ubi/ubi.h10
-rw-r--r--drivers/mtd/ubi/vmt.c7
-rw-r--r--drivers/mtd/ubi/vtbl.c9
-rw-r--r--drivers/mtd/ubi/wl.c20
-rw-r--r--drivers/net/atl1c/atl1c.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c7
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/caif/Makefile4
-rw-r--r--drivers/net/can/c_can/c_can.c6
-rw-r--r--drivers/net/can/janz-ican3.c3
-rw-r--r--drivers/net/ftmac100.c2
-rw-r--r--drivers/net/gianfar.c16
-rw-r--r--drivers/net/gianfar.h1
-rw-r--r--drivers/net/ks8842.c3
-rw-r--r--drivers/net/macvlan.c18
-rw-r--r--drivers/net/niu.c2
-rw-r--r--drivers/net/ppp_deflate.c2
-rw-r--r--drivers/net/qla3xxx.c2
-rw-r--r--drivers/net/r8169.c4
-rw-r--r--drivers/net/rionet.c6
-rw-r--r--drivers/net/skfp/Makefile2
-rw-r--r--drivers/net/sungem.h2
-rw-r--r--drivers/net/tile/tilepro.c965
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/wan/lmc/Makefile2
-rw-r--r--drivers/net/wireless/hostap/hostap_config.h4
-rw-r--r--drivers/net/wireless/zd1211rw/Makefile4
-rw-r--r--drivers/oprofile/cpu_buffer.c24
-rw-r--r--drivers/oprofile/timer_int.c4
-rw-r--r--drivers/parisc/dino.c22
-rw-r--r--drivers/parisc/eisa.c12
-rw-r--r--drivers/parisc/gsc.c22
-rw-r--r--drivers/parisc/iosapic.c40
-rw-r--r--drivers/parisc/superio.c12
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c1
-rw-r--r--drivers/pci/pci-label.c252
-rw-r--r--drivers/pci/pci-sysfs.c4
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c31
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/quirks.c113
-rw-r--r--drivers/pci/setup-bus.c321
-rw-r--r--drivers/platform/x86/Kconfig18
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c2
-rw-r--r--drivers/platform/x86/asus-laptop.c1
-rw-r--r--drivers/platform/x86/asus_acpi.c1
-rw-r--r--drivers/platform/x86/classmate-laptop.c1
-rw-r--r--drivers/platform/x86/compal-laptop.c1
-rw-r--r--drivers/platform/x86/dell-laptop.c1
-rw-r--r--drivers/platform/x86/eeepc-laptop.c1
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c1
-rw-r--r--drivers/platform/x86/hp_accel.c (renamed from drivers/hwmon/hp_accel.c)7
-rw-r--r--drivers/platform/x86/msi-laptop.c1
-rw-r--r--drivers/platform/x86/msi-wmi.c1
-rw-r--r--drivers/platform/x86/panasonic-laptop.c1
-rw-r--r--drivers/platform/x86/sony-laptop.c3
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c1
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1
-rw-r--r--drivers/pnp/base.h2
-rw-r--r--drivers/pnp/manager.c7
-rw-r--r--drivers/pnp/resource.c7
-rw-r--r--drivers/power/jz4740-battery.c4
-rw-r--r--drivers/pps/clients/Makefile4
-rw-r--r--drivers/pps/generators/pps_gen_parport.c5
-rw-r--r--drivers/rapidio/Makefile4
-rw-r--r--drivers/rapidio/rio-scan.c2
-rw-r--r--drivers/rapidio/rio-sysfs.c42
-rw-r--r--drivers/rapidio/rio.c84
-rw-r--r--drivers/rapidio/switches/Makefile4
-rw-r--r--drivers/regulator/88pm8607.c46
-rw-r--r--drivers/regulator/Kconfig18
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/ab3100.c3
-rw-r--r--drivers/regulator/max8997.c1213
-rw-r--r--drivers/regulator/mc13783-regulator.c7
-rw-r--r--drivers/regulator/mc13892-regulator.c7
-rw-r--r--drivers/regulator/tps6105x-regulator.c196
-rw-r--r--drivers/regulator/twl-regulator.c24
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/rtc-ds1374.c19
-rw-r--r--drivers/rtc/rtc-ds1390.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c2
-rw-r--r--drivers/rtc/rtc-ds3234.c2
-rw-r--r--drivers/rtc/rtc-isl1208.c176
-rw-r--r--drivers/rtc/rtc-m41t94.c2
-rw-r--r--drivers/rtc/rtc-pl030.c2
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-tegra.c488
-rw-r--r--drivers/s390/scsi/zfcp_aux.c80
-rw-r--r--drivers/s390/scsi/zfcp_def.h15
-rw-r--r--drivers/s390/scsi/zfcp_erp.c4
-rw-r--r--drivers/s390/scsi/zfcp_ext.h9
-rw-r--r--drivers/s390/scsi/zfcp_fc.c333
-rw-r--r--drivers/s390/scsi/zfcp_fc.h124
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c27
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c71
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/NCR5380.c3
-rw-r--r--drivers/scsi/aacraid/Makefile2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/aic94xx/Makefile4
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c18
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h4
-rw-r--r--drivers/scsi/be2iscsi/be_main.c3
-rw-r--r--drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h1080
-rw-r--r--drivers/scsi/bnx2fc/Kconfig11
-rw-r--r--drivers/scsi/bnx2fc/Makefile3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h511
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_constants.h206
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h70
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c515
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2535
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c1868
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c1833
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c844
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h4
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c125
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c29
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c53
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c56
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.h19
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c7
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c66
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h5
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c112
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c18
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c7
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c26
-rw-r--r--drivers/scsi/fcoe/Makefile2
-rw-r--r--drivers/scsi/fcoe/fcoe.c621
-rw-r--r--drivers/scsi/fcoe/fcoe.h50
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c (renamed from drivers/scsi/fcoe/libfcoe.c)40
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c770
-rw-r--r--drivers/scsi/fcoe/libfcoe.h31
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/vnic_dev.c2
-rw-r--r--drivers/scsi/hpsa.c579
-rw-r--r--drivers/scsi/hpsa.h9
-rw-r--r--drivers/scsi/hpsa_cmd.h4
-rw-r--r--drivers/scsi/ipr.c9
-rw-r--r--drivers/scsi/iscsi_tcp.c134
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/libfc/fc_exch.c111
-rw-r--r--drivers/scsi/libfc/fc_fcp.c39
-rw-r--r--drivers/scsi/libfc/fc_libfc.c120
-rw-r--r--drivers/scsi/libfc/fc_libfc.h14
-rw-r--r--drivers/scsi/libfc/fc_lport.c69
-rw-r--r--drivers/scsi/libfc/fc_npiv.c10
-rw-r--r--drivers/scsi/libfc/fc_rport.c191
-rw-r--r--drivers/scsi/libiscsi.c44
-rw-r--r--drivers/scsi/libsas/Kconfig8
-rw-r--r--drivers/scsi/libsas/Makefile6
-rw-r--r--drivers/scsi/libsas/sas_ata.c80
-rw-r--r--drivers/scsi/libsas/sas_dump.c4
-rw-r--r--drivers/scsi/libsas/sas_dump.h12
-rw-r--r--drivers/scsi/libsas/sas_expander.c5
-rw-r--r--drivers/scsi/libsas/sas_internal.h6
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c3
-rw-r--r--drivers/scsi/lpfc/Makefile6
-rw-r--r--drivers/scsi/lpfc/lpfc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c123
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h9
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c49
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c962
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h60
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c173
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c18
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h104
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c134
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c113
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c217
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c149
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c19
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h5
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h6
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_history.txt384
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_sas.h7
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c128
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h40
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1
-rw-r--r--drivers/scsi/mvsas/Makefile4
-rw-r--r--drivers/scsi/osd/osd_initiator.c20
-rw-r--r--drivers/scsi/osst.c10
-rw-r--r--drivers/scsi/pcmcia/Makefile2
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c37
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c27
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h10
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c45
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c191
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c57
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_debug.c192
-rw-r--r--drivers/scsi/scsi_devinfo.c85
-rw-r--r--drivers/scsi/scsi_error.c118
-rw-r--r--drivers/scsi/scsi_lib.c36
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c104
-rw-r--r--drivers/scsi/sd.c230
-rw-r--r--drivers/scsi/sd.h25
-rw-r--r--drivers/spi/Kconfig35
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/amba-pl022.c75
-rw-r--r--drivers/spi/davinci_spi.c54
-rw-r--r--drivers/spi/dw_spi.c4
-rw-r--r--drivers/spi/dw_spi.h234
-rw-r--r--drivers/spi/dw_spi_mid.c3
-rw-r--r--drivers/spi/dw_spi_mmio.c4
-rw-r--r--drivers/spi/dw_spi_pci.c3
-rw-r--r--drivers/spi/omap2_mcspi.c260
-rw-r--r--drivers/spi/spi_altera.c339
-rw-r--r--drivers/spi/spi_bfin5xx.c104
-rw-r--r--drivers/spi/spi_bitbang.c13
-rw-r--r--drivers/spi/spi_imx.c12
-rw-r--r--drivers/spi/spi_oc_tiny.c425
-rw-r--r--drivers/spi/spi_sh.c543
-rw-r--r--drivers/spi/spi_sh_msiof.c127
-rw-r--r--drivers/spi/spidev.c12
-rw-r--r--drivers/spi/ti-ssp-spi.c402
-rw-r--r--drivers/spi/xilinx_spi.c3
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c1
-rw-r--r--drivers/staging/samsung-laptop/samsung-laptop.c1
-rw-r--r--drivers/staging/tty/specialix.c8
-rw-r--r--drivers/target/target_core_cdb.c8
-rw-r--r--drivers/target/target_core_hba.c1
-rw-r--r--drivers/tty/bfin_jtag_comm.c6
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/hvc/hvcs.c76
-rw-r--r--drivers/tty/serial/Kconfig18
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c513
-rw-r--r--drivers/tty/serial/msm_serial.c286
-rw-r--r--drivers/tty/serial/msm_serial.h28
-rw-r--r--drivers/tty/serial/mxs-auart.c798
-rw-r--r--drivers/tty/serial/pch_uart.c1
-rw-r--r--drivers/tty/serial/ucc_uart.c67
-rw-r--r--drivers/tty/tty_buffer.c14
-rw-r--r--drivers/tty/tty_ldisc.c14
-rw-r--r--drivers/usb/core/driver.c15
-rw-r--r--drivers/usb/core/usb.c5
-rw-r--r--drivers/usb/core/usb.h3
-rw-r--r--drivers/usb/host/ehci-mxc.c5
-rw-r--r--drivers/usb/host/ohci-tmio.c8
-rw-r--r--drivers/usb/misc/appledisplay.c1
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_core.h4
-rw-r--r--drivers/usb/musb/musbhsdma.h2
-rw-r--r--drivers/usb/otg/isp1301_omap.c2
-rw-r--r--drivers/vhost/net.c159
-rw-r--r--drivers/vhost/vhost.c55
-rw-r--r--drivers/video/Kconfig27
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/amba-clcd.c105
-rw-r--r--drivers/video/arkfb.c160
-rw-r--r--drivers/video/atmel_lcdfb.c32
-rw-r--r--drivers/video/aty/aty128fb.c1
-rw-r--r--drivers/video/aty/atyfb_base.c1
-rw-r--r--drivers/video/aty/radeon_backlight.c1
-rw-r--r--drivers/video/aty/radeon_base.c2
-rw-r--r--drivers/video/aty/radeon_i2c.c3
-rw-r--r--drivers/video/backlight/88pm860x_bl.c35
-rw-r--r--drivers/video/backlight/Kconfig18
-rw-r--r--drivers/video/backlight/Makefile3
-rw-r--r--drivers/video/backlight/adp5520_bl.c1
-rw-r--r--drivers/video/backlight/adp8860_bl.c1
-rw-r--r--drivers/video/backlight/adx_bl.c1
-rw-r--r--drivers/video/backlight/apple_bl.c241
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c1
-rw-r--r--drivers/video/backlight/backlight.c24
-rw-r--r--drivers/video/backlight/corgi_lcd.c1
-rw-r--r--drivers/video/backlight/cr_bllcd.c1
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/ep93xx_bl.c1
-rw-r--r--drivers/video/backlight/generic_bl.c1
-rw-r--r--drivers/video/backlight/hp680_bl.c1
-rw-r--r--drivers/video/backlight/jornada720_bl.c5
-rw-r--r--drivers/video/backlight/jornada720_lcd.c4
-rw-r--r--drivers/video/backlight/kb3886_bl.c1
-rw-r--r--drivers/video/backlight/ld9040.c819
-rw-r--r--drivers/video/backlight/ld9040_gamma.h200
-rw-r--r--drivers/video/backlight/locomolcd.c1
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c400
-rw-r--r--drivers/video/backlight/omap1_bl.c1
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c1
-rw-r--r--drivers/video/backlight/progear_bl.c1
-rw-r--r--drivers/video/backlight/pwm_bl.c12
-rw-r--r--drivers/video/backlight/s6e63m0.c1
-rw-r--r--drivers/video/backlight/tosa_bl.c1
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/bf54x-lq043fb.c1
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c1
-rw-r--r--drivers/video/cg14.c1
-rw-r--r--drivers/video/cg6.c1
-rw-r--r--drivers/video/console/fbcon.c4
-rw-r--r--drivers/video/console/tileblit.c2
-rw-r--r--drivers/video/cyber2000fb.c263
-rw-r--r--drivers/video/cyber2000fb.h16
-rw-r--r--drivers/video/edid.h4
-rw-r--r--drivers/video/ffb.c2
-rw-r--r--drivers/video/hecubafb.c2
-rw-r--r--drivers/video/hpfb.c6
-rw-r--r--drivers/video/imxfb.c1
-rw-r--r--drivers/video/intelfb/Makefile5
-rw-r--r--drivers/video/matrox/matroxfb_base.c9
-rw-r--r--drivers/video/metronomefb.c2
-rw-r--r--drivers/video/msm/mdp_hw.h11
-rw-r--r--drivers/video/msm/mdp_ppp.c1
-rw-r--r--drivers/video/msm/msm_fb.c27
-rw-r--r--drivers/video/mxsfb.c919
-rw-r--r--drivers/video/nvidia/nv_backlight.c1
-rw-r--r--drivers/video/omap/Kconfig7
-rw-r--r--drivers/video/omap/blizzard.c3
-rw-r--r--drivers/video/omap/hwa742.c3
-rw-r--r--drivers/video/omap2/displays/Kconfig6
-rw-r--r--drivers/video/omap2/displays/Makefile1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c1
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c25
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c279
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c1
-rw-r--r--drivers/video/omap2/displays/panel-taal.c125
-rw-r--r--drivers/video/omap2/dss/Kconfig14
-rw-r--r--drivers/video/omap2/dss/Makefile2
-rw-r--r--drivers/video/omap2/dss/core.c480
-rw-r--r--drivers/video/omap2/dss/dispc.c335
-rw-r--r--drivers/video/omap2/dss/display.c35
-rw-r--r--drivers/video/omap2/dss/dpi.c45
-rw-r--r--drivers/video/omap2/dss/dsi.c967
-rw-r--r--drivers/video/omap2/dss/dss.c763
-rw-r--r--drivers/video/omap2/dss/dss.h153
-rw-r--r--drivers/video/omap2/dss/dss_features.c163
-rw-r--r--drivers/video/omap2/dss/dss_features.h27
-rw-r--r--drivers/video/omap2/dss/hdmi.c1332
-rw-r--r--drivers/video/omap2/dss/hdmi.h415
-rw-r--r--drivers/video/omap2/dss/hdmi_omap4_panel.c222
-rw-r--r--drivers/video/omap2/dss/manager.c13
-rw-r--r--drivers/video/omap2/dss/overlay.c10
-rw-r--r--drivers/video/omap2/dss/rfbi.c128
-rw-r--r--drivers/video/omap2/dss/sdi.c62
-rw-r--r--drivers/video/omap2/dss/venc.c128
-rw-r--r--drivers/video/omap2/omapfb/Kconfig6
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c23
-rw-r--r--drivers/video/riva/fbdev.c1
-rw-r--r--drivers/video/s3c-fb.c1
-rw-r--r--drivers/video/s3fb.c341
-rw-r--r--drivers/video/sh7760fb.c4
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c5
-rw-r--r--drivers/video/sis/sis.h1
-rw-r--r--drivers/video/sis/sis_main.c315
-rw-r--r--drivers/video/sis/vgatypes.h1
-rw-r--r--drivers/video/sm501fb.c275
-rw-r--r--drivers/video/svgalib.c175
-rw-r--r--drivers/video/tcx.c1
-rw-r--r--drivers/video/tmiofb.c28
-rw-r--r--drivers/video/uvesafb.c49
-rw-r--r--drivers/video/vermilion/vermilion.c3
-rw-r--r--drivers/video/vesafb.c44
-rw-r--r--drivers/video/via/viafbdev.h3
-rw-r--r--drivers/video/vt8623fb.c157
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/w1/masters/ds1wm.c13
-rw-r--r--drivers/watchdog/Kconfig34
-rw-r--r--drivers/watchdog/Makefile5
-rw-r--r--drivers/watchdog/alim1535_wdt.c10
-rw-r--r--drivers/watchdog/alim7101_wdt.c2
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c4
-rw-r--r--drivers/watchdog/bfin_wdt.c4
-rw-r--r--drivers/watchdog/booke_wdt.c19
-rw-r--r--drivers/watchdog/cpwd.c36
-rw-r--r--drivers/watchdog/eurotechwdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/i6300esb.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c4
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c572
-rw-r--r--drivers/watchdog/intel_scu_watchdog.h66
-rw-r--r--drivers/watchdog/it8712f_wdt.c2
-rw-r--r--drivers/watchdog/it87_wdt.c28
-rw-r--r--drivers/watchdog/jz4740_wdt.c322
-rw-r--r--drivers/watchdog/machzwd.c2
-rw-r--r--drivers/watchdog/max63xx_wdt.c2
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c6
-rw-r--r--drivers/watchdog/mpcore_wdt.c2
-rw-r--r--drivers/watchdog/mtx-1_wdt.c14
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/omap_wdt.c25
-rw-r--r--drivers/watchdog/omap_wdt.h2
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pnx4008_wdt.c2
-rw-r--r--drivers/watchdog/rdc321x_wdt.c3
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sbc8360.c2
-rw-r--r--drivers/watchdog/sbc_epx_c3.c2
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c2
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c4
-rw-r--r--drivers/watchdog/softdog.c2
-rw-r--r--drivers/watchdog/sp5100_tco.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c2
-rw-r--r--drivers/watchdog/ts72xx_wdt.c2
-rw-r--r--drivers/watchdog/w83697ug_wdt.c4
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/watchdog/wdt_pci.c6
-rw-r--r--drivers/watchdog/xen_wdt.c359
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/Makefile6
-rw-r--r--drivers/xen/balloon.c359
-rw-r--r--drivers/xen/events.c439
-rw-r--r--drivers/xen/gntalloc.c545
-rw-r--r--drivers/xen/gntdev.c382
-rw-r--r--drivers/xen/grant-table.c10
-rw-r--r--drivers/xen/manage.c16
-rw-r--r--drivers/xen/xen-balloon.c256
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c12
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h3
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c11
806 files changed, 62938 insertions, 15299 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9bfb71ff3a6a..177c7d156933 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -117,4 +117,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
source "drivers/clk/Kconfig"
+
+source "drivers/hwspinlock/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b423bb16c3a8..3f135b6fb014 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -117,3 +117,5 @@ obj-y += platform/
obj-y += ieee802154/
#common clk code
obj-y += clk/
+
+obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
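The two hunks above only wire the new drivers/hwspinlock subsystem into the build. For orientation, a minimal sketch of how a client driver might use the hardware spinlock API that this subsystem exposes; the helper name, timeout value and error handling are illustrative assumptions, not taken from the patch:

	#include <linux/hwspinlock.h>

	static int foo_grab_shared_resource(void)	/* hypothetical helper */
	{
		struct hwspinlock *hwlock;
		int ret;

		/* dynamically request an unused hardware spinlock */
		hwlock = hwspin_lock_request();
		if (!hwlock)
			return -EBUSY;

		/* take the lock, give up after 100 ms if it stays contended */
		ret = hwspin_lock_timeout(hwlock, 100);
		if (ret == 0) {
			/* ... touch the resource shared with the remote core ... */
			hwspin_unlock(hwlock);
		}

		hwspin_lock_free(hwlock);
		return ret;
	}
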
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 90f8f7676d1f..a18e497f1c3c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -782,6 +782,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (acpi_video_backlight_support()) {
struct backlight_properties props;
+ struct pci_dev *pdev;
+ acpi_handle acpi_parent;
+ struct device *parent = NULL;
int result;
static int count = 0;
char *name;
@@ -794,9 +797,20 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
return;
count++;
+ acpi_get_parent(device->dev->handle, &acpi_parent);
+
+ pdev = acpi_get_pci_dev(acpi_parent);
+ if (pdev) {
+ parent = &pdev->dev;
+ pci_dev_put(pdev);
+ }
+
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_FIRMWARE;
props.max_brightness = device->brightness->count - 3;
- device->backlight = backlight_device_register(name, NULL, device,
+ device->backlight = backlight_device_register(name,
+ parent,
+ device,
&acpi_backlight_ops,
&props);
kfree(name);
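The hunk above attaches the ACPI backlight device to its parent PCI graphics device and marks it as firmware-controlled. For reference, the registration interface used here takes the parent device and a properties structure up front; a generic sketch, in which the device name, ops structure and brightness range are illustrative assumptions rather than values from the patch:

	struct backlight_properties props;
	struct backlight_device *bd;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_FIRMWARE;	/* brightness handled by ACPI methods */
	props.max_brightness = 100;		/* illustrative range */

	bd = backlight_device_register("acpi_video0", parent, devdata,
				       &foo_backlight_ops, &props);
	if (IS_ERR(bd))
		return PTR_ERR(bd);
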
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e7df019d29d4..6d2bb2524b6e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -13,16 +13,17 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/amba/bus.h>
#include <asm/irq.h>
#include <asm/sizes.h>
-#define to_amba_device(d) container_of(d, struct amba_device, dev)
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
-static struct amba_id *
-amba_lookup(struct amba_id *table, struct amba_device *dev)
+static const struct amba_id *
+amba_lookup(const struct amba_id *table, struct amba_device *dev)
{
int ret = 0;
@@ -57,26 +58,6 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
#define amba_uevent NULL
#endif
-static int amba_suspend(struct device *dev, pm_message_t state)
-{
- struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->suspend)
- ret = drv->suspend(to_amba_device(dev), state);
- return ret;
-}
-
-static int amba_resume(struct device *dev)
-{
- struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->resume)
- ret = drv->resume(to_amba_device(dev));
- return ret;
-}
-
#define amba_attr_func(name,fmt,arg...) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
@@ -102,17 +83,330 @@ static struct device_attribute amba_dev_attrs[] = {
__ATTR_NULL,
};
+#ifdef CONFIG_PM_SLEEP
+
+static int amba_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct amba_driver *adrv = to_amba_driver(dev->driver);
+ struct amba_device *adev = to_amba_device(dev);
+ int ret = 0;
+
+ if (dev->driver && adrv->suspend)
+ ret = adrv->suspend(adev, mesg);
+
+ return ret;
+}
+
+static int amba_legacy_resume(struct device *dev)
+{
+ struct amba_driver *adrv = to_amba_driver(dev->driver);
+ struct amba_device *adev = to_amba_device(dev);
+ int ret = 0;
+
+ if (dev->driver && adrv->resume)
+ ret = adrv->resume(adev);
+
+ return ret;
+}
+
+static int amba_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+static void amba_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define amba_pm_prepare NULL
+#define amba_pm_complete NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+static int amba_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = amba_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+static int amba_pm_suspend_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend_noirq)
+ ret = drv->pm->suspend_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = amba_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_resume_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume_noirq)
+ ret = drv->pm->resume_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define amba_pm_suspend NULL
+#define amba_pm_resume NULL
+#define amba_pm_suspend_noirq NULL
+#define amba_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int amba_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = amba_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int amba_pm_freeze_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze_noirq)
+ ret = drv->pm->freeze_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = amba_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_thaw_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw_noirq)
+ ret = drv->pm->thaw_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = amba_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int amba_pm_poweroff_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff_noirq)
+ ret = drv->pm->poweroff_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = amba_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_restore_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore_noirq)
+ ret = drv->pm->restore_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define amba_pm_freeze NULL
+#define amba_pm_thaw NULL
+#define amba_pm_poweroff NULL
+#define amba_pm_restore NULL
+#define amba_pm_freeze_noirq NULL
+#define amba_pm_thaw_noirq NULL
+#define amba_pm_poweroff_noirq NULL
+#define amba_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM
+
+static const struct dev_pm_ops amba_pm = {
+ .prepare = amba_pm_prepare,
+ .complete = amba_pm_complete,
+ .suspend = amba_pm_suspend,
+ .resume = amba_pm_resume,
+ .freeze = amba_pm_freeze,
+ .thaw = amba_pm_thaw,
+ .poweroff = amba_pm_poweroff,
+ .restore = amba_pm_restore,
+ .suspend_noirq = amba_pm_suspend_noirq,
+ .resume_noirq = amba_pm_resume_noirq,
+ .freeze_noirq = amba_pm_freeze_noirq,
+ .thaw_noirq = amba_pm_thaw_noirq,
+ .poweroff_noirq = amba_pm_poweroff_noirq,
+ .restore_noirq = amba_pm_restore_noirq,
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ pm_generic_runtime_idle
+ )
+};
+
+#define AMBA_PM (&amba_pm)
+
+#else /* !CONFIG_PM */
+
+#define AMBA_PM NULL
+
+#endif /* !CONFIG_PM */
+
/*
* Primecells are part of the Advanced Microcontroller Bus Architecture,
* so we call the bus "amba".
*/
-static struct bus_type amba_bustype = {
+struct bus_type amba_bustype = {
.name = "amba",
.dev_attrs = amba_dev_attrs,
.match = amba_match,
.uevent = amba_uevent,
- .suspend = amba_suspend,
- .resume = amba_resume,
+ .pm = AMBA_PM,
};
static int __init amba_init(void)
@@ -188,7 +482,7 @@ static int amba_probe(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(dev->driver);
- struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
+ const struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
int ret;
do {
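That closes the amba/bus.c conversion: the bus now exports a full dev_pm_ops table and falls back to the legacy amba_driver suspend/resume hooks only when a driver supplies no pm methods. A primecell driver can therefore opt into the new model roughly as sketched below; the driver name and callbacks are hypothetical, not part of this patch:

	static int foo_suspend(struct device *dev)
	{
		/* driver-specific quiesce */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* driver-specific reinit */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct amba_driver foo_driver = {
		.drv = {
			.name	= "foo",
			.pm	= &foo_pm_ops,	/* picked up by the new bus pm ops */
		},
		.id_table	= foo_ids,
		.probe		= foo_probe,
		.remove		= foo_remove,
	};
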
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index ef3ce26bb1f0..0f91e583892e 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -6,7 +6,7 @@
* Author: Ashish Kalra <ashish.kalra@freescale.com>
* Li Yang <leoli@freescale.com>
*
- * Copyright (c) 2006-2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006-2007, 2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -157,7 +157,8 @@ enum {
IE_ON_SINGL_DEVICE_ERR | IE_ON_CMD_COMPLETE,
EXT_INDIRECT_SEG_PRD_FLAG = (1 << 31),
- DATA_SNOOP_ENABLE = (1 << 22),
+ DATA_SNOOP_ENABLE_V1 = (1 << 22),
+ DATA_SNOOP_ENABLE_V2 = (1 << 28),
};
/*
@@ -260,6 +261,7 @@ struct sata_fsl_host_priv {
void __iomem *ssr_base;
void __iomem *csr_base;
int irq;
+ int data_snoop;
};
static inline unsigned int sata_fsl_tag(unsigned int tag,
@@ -312,7 +314,8 @@ static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
}
static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
- u32 *ttl, dma_addr_t cmd_desc_paddr)
+ u32 *ttl, dma_addr_t cmd_desc_paddr,
+ int data_snoop)
{
struct scatterlist *sg;
unsigned int num_prde = 0;
@@ -362,8 +365,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
ttl_dwords += sg_len;
prd->dba = cpu_to_le32(sg_addr);
- prd->ddc_and_ext =
- cpu_to_le32(DATA_SNOOP_ENABLE | (sg_len & ~0x03));
+ prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03));
VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n",
ttl_dwords, prd->dba, prd->ddc_and_ext);
@@ -378,7 +380,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
/* set indirect extension flag along with indirect ext. size */
prd_ptr_to_indirect_ext->ddc_and_ext =
cpu_to_le32((EXT_INDIRECT_SEG_PRD_FLAG |
- DATA_SNOOP_ENABLE |
+ data_snoop |
(indirect_ext_segment_sz & ~0x03)));
}
@@ -421,7 +423,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
if (qc->flags & ATA_QCFLAG_DMAMAP)
num_prde = sata_fsl_fill_sg(qc, (void *)cd,
- &ttl_dwords, cd_paddr);
+ &ttl_dwords, cd_paddr,
+ host_priv->data_snoop);
if (qc->tf.protocol == ATA_PROT_NCQ)
desc_info |= FPDMA_QUEUED_CMD;
@@ -1349,6 +1352,11 @@ static int sata_fsl_probe(struct platform_device *ofdev)
}
host_priv->irq = irq;
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,pq-sata-v2"))
+ host_priv->data_snoop = DATA_SNOOP_ENABLE_V2;
+ else
+ host_priv->data_snoop = DATA_SNOOP_ENABLE_V1;
+
/* allocate host structure */
host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_FSL_MAX_PORTS);
@@ -1431,6 +1439,9 @@ static struct of_device_id fsl_sata_match[] = {
{
.compatible = "fsl,pq-sata",
},
+ {
+ .compatible = "fsl,pq-sata-v2",
+ },
{},
};
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 7d912baf01d4..049650d42c88 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1031,7 +1031,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
/* We now use the "submit_command" function to submit commands to
the firestream. There is a define up near the definition of
that routine that switches this routine between immediate write
- to the immediate comamnd registers and queuing the commands in
+ to the immediate command registers and queuing the commands in
the HPTXQ for execution. This last technique might be more
efficient if we know we're going to submit a whole lot of
commands in one go, but this driver is not setup to be able to
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e1e38b11f48a..16dc3645291c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -31,6 +31,7 @@
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
+#include <linux/parser.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -54,6 +55,8 @@
#define DEV_NAME_LEN 32
+#define RBD_NOTIFY_TIMEOUT_DEFAULT 10
+
/*
* block device image metadata (in-memory version)
*/
@@ -71,6 +74,12 @@ struct rbd_image_header {
char *snap_names;
u64 *snap_sizes;
+
+ u64 obj_version;
+};
+
+struct rbd_options {
+ int notify_timeout;
};
/*
@@ -78,6 +87,7 @@ struct rbd_image_header {
*/
struct rbd_client {
struct ceph_client *client;
+ struct rbd_options *rbd_opts;
struct kref kref;
struct list_head node;
};
@@ -124,6 +134,9 @@ struct rbd_device {
char pool_name[RBD_MAX_POOL_NAME_LEN];
int poolid;
+ struct ceph_osd_event *watch_event;
+ struct ceph_osd_request *watch_request;
+
char snap_name[RBD_MAX_SNAP_NAME_LEN];
u32 cur_snap; /* index+1 of current snapshot within snap context
0 - for the head */
@@ -177,6 +190,8 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
put_device(&rbd_dev->dev);
}
+static int __rbd_update_snaps(struct rbd_device *rbd_dev);
+
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
@@ -211,7 +226,8 @@ static const struct block_device_operations rbd_bd_ops = {
* Initialize an rbd client instance.
* We own *opt.
*/
-static struct rbd_client *rbd_client_create(struct ceph_options *opt)
+static struct rbd_client *rbd_client_create(struct ceph_options *opt,
+ struct rbd_options *rbd_opts)
{
struct rbd_client *rbdc;
int ret = -ENOMEM;
@@ -233,6 +249,8 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt)
if (ret < 0)
goto out_err;
+ rbdc->rbd_opts = rbd_opts;
+
spin_lock(&node_lock);
list_add_tail(&rbdc->node, &rbd_client_list);
spin_unlock(&node_lock);
@@ -267,6 +285,59 @@ static struct rbd_client *__rbd_client_find(struct ceph_options *opt)
}
/*
+ * mount options
+ */
+enum {
+ Opt_notify_timeout,
+ Opt_last_int,
+ /* int args above */
+ Opt_last_string,
+ /* string args above */
+};
+
+static match_table_t rbdopt_tokens = {
+ {Opt_notify_timeout, "notify_timeout=%d"},
+ /* int args above */
+ /* string args above */
+ {-1, NULL}
+};
+
+static int parse_rbd_opts_token(char *c, void *private)
+{
+ struct rbd_options *rbdopt = private;
+ substring_t argstr[MAX_OPT_ARGS];
+ int token, intval, ret;
+
+ token = match_token((char *)c, rbdopt_tokens, argstr);
+ if (token < 0)
+ return -EINVAL;
+
+ if (token < Opt_last_int) {
+ ret = match_int(&argstr[0], &intval);
+ if (ret < 0) {
+ pr_err("bad mount option arg (not int) "
+ "at '%s'\n", c);
+ return ret;
+ }
+ dout("got int token %d val %d\n", token, intval);
+ } else if (token > Opt_last_int && token < Opt_last_string) {
+ dout("got string token %d val %s\n", token,
+ argstr[0].from);
+ } else {
+ dout("got token %d\n", token);
+ }
+
+ switch (token) {
+ case Opt_notify_timeout:
+ rbdopt->notify_timeout = intval;
+ break;
+ default:
+ BUG_ON(token);
+ }
+ return 0;
+}
+
+/*
* Get a ceph client with specific addr and configuration, if one does
* not exist create it.
*/
@@ -276,11 +347,18 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
struct rbd_client *rbdc;
struct ceph_options *opt;
int ret;
+ struct rbd_options *rbd_opts;
+
+ rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL);
+ if (!rbd_opts)
+ return -ENOMEM;
+
+ rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT;
ret = ceph_parse_options(&opt, options, mon_addr,
- mon_addr + strlen(mon_addr), NULL, NULL);
+ mon_addr + strlen(mon_addr), parse_rbd_opts_token, rbd_opts);
if (ret < 0)
- return ret;
+ goto done_err;
spin_lock(&node_lock);
rbdc = __rbd_client_find(opt);
@@ -296,13 +374,18 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
}
spin_unlock(&node_lock);
- rbdc = rbd_client_create(opt);
- if (IS_ERR(rbdc))
- return PTR_ERR(rbdc);
+ rbdc = rbd_client_create(opt, rbd_opts);
+ if (IS_ERR(rbdc)) {
+ ret = PTR_ERR(rbdc);
+ goto done_err;
+ }
rbd_dev->rbd_client = rbdc;
rbd_dev->client = rbdc->client;
return 0;
+done_err:
+ kfree(rbd_opts);
+ return ret;
}
/*
@@ -318,6 +401,7 @@ static void rbd_client_release(struct kref *kref)
spin_unlock(&node_lock);
ceph_destroy_client(rbdc->client);
+ kfree(rbdc->rbd_opts);
kfree(rbdc);
}
@@ -666,7 +750,9 @@ static int rbd_do_request(struct request *rq,
struct ceph_osd_req_op *ops,
int num_reply,
void (*rbd_cb)(struct ceph_osd_request *req,
- struct ceph_msg *msg))
+ struct ceph_msg *msg),
+ struct ceph_osd_request **linger_req,
+ u64 *ver)
{
struct ceph_osd_request *req;
struct ceph_file_layout *layout;
@@ -729,12 +815,20 @@ static int rbd_do_request(struct request *rq,
req->r_oid, req->r_oid_len);
up_read(&header->snap_rwsem);
+ if (linger_req) {
+ ceph_osdc_set_request_linger(&dev->client->osdc, req);
+ *linger_req = req;
+ }
+
ret = ceph_osdc_start_request(&dev->client->osdc, req, false);
if (ret < 0)
goto done_err;
if (!rbd_cb) {
ret = ceph_osdc_wait_request(&dev->client->osdc, req);
+ if (ver)
+ *ver = le64_to_cpu(req->r_reassert_version.version);
+ dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version));
ceph_osdc_put_request(req);
}
return ret;
@@ -789,6 +883,11 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
kfree(req_data);
}
+static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
+{
+ ceph_osdc_put_request(req);
+}
+
/*
* Do a synchronous ceph osd operation
*/
@@ -801,7 +900,9 @@ static int rbd_req_sync_op(struct rbd_device *dev,
int num_reply,
const char *obj,
u64 ofs, u64 len,
- char *buf)
+ char *buf,
+ struct ceph_osd_request **linger_req,
+ u64 *ver)
{
int ret;
struct page **pages;
@@ -833,7 +934,8 @@ static int rbd_req_sync_op(struct rbd_device *dev,
flags,
ops,
2,
- NULL);
+ NULL,
+ linger_req, ver);
if (ret < 0)
goto done_ops;
@@ -893,7 +995,7 @@ static int rbd_do_op(struct request *rq,
flags,
ops,
num_reply,
- rbd_req_cb);
+ rbd_req_cb, 0, NULL);
done:
kfree(seg_name);
return ret;
@@ -940,18 +1042,174 @@ static int rbd_req_sync_read(struct rbd_device *dev,
u64 snapid,
const char *obj,
u64 ofs, u64 len,
- char *buf)
+ char *buf,
+ u64 *ver)
{
return rbd_req_sync_op(dev, NULL,
(snapid ? snapid : CEPH_NOSNAP),
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
NULL,
- 1, obj, ofs, len, buf);
+ 1, obj, ofs, len, buf, NULL, ver);
}
/*
- * Request sync osd read
+ * Request sync osd watch
+ */
+static int rbd_req_sync_notify_ack(struct rbd_device *dev,
+ u64 ver,
+ u64 notify_id,
+ const char *obj)
+{
+ struct ceph_osd_req_op *ops;
+ struct page **pages = NULL;
+ int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
+ if (ret < 0)
+ return ret;
+
+ ops[0].watch.ver = cpu_to_le64(dev->header.obj_version);
+ ops[0].watch.cookie = notify_id;
+ ops[0].watch.flag = 0;
+
+ ret = rbd_do_request(NULL, dev, NULL, CEPH_NOSNAP,
+ obj, 0, 0, NULL,
+ pages, 0,
+ CEPH_OSD_FLAG_READ,
+ ops,
+ 1,
+ rbd_simple_req_cb, 0, NULL);
+
+ rbd_destroy_ops(ops);
+ return ret;
+}
+
+static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
+{
+ struct rbd_device *dev = (struct rbd_device *)data;
+ if (!dev)
+ return;
+
+ dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
+ notify_id, (int)opcode);
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ __rbd_update_snaps(dev);
+ mutex_unlock(&ctl_mutex);
+
+ rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name);
+}
+
+/*
+ * Request sync osd watch
+ */
+static int rbd_req_sync_watch(struct rbd_device *dev,
+ const char *obj,
+ u64 ver)
+{
+ struct ceph_osd_req_op *ops;
+ struct ceph_osd_client *osdc = &dev->client->osdc;
+
+ int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
+ (void *)dev, &dev->watch_event);
+ if (ret < 0)
+ goto fail;
+
+ ops[0].watch.ver = cpu_to_le64(ver);
+ ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
+ ops[0].watch.flag = 1;
+
+ ret = rbd_req_sync_op(dev, NULL,
+ CEPH_NOSNAP,
+ 0,
+ CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+ ops,
+ 1, obj, 0, 0, NULL,
+ &dev->watch_request, NULL);
+
+ if (ret < 0)
+ goto fail_event;
+
+ rbd_destroy_ops(ops);
+ return 0;
+
+fail_event:
+ ceph_osdc_cancel_event(dev->watch_event);
+ dev->watch_event = NULL;
+fail:
+ rbd_destroy_ops(ops);
+ return ret;
+}
+
+struct rbd_notify_info {
+ struct rbd_device *dev;
+};
+
+static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
+{
+ struct rbd_device *dev = (struct rbd_device *)data;
+ if (!dev)
+ return;
+
+ dout("rbd_notify_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
+ notify_id, (int)opcode);
+}
+
+/*
+ * Request sync osd notify
+ */
+static int rbd_req_sync_notify(struct rbd_device *dev,
+ const char *obj)
+{
+ struct ceph_osd_req_op *ops;
+ struct ceph_osd_client *osdc = &dev->client->osdc;
+ struct ceph_osd_event *event;
+ struct rbd_notify_info info;
+ int payload_len = sizeof(u32) + sizeof(u32);
+ int ret;
+
+ ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY, payload_len);
+ if (ret < 0)
+ return ret;
+
+ info.dev = dev;
+
+ ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1,
+ (void *)&info, &event);
+ if (ret < 0)
+ goto fail;
+
+ ops[0].watch.ver = 1;
+ ops[0].watch.flag = 1;
+ ops[0].watch.cookie = event->cookie;
+ ops[0].watch.prot_ver = RADOS_NOTIFY_VER;
+ ops[0].watch.timeout = 12;
+
+ ret = rbd_req_sync_op(dev, NULL,
+ CEPH_NOSNAP,
+ 0,
+ CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+ ops,
+ 1, obj, 0, 0, NULL, NULL, NULL);
+ if (ret < 0)
+ goto fail_event;
+
+ ret = ceph_osdc_wait_event(event, CEPH_OSD_TIMEOUT_DEFAULT);
+ dout("ceph_osdc_wait_event returned %d\n", ret);
+ rbd_destroy_ops(ops);
+ return 0;
+
+fail_event:
+ ceph_osdc_cancel_event(event);
+fail:
+ rbd_destroy_ops(ops);
+ return ret;
+}
+
+/*
+ * Request sync osd rollback
*/
static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
u64 snapid,
@@ -969,13 +1227,10 @@ static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
0,
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
ops,
- 1, obj, 0, 0, NULL);
+ 1, obj, 0, 0, NULL, NULL, NULL);
rbd_destroy_ops(ops);
- if (ret < 0)
- return ret;
-
return ret;
}
@@ -987,7 +1242,8 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
const char *cls,
const char *method,
const char *data,
- int len)
+ int len,
+ u64 *ver)
{
struct ceph_osd_req_op *ops;
int cls_len = strlen(cls);
@@ -1010,7 +1266,7 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
0,
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
ops,
- 1, obj, 0, 0, NULL);
+ 1, obj, 0, 0, NULL, NULL, ver);
rbd_destroy_ops(ops);
@@ -1156,6 +1412,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
struct rbd_image_header_ondisk *dh;
int snap_count = 0;
u64 snap_names_len = 0;
+ u64 ver;
while (1) {
int len = sizeof(*dh) +
@@ -1171,7 +1428,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
NULL, CEPH_NOSNAP,
rbd_dev->obj_md_name,
0, len,
- (char *)dh);
+ (char *)dh, &ver);
if (rc < 0)
goto out_dh;
@@ -1188,6 +1445,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
}
break;
}
+ header->obj_version = ver;
out_dh:
kfree(dh);
@@ -1205,6 +1463,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
u64 new_snapid;
int ret;
void *data, *data_start, *data_end;
+ u64 ver;
/* we should create a snapshot only if we're pointing at the head */
if (dev->cur_snap)
@@ -1227,7 +1486,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
ceph_encode_64_safe(&data, data_end, new_snapid, bad);
ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
- data_start, data - data_start);
+ data_start, data - data_start, &ver);
kfree(data_start);
@@ -1259,6 +1518,7 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
int ret;
struct rbd_image_header h;
u64 snap_seq;
+ int follow_seq = 0;
ret = rbd_read_header(rbd_dev, &h);
if (ret < 0)
@@ -1267,6 +1527,11 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
down_write(&rbd_dev->header.snap_rwsem);
snap_seq = rbd_dev->header.snapc->seq;
+ if (rbd_dev->header.total_snaps &&
+ rbd_dev->header.snapc->snaps[0] == snap_seq)
+ /* pointing at the head; we will need to follow it
+ if the head moves */
+ follow_seq = 1;
kfree(rbd_dev->header.snapc);
kfree(rbd_dev->header.snap_names);
@@ -1277,7 +1542,10 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
rbd_dev->header.snap_names = h.snap_names;
rbd_dev->header.snap_names_len = h.snap_names_len;
rbd_dev->header.snap_sizes = h.snap_sizes;
- rbd_dev->header.snapc->seq = snap_seq;
+ if (follow_seq)
+ rbd_dev->header.snapc->seq = rbd_dev->header.snapc->snaps[0];
+ else
+ rbd_dev->header.snapc->seq = snap_seq;
ret = __rbd_init_snaps_header(rbd_dev);
@@ -1699,7 +1967,28 @@ static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
device_unregister(&rbd_dev->dev);
}
-static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
+static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
+{
+ int ret, rc;
+
+ do {
+ ret = rbd_req_sync_watch(rbd_dev, rbd_dev->obj_md_name,
+ rbd_dev->header.obj_version);
+ if (ret == -ERANGE) {
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ rc = __rbd_update_snaps(rbd_dev);
+ mutex_unlock(&ctl_mutex);
+ if (rc < 0)
+ return rc;
+ }
+ } while (ret == -ERANGE);
+
+ return ret;
+}
+
+static ssize_t rbd_add(struct bus_type *bus,
+ const char *buf,
+ size_t count)
{
struct ceph_osd_client *osdc;
struct rbd_device *rbd_dev;
@@ -1797,6 +2086,10 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
if (rc)
goto err_out_bus;
+ rc = rbd_init_watch_dev(rbd_dev);
+ if (rc)
+ goto err_out_bus;
+
return count;
err_out_bus:
@@ -1849,6 +2142,12 @@ static void rbd_dev_release(struct device *dev)
struct rbd_device *rbd_dev =
container_of(dev, struct rbd_device, dev);
+ if (rbd_dev->watch_request)
+ ceph_osdc_unregister_linger_request(&rbd_dev->client->osdc,
+ rbd_dev->watch_request);
+ if (rbd_dev->watch_event)
+ ceph_osdc_cancel_event(rbd_dev->watch_event);
+
rbd_put_client(rbd_dev);
/* clean up and free blkdev */
@@ -1914,14 +2213,24 @@ static ssize_t rbd_snap_add(struct device *dev,
ret = rbd_header_add_snap(rbd_dev,
name, GFP_KERNEL);
if (ret < 0)
- goto done_unlock;
+ goto err_unlock;
ret = __rbd_update_snaps(rbd_dev);
if (ret < 0)
- goto done_unlock;
+ goto err_unlock;
+
+ /* we must not hold ctl_mutex while notifying; the notify might
+ trigger a watch callback that needs to take that mutex */
+ mutex_unlock(&ctl_mutex);
+
+ /* make a best effort, don't error if failed */
+ rbd_req_sync_notify(rbd_dev, rbd_dev->obj_md_name);
ret = count;
-done_unlock:
+ kfree(name);
+ return ret;
+
+err_unlock:
mutex_unlock(&ctl_mutex);
kfree(name);
return ret;
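
Editor's note, not part of the patch: the rbd hunks above come down to registering a watch on the header object at the cached object version, refreshing the snapshot context from the watch callback, and sending a best-effort notify after snap_add. rbd_init_watch_dev() retries registration whenever the OSD rejects the cached version with -ERANGE. A minimal standalone sketch of that retry pattern, where watch_register() and header_refresh() are hypothetical stand-ins for rbd_req_sync_watch() and __rbd_update_snaps():

#include <errno.h>

struct hdr_cache { unsigned long long obj_version; };

/* hypothetical stand-ins for the real rbd calls */
static int watch_register(struct hdr_cache *h) { (void)h; return 0; }
static int header_refresh(struct hdr_cache *h) { (void)h; return 0; }

static int init_watch_with_retry(struct hdr_cache *h)
{
	int ret, rc;

	do {
		/* rejected with -ERANGE when our cached version is stale */
		ret = watch_register(h);
		if (ret == -ERANGE) {
			rc = header_refresh(h); /* re-read header, update obj_version */
			if (rc < 0)
				return rc;
		}
	} while (ret == -ERANGE);

	return ret;
}
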
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
index a0b403a6b4ed..e5565fbaeb30 100644
--- a/drivers/block/smart1,2.h
+++ b/drivers/block/smart1,2.h
@@ -95,7 +95,7 @@ static unsigned long smart4_completed(ctlr_info_t *h)
/*
* This hardware returns interrupt pending at a different place and
* it does not tell us if the fifo is empty, we will have to check
- * that by getting a 0 back from the comamnd_completed call.
+ * that by getting a 0 back from the command_completed call.
*/
static unsigned long smart4_intr_pending(ctlr_info_t *h)
{
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7e0ebd4a1a74..866811428e20 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -433,7 +433,7 @@ static void btusb_isoc_complete(struct urb *urb)
}
}
-static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
+static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
{
int i, offset = 0;
@@ -780,7 +780,7 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
}
}
-static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting)
+static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
{
struct btusb_data *data = hdev->driver_data;
struct usb_interface *intf = data->isoc;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 04f8b2d083c6..ad59b4e0a9b5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -608,5 +608,13 @@ config RAMOOPS
This enables panic and oops messages to be logged to a circular
buffer in RAM where it can be read back at some later point.
+config MSM_SMD_PKT
+ bool "Enable device interface for some SMD packet ports"
+ default n
+ depends on MSM_SMD
+ help
+ Enables userspace clients to read and write to some packet SMD
+ ports via a device interface on MSM chipsets.
+
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 057f65452e7b..7a00672bd85d 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
+obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a348c7e9aa0b..dd1d143eb8ea 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -39,7 +39,7 @@ static struct hwrng nmk_rng = {
.read = nmk_rng_read,
};
-static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
+static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
{
void __iomem *base;
int ret;
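
Editor's note, not part of the patch: several hunks in this series (nomadik-rng here, amba-pl08x further down) switch AMBA probe callbacks to take a const struct amba_id *. On the driver side the change is only the signature plus treating id->data as read-only. A hedged skeleton, using types from <linux/amba/bus.h> with the body elided:

#include <linux/amba/bus.h>

static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
	const void *drvdata = id->data;	/* the id table entry is read-only now */

	(void)adev;
	(void)drvdata;
	return 0;	/* real probes would map registers, register the device, ... */
}
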
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index c86d43b88e1e..d28b484aee45 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -3521,7 +3521,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
kfree(to_clean);
}
-static void __exit cleanup_ipmi_si(void)
+static void cleanup_ipmi_si(void)
{
struct smi_info *e, *tmp_e;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 1256454b2d43..436a99017998 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -47,10 +47,7 @@ static inline unsigned long size_inside_page(unsigned long start,
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
- if (addr + count > __pa(high_memory))
- return 0;
-
- return 1;
+ return addr + count <= __pa(high_memory);
}
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
new file mode 100644
index 000000000000..b6f8a65c9960
--- /dev/null
+++ b/drivers/char/msm_smd_pkt.c
@@ -0,0 +1,466 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * SMD Packet Driver -- Provides userspace interface to SMD packet ports.
+ */
+
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+
+#include <mach/msm_smd.h>
+
+#define NUM_SMD_PKT_PORTS 9
+#define DEVICE_NAME "smdpkt"
+#define MAX_BUF_SIZE 2048
+
+struct smd_pkt_dev {
+ struct cdev cdev;
+ struct device *devicep;
+
+ struct smd_channel *ch;
+ int open_count;
+ struct mutex ch_lock;
+ struct mutex rx_lock;
+ struct mutex tx_lock;
+ wait_queue_head_t ch_read_wait_queue;
+ wait_queue_head_t ch_opened_wait_queue;
+
+ int i;
+
+ unsigned char tx_buf[MAX_BUF_SIZE];
+ unsigned char rx_buf[MAX_BUF_SIZE];
+ int remote_open;
+
+} *smd_pkt_devp[NUM_SMD_PKT_PORTS];
+
+struct class *smd_pkt_classp;
+static dev_t smd_pkt_number;
+
+static int msm_smd_pkt_debug_enable;
+module_param_named(debug_enable, msm_smd_pkt_debug_enable,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#ifdef DEBUG
+#define D_DUMP_BUFFER(prestr, cnt, buf) do { \
+ int i; \
+ if (msm_smd_pkt_debug_enable) { \
+ pr_debug("%s", prestr); \
+ for (i = 0; i < cnt; i++) \
+ pr_debug("%.2x", buf[i]); \
+ pr_debug("\n"); \
+ } \
+ } while (0)
+#else
+#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
+#endif
+
+#ifdef DEBUG
+#define DBG(x...) do { \
+ if (msm_smd_pkt_debug_enable) \
+ pr_debug(x); \
+ } while (0)
+#else
+#define DBG(x...) do {} while (0)
+#endif
+
+static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int sz;
+
+ if (!smd_pkt_devp || !smd_pkt_devp->ch)
+ return;
+
+ sz = smd_cur_packet_size(smd_pkt_devp->ch);
+ if (sz == 0) {
+ DBG("no packet\n");
+ return;
+ }
+ if (sz > smd_read_avail(smd_pkt_devp->ch)) {
+ DBG("incomplete packet\n");
+ return;
+ }
+
+ DBG("waking up reader\n");
+ wake_up_interruptible(&smd_pkt_devp->ch_read_wait_queue);
+}
+
+static ssize_t smd_pkt_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int r, bytes_read;
+ struct smd_pkt_dev *smd_pkt_devp;
+ struct smd_channel *chl;
+
+ DBG("read %d bytes\n", count);
+ if (count > MAX_BUF_SIZE)
+ return -EINVAL;
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp || !smd_pkt_devp->ch)
+ return -EINVAL;
+
+ chl = smd_pkt_devp->ch;
+wait_for_packet:
+ r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue,
+ (smd_cur_packet_size(chl) > 0 &&
+ smd_read_avail(chl) >=
+ smd_cur_packet_size(chl)));
+
+ if (r < 0) {
+ if (r != -ERESTARTSYS)
+ pr_err("wait returned %d\n", r);
+ return r;
+ }
+
+ mutex_lock(&smd_pkt_devp->rx_lock);
+
+ bytes_read = smd_cur_packet_size(smd_pkt_devp->ch);
+ if (bytes_read == 0 ||
+ bytes_read < smd_read_avail(smd_pkt_devp->ch)) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ DBG("Nothing to read\n");
+ goto wait_for_packet;
+ }
+
+ if (bytes_read > count) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ pr_info("packet size %d > buffer size %d", bytes_read, count);
+ return -EINVAL;
+ }
+
+ r = smd_read(smd_pkt_devp->ch, smd_pkt_devp->rx_buf, bytes_read);
+ if (r != bytes_read) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ pr_err("smd_read failed to read %d bytes: %d\n", bytes_read, r);
+ return -EIO;
+ }
+
+ D_DUMP_BUFFER("read: ", bytes_read, smd_pkt_devp->rx_buf);
+ r = copy_to_user(buf, smd_pkt_devp->rx_buf, bytes_read);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ if (r) {
+ pr_err("copy_to_user failed %d\n", r);
+ return -EFAULT;
+ }
+
+ DBG("read complete %d bytes\n", bytes_read);
+ check_and_wakeup_reader(smd_pkt_devp);
+
+ return bytes_read;
+}
+
+static ssize_t smd_pkt_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int r;
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ if (count > MAX_BUF_SIZE)
+ return -EINVAL;
+
+ DBG("writting %d bytes\n", count);
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp || !smd_pkt_devp->ch)
+ return -EINVAL;
+
+ mutex_lock(&smd_pkt_devp->tx_lock);
+ if (smd_write_avail(smd_pkt_devp->ch) < count) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ DBG("Not enough space to write\n");
+ return -ENOMEM;
+ }
+
+ D_DUMP_BUFFER("write: ", count, buf);
+ r = copy_from_user(smd_pkt_devp->tx_buf, buf, count);
+ if (r) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ pr_err("copy_from_user failed %d\n", r);
+ return -EFAULT;
+ }
+
+ r = smd_write(smd_pkt_devp->ch, smd_pkt_devp->tx_buf, count);
+ if (r != count) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ pr_err("smd_write failed to write %d bytes: %d.\n", count, r);
+ return -EIO;
+ }
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+
+ DBG("wrote %d bytes\n", count);
+ return count;
+}
+
+static unsigned int smd_pkt_poll(struct file *file, poll_table *wait)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned int mask = 0;
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp)
+ return POLLERR;
+
+ DBG("poll waiting\n");
+ poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait);
+ if (smd_read_avail(smd_pkt_devp->ch))
+ mask |= POLLIN | POLLRDNORM;
+
+ DBG("poll return\n");
+ return mask;
+}
+
+static void smd_pkt_ch_notify(void *priv, unsigned event)
+{
+ struct smd_pkt_dev *smd_pkt_devp = priv;
+
+ if (smd_pkt_devp->ch == 0)
+ return;
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ DBG("data\n");
+ check_and_wakeup_reader(smd_pkt_devp);
+ break;
+
+ case SMD_EVENT_OPEN:
+ DBG("remote open\n");
+ smd_pkt_devp->remote_open = 1;
+ wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
+ break;
+
+ case SMD_EVENT_CLOSE:
+ smd_pkt_devp->remote_open = 0;
+ pr_info("remote closed\n");
+ break;
+
+ default:
+ pr_err("unknown event %d\n", event);
+ break;
+ }
+}
+
+static char *smd_pkt_dev_name[] = {
+ "smdcntl0",
+ "smdcntl1",
+ "smdcntl2",
+ "smdcntl3",
+ "smdcntl4",
+ "smdcntl5",
+ "smdcntl6",
+ "smdcntl7",
+ "smd22",
+};
+
+static char *smd_ch_name[] = {
+ "DATA5_CNTL",
+ "DATA6_CNTL",
+ "DATA7_CNTL",
+ "DATA8_CNTL",
+ "DATA9_CNTL",
+ "DATA12_CNTL",
+ "DATA13_CNTL",
+ "DATA14_CNTL",
+ "DATA22",
+};
+
+static int smd_pkt_open(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev);
+ if (!smd_pkt_devp)
+ return -EINVAL;
+
+ file->private_data = smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ if (smd_pkt_devp->open_count == 0) {
+ r = smd_open(smd_ch_name[smd_pkt_devp->i],
+ &smd_pkt_devp->ch, smd_pkt_devp,
+ smd_pkt_ch_notify);
+ if (r < 0) {
+ pr_err("smd_open failed for %s, %d\n",
+ smd_ch_name[smd_pkt_devp->i], r);
+ goto out;
+ }
+
+ r = wait_event_interruptible_timeout(
+ smd_pkt_devp->ch_opened_wait_queue,
+ smd_pkt_devp->remote_open,
+ msecs_to_jiffies(2 * HZ));
+ if (r == 0)
+ r = -ETIMEDOUT;
+
+ if (r < 0) {
+ pr_err("wait returned %d\n", r);
+ smd_close(smd_pkt_devp->ch);
+ smd_pkt_devp->ch = 0;
+ } else {
+ smd_pkt_devp->open_count++;
+ r = 0;
+ }
+ }
+out:
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+ return r;
+}
+
+static int smd_pkt_release(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_pkt_dev *smd_pkt_devp = file->private_data;
+
+ if (!smd_pkt_devp)
+ return -EINVAL;
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ if (--smd_pkt_devp->open_count == 0) {
+ r = smd_close(smd_pkt_devp->ch);
+ smd_pkt_devp->ch = 0;
+ }
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ return r;
+}
+
+static const struct file_operations smd_pkt_fops = {
+ .owner = THIS_MODULE,
+ .open = smd_pkt_open,
+ .release = smd_pkt_release,
+ .read = smd_pkt_read,
+ .write = smd_pkt_write,
+ .poll = smd_pkt_poll,
+};
+
+static int __init smd_pkt_init(void)
+{
+ int i;
+ int r;
+
+ r = alloc_chrdev_region(&smd_pkt_number, 0,
+ NUM_SMD_PKT_PORTS, DEVICE_NAME);
+ if (r) {
+ pr_err("alloc_chrdev_region() failed %d\n", r);
+ return r;
+ }
+
+ smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(smd_pkt_classp)) {
+ r = PTR_ERR(smd_pkt_classp);
+ pr_err("class_create() failed %d\n", r);
+ goto unreg_chardev;
+ }
+
+ for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
+ smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
+ GFP_KERNEL);
+ if (!smd_pkt_devp[i]) {
+ r = -ENOMEM;
+ pr_err("kzalloc() failed\n");
+ goto clean_cdevs;
+ }
+
+ smd_pkt_devp[i]->i = i;
+
+ init_waitqueue_head(&smd_pkt_devp[i]->ch_read_wait_queue);
+ smd_pkt_devp[i]->remote_open = 0;
+ init_waitqueue_head(&smd_pkt_devp[i]->ch_opened_wait_queue);
+
+ mutex_init(&smd_pkt_devp[i]->ch_lock);
+ mutex_init(&smd_pkt_devp[i]->rx_lock);
+ mutex_init(&smd_pkt_devp[i]->tx_lock);
+
+ cdev_init(&smd_pkt_devp[i]->cdev, &smd_pkt_fops);
+ smd_pkt_devp[i]->cdev.owner = THIS_MODULE;
+
+ r = cdev_add(&smd_pkt_devp[i]->cdev,
+ (smd_pkt_number + i), 1);
+ if (r) {
+ pr_err("cdev_add() failed %d\n", r);
+ kfree(smd_pkt_devp[i]);
+ goto clean_cdevs;
+ }
+
+ smd_pkt_devp[i]->devicep =
+ device_create(smd_pkt_classp, NULL,
+ (smd_pkt_number + i), NULL,
+ smd_pkt_dev_name[i]);
+ if (IS_ERR(smd_pkt_devp[i]->devicep)) {
+ r = PTR_ERR(smd_pkt_devp[i]->devicep);
+ pr_err("device_create() failed %d\n", r);
+ cdev_del(&smd_pkt_devp[i]->cdev);
+ kfree(smd_pkt_devp[i]);
+ goto clean_cdevs;
+ }
+
+ }
+
+ pr_info("SMD Packet Port Driver Initialized.\n");
+ return 0;
+
+clean_cdevs:
+ if (i > 0) {
+ while (--i >= 0) {
+ mutex_destroy(&smd_pkt_devp[i]->ch_lock);
+ mutex_destroy(&smd_pkt_devp[i]->rx_lock);
+ mutex_destroy(&smd_pkt_devp[i]->tx_lock);
+ cdev_del(&smd_pkt_devp[i]->cdev);
+ kfree(smd_pkt_devp[i]);
+ device_destroy(smd_pkt_classp,
+ MKDEV(MAJOR(smd_pkt_number), i));
+ }
+ }
+
+ class_destroy(smd_pkt_classp);
+unreg_chardev:
+ unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
+ return r;
+}
+module_init(smd_pkt_init);
+
+static void __exit smd_pkt_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
+ mutex_destroy(&smd_pkt_devp[i]->ch_lock);
+ mutex_destroy(&smd_pkt_devp[i]->rx_lock);
+ mutex_destroy(&smd_pkt_devp[i]->tx_lock);
+ cdev_del(&smd_pkt_devp[i]->cdev);
+ kfree(smd_pkt_devp[i]);
+ device_destroy(smd_pkt_classp,
+ MKDEV(MAJOR(smd_pkt_number), i));
+ }
+
+ class_destroy(smd_pkt_classp);
+ unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
+}
+module_exit(smd_pkt_cleanup);
+
+MODULE_DESCRIPTION("MSM Shared Memory Packet Port");
+MODULE_LICENSE("GPL v2");
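
Editor's note, not part of the patch: each port registered by smd_pkt_init() above shows up as a character device (typically /dev/smdcntl0 through /dev/smdcntl7 and /dev/smd22, matching smd_pkt_dev_name[]), and read() returns one whole SMD packet of at most MAX_BUF_SIZE bytes. A userspace sketch using plain POSIX I/O; the device node path assumes the usual udev naming:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[2048];		/* MAX_BUF_SIZE in the driver */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/smdcntl0", O_RDWR);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	/* wait up to 5s for a complete packet, then read it in one go */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n >= 0)
			printf("got %zd byte packet\n", n);
	}

	close(pfd.fd);
	return 0;
}
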
diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile
index 26b4fce217b6..efa6a82e543d 100644
--- a/drivers/char/mwave/Makefile
+++ b/drivers/char/mwave/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_MWAVE) += mwave.o
mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
# To have the mwave driver disable other uarts if necessary
-# EXTRA_CFLAGS += -DMWAVE_FUTZ_WITH_OTHER_DEVICES
+# ccflags-y := -DMWAVE_FUTZ_WITH_OTHER_DEVICES
# To compile in lots (~20 KiB) of run-time enablable printk()s for debugging:
-ccflags-y := -DMW_TRACE
+ccflags-y += -DMW_TRACE
diff --git a/drivers/char/mwave/README b/drivers/char/mwave/README
index 480251fc78e2..c2a58f428bc8 100644
--- a/drivers/char/mwave/README
+++ b/drivers/char/mwave/README
@@ -11,7 +11,7 @@ are not saved by the BIOS and so do not persist after unload and reload.
0x0008 tp3780i tracing
Tracing only occurs if the driver has been compiled with the
- MW_TRACE macro #defined (i.e. let EXTRA_CFLAGS += -DMW_TRACE
+ MW_TRACE macro #defined (i.e. let ccflags-y := -DMW_TRACE
in the Makefile).
mwave_3780i_irq=5/7/10/11/15
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5cb4d09919d6..0f17ad8585d7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1371,7 +1371,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
goto out;
if (cpufreq_driver->suspend) {
- ret = cpufreq_driver->suspend(cpu_policy, pmsg);
+ ret = cpufreq_driver->suspend(cpu_policy);
if (ret)
printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
"step on CPU %u\n", cpu_policy->cpu);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 94284c8473b1..33b56e5c5c14 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -116,7 +115,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
if (wall)
*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);;
+ return (cputime64_t)jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -162,21 +161,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
};
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
@@ -193,33 +183,6 @@ show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
-/*** delete after deprecation time ***/
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(sampling_down_factor);
-show_one_old(up_threshold);
-show_one_old(down_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(freq_step);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_down_factor(struct kobject *a,
struct attribute *b,
const char *buf, size_t count)
@@ -231,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -248,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -262,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
if (ret != 1 || input > 100 ||
- input <= dbs_tuners_ins.down_threshold) {
- mutex_unlock(&dbs_mutex);
+ input <= dbs_tuners_ins.down_threshold)
return -EINVAL;
- }
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -282,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= dbs_tuners_ins.up_threshold) {
- mutex_unlock(&dbs_mutex);
+ input >= dbs_tuners_ins.up_threshold)
return -EINVAL;
- }
dbs_tuners_ins.down_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -311,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
- if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
+ if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
return count;
- }
+
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
@@ -327,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (dbs_tuners_ins.ignore_nice)
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -347,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
/* no need to test here if freq_step is zero as the user might actually
* want this, they would be crazy though :) */
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.freq_step = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -362,7 +302,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
@@ -378,49 +317,6 @@ static struct attribute_group dbs_attr_group = {
.name = "conservative",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(sampling_down_factor);
-write_one_old(up_threshold);
-write_one_old(down_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(freq_step);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(sampling_down_factor);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(down_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(freq_step);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &sampling_down_factor_old.attr,
- &up_threshold_old.attr,
- &down_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &freq_step_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "conservative",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -596,12 +492,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
@@ -664,7 +554,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
dbs_enable--;
mutex_destroy(&this_dbs_info->timer_mutex);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 58aa85ea5ec6..891360edecdd 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -235,21 +234,12 @@ static void ondemand_powersave_bias_init(void)
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
@@ -266,32 +256,6 @@ show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
-/*** delete after deprecation time ***/
-
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(up_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(powersave_bias);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -300,11 +264,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -317,11 +277,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.io_is_busy = !!input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -336,11 +292,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -353,7 +305,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
@@ -362,8 +313,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->rate_mult = 1;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -382,9 +331,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
return count;
}
dbs_tuners_ins.ignore_nice = input;
@@ -399,8 +346,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -417,11 +362,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
if (input > 1000)
input = 1000;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.powersave_bias = input;
ondemand_powersave_bias_init();
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -433,7 +375,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&up_threshold.attr,
@@ -449,43 +390,6 @@ static struct attribute_group dbs_attr_group = {
.name = "ondemand",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(up_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(powersave_bias);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(powersave_bias);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &up_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &powersave_bias_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "ondemand",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
@@ -642,12 +546,7 @@ static void do_dbs_timer(struct work_struct *work)
unsigned int cpu = dbs_info->cpu;
int sample_type = dbs_info->sample_type;
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
- * dbs_info->rate_mult);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
+ int delay;
mutex_lock(&dbs_info->timer_mutex);
@@ -660,10 +559,20 @@ static void do_dbs_timer(struct work_struct *work)
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
+ } else {
+ /* We want all CPUs to do sampling nearly on
+ * the same jiffy
+ */
+ delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+ * dbs_info->rate_mult);
+
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
}
} else {
__cpufreq_driver_target(dbs_info->cur_policy,
dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = dbs_info->freq_lo_jiffies;
}
schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
@@ -727,12 +636,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
dbs_enable++;
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
@@ -785,7 +688,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
mutex_unlock(&dbs_mutex);
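
Editor's note, not part of the patch: the do_dbs_timer() rework above moves the sampling-interval computation into the branch that needs it and adds the missing delay for the freq_lo case. The alignment trick itself is unchanged: subtracting jiffies % delay makes the work expire on a multiple of delay, so every CPU samples near the same jiffy. A standalone sketch of just that arithmetic, with jiffies passed in as a parameter for illustration:

static unsigned long aligned_delay(unsigned long jiffies_now,
				   unsigned long delay,
				   unsigned int online_cpus)
{
	/* round the expiry down to the next multiple of 'delay' */
	if (online_cpus > 1)
		delay -= jiffies_now % delay;
	return delay;
}

/* e.g. jiffies_now = 1003, delay = 100 -> returns 97, so the timer fires
 * at jiffy 1100, the same boundary every other CPU is aiming for */
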
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 0310ffaec9df..be7917ec40c9 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -300,7 +300,7 @@ static struct kobj_type ktype_state_cpuidle = {
.release = cpuidle_state_sysfs_release,
};
-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
{
kobject_put(&device->kobjs[i]->kobj);
wait_for_completion(&device->kobjs[i]->kobj_unregister);
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index c461eda62411..4abd089a094f 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -111,10 +111,8 @@ static void unregister_dca_providers(void)
/* at this point only one domain in the list is expected */
domain = list_first_entry(&dca_domains, struct dca_domain, node);
- list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
- list_del(&dca->node);
- list_add(&dca->node, &unregistered_providers);
- }
+ list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
+ list_move(&dca->node, &unregistered_providers);
dca_free_domain(domain);
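
Editor's note, not part of the patch: the dca change is purely a readability one, since list_move() performs the same unlink-then-add-at-head sequence the open-coded pair did. Roughly (a paraphrase of the <linux/list.h> helper, not the exact kernel source):

#include <linux/list.h>

/* paraphrase of list_move(): detach 'node' from whatever list it is on
 * and add it at the head of 'new_head' -- exactly the pair of calls the
 * hunk above deletes */
static inline void list_move_sketch(struct list_head *node,
				    struct list_head *new_head)
{
	list_del(node);
	list_add(node, new_head);
}
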
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1c28816152fa..a572600e44eb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -82,7 +82,7 @@ config INTEL_IOP_ADMA
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA support"
- depends on AVR32
+ depends on HAVE_CLK
select DMA_ENGINE
default y if CPU_AT32AP7000
help
@@ -221,12 +221,20 @@ config IMX_SDMA
config IMX_DMA
tristate "i.MX DMA support"
- depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+ depends on IMX_HAVE_DMA_V1
select DMA_ENGINE
help
Support the i.MX DMA engine. This engine is integrated into
Freescale i.MX1/21/27 chips.
+config MXS_DMA
+ bool "MXS DMA support"
+ depends on SOC_IMX23 || SOC_IMX28
+ select DMA_ENGINE
+ help
+ Support the MXS DMA engine. This engine, which includes APBH-DMA
+ and APBX-DMA, is integrated into Freescale i.MX23/28 chips.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 64b21f5cd740..836095ab3c5c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,9 +1,5 @@
-ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
- ccflags-y += -DDEBUG
-endif
-ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
- ccflags-y += -DVERBOSE_DEBUG
-endif
+ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
+ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
@@ -23,6 +19,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 07bca4970e50..e6d7228b1479 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1845,7 +1845,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
}
#endif
-static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
+static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
const struct vendor_data *vd = id->data;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a6656834f0ff..00deabd9a04b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -849,7 +849,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
/* Must clear TC interrupt before calling
* dma_tc_handle
- * in case tc_handle initate a new dma job
+ * in case tc_handle initiate a new dma job
*/
__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
@@ -894,7 +894,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
}
/* Must clear TC interrupt before calling
* dma_tc_handle
- * in case tc_handle initate a new dma job
+ * in case tc_handle initiate a new dma job
*/
__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 5589358b684d..e0888cb538d4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -54,6 +54,11 @@ module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
+static int timeout = 3000;
+module_param(timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
+ "Pass -1 for infinite timeout");
+
/*
* Initialization patterns. All bytes in the source buffer has bit 7
* set, all bytes in the destination buffer has bit 7 cleared.
@@ -285,7 +290,12 @@ static int dmatest_func(void *data)
set_user_nice(current, 10);
- flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
+ /*
+ * src buffers are freed by the DMAEngine code with dma_unmap_single()
+ * dst buffers are freed by ourselves below
+ */
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
+ | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
while (!kthread_should_stop()
&& !(iterations && total_tests >= iterations)) {
@@ -294,7 +304,7 @@ static int dmatest_func(void *data)
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
struct completion cmp;
- unsigned long tmo = msecs_to_jiffies(3000);
+ unsigned long tmo = msecs_to_jiffies(timeout);
u8 align = 0;
total_tests++;
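
Editor's note, not part of the patch: the new dmatest parameter is declared as an int but registered as uint, so the documented -1 reads back as UINT_MAX milliseconds (about 49.7 days), which msecs_to_jiffies() then clamps, giving an effectively unbounded wait for a test run. A tiny standalone demonstration of the conversion only, not kernel code:

#include <stdio.h>

int main(void)
{
	int timeout = -1;				/* as passed on the module command line */
	unsigned int msecs = (unsigned int)timeout;	/* what a uint module_param reads back */

	printf("%u ms is roughly %.1f days\n", msecs, msecs / 1000.0 / 86400.0);
	return 0;
}
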
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a3991ab0d67e..9c25c7d099e4 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -32,26 +32,30 @@
* which does not support descriptor writeback.
*/
-/* NOTE: DMS+SMS is system-specific. We should get this information
- * from the platform code somehow.
- */
-#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \
- | DWC_CTLL_SRC_MSIZE(0) \
- | DWC_CTLL_DMS(0) \
- | DWC_CTLL_SMS(1) \
- | DWC_CTLL_LLP_D_EN \
- | DWC_CTLL_LLP_S_EN)
+#define DWC_DEFAULT_CTLLO(private) ({ \
+ struct dw_dma_slave *__slave = (private); \
+ int dms = __slave ? __slave->dst_master : 0; \
+ int sms = __slave ? __slave->src_master : 1; \
+ u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
+ u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
+ \
+ (DWC_CTLL_DST_MSIZE(dmsize) \
+ | DWC_CTLL_SRC_MSIZE(smsize) \
+ | DWC_CTLL_LLP_D_EN \
+ | DWC_CTLL_LLP_S_EN \
+ | DWC_CTLL_DMS(dms) \
+ | DWC_CTLL_SMS(sms)); \
+ })
/*
* This is configuration-dependent and usually a funny size like 4095.
- * Let's round it down to the nearest power of two.
*
* Note that this is a transfer count, i.e. if we transfer 32-bit
- * words, we can do 8192 bytes per descriptor.
+ * words, we can do 16380 bytes per descriptor.
*
* This parameter is also system-specific.
*/
-#define DWC_MAX_COUNT 2048U
+#define DWC_MAX_COUNT 4095U
/*
* Number of descriptors to allocate for each channel. This should be
@@ -84,11 +88,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
-static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
-{
- return list_entry(dwc->queue.next, struct dw_desc, desc_node);
-}
-
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
struct dw_desc *desc, *_desc;
@@ -201,6 +200,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
dma_async_tx_callback callback;
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
+ struct dw_desc *child;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
@@ -209,6 +209,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
param = txd->callback_param;
dwc_sync_desc_for_cpu(dwc, desc);
+
+ /* async_tx_ack */
+ list_for_each_entry(child, &desc->tx_list, desc_node)
+ async_tx_ack(&child->txd);
+ async_tx_ack(&desc->txd);
+
list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
@@ -259,10 +265,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
* Submit queued descriptors ASAP, i.e. before we go through
* the completed ones.
*/
- if (!list_empty(&dwc->queue))
- dwc_dostart(dwc, dwc_first_queued(dwc));
list_splice_init(&dwc->active_list, &list);
- list_splice_init(&dwc->queue, &dwc->active_list);
+ if (!list_empty(&dwc->queue)) {
+ list_move(dwc->queue.next, &dwc->active_list);
+ dwc_dostart(dwc, dwc_first_active(dwc));
+ }
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc);
@@ -291,6 +298,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
return;
}
+ if (list_empty(&dwc->active_list))
+ return;
+
dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
@@ -319,8 +329,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
cpu_relax();
if (!list_empty(&dwc->queue)) {
- dwc_dostart(dwc, dwc_first_queued(dwc));
- list_splice_init(&dwc->queue, &dwc->active_list);
+ list_move(dwc->queue.next, &dwc->active_list);
+ dwc_dostart(dwc, dwc_first_active(dwc));
}
}
@@ -346,7 +356,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
*/
bad_desc = dwc_first_active(dwc);
list_del_init(&bad_desc->desc_node);
- list_splice_init(&dwc->queue, dwc->active_list.prev);
+ list_move(dwc->queue.next, dwc->active_list.prev);
/* Clear the error flag and try to restart the controller */
dma_writel(dw, CLEAR.ERROR, dwc->mask);
@@ -541,8 +551,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
if (list_empty(&dwc->active_list)) {
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
desc->txd.cookie);
- dwc_dostart(dwc, desc);
list_add_tail(&desc->desc_node, &dwc->active_list);
+ dwc_dostart(dwc, dwc_first_active(dwc));
} else {
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
@@ -581,14 +591,16 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* We can be a lot more clever here, but this should take care
* of the most common optimization.
*/
- if (!((src | dest | len) & 3))
+ if (!((src | dest | len) & 7))
+ src_width = dst_width = 3;
+ else if (!((src | dest | len) & 3))
src_width = dst_width = 2;
else if (!((src | dest | len) & 1))
src_width = dst_width = 1;
else
src_width = dst_width = 0;
- ctllo = DWC_DEFAULT_CTLLO
+ ctllo = DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(dst_width)
| DWC_CTLL_SRC_WIDTH(src_width)
| DWC_CTLL_DST_INC
@@ -669,11 +681,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
switch (direction) {
case DMA_TO_DEVICE:
- ctllo = (DWC_DEFAULT_CTLLO
+ ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
| DWC_CTLL_SRC_INC
- | DWC_CTLL_FC_M2P);
+ | DWC_CTLL_FC(dws->fc));
reg = dws->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -714,11 +726,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
break;
case DMA_FROM_DEVICE:
- ctllo = (DWC_DEFAULT_CTLLO
+ ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
| DWC_CTLL_SRC_FIX
- | DWC_CTLL_FC_P2M);
+ | DWC_CTLL_FC(dws->fc));
reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
@@ -834,7 +846,9 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
+ spin_lock_bh(&dwc->lock);
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ spin_unlock_bh(&dwc->lock);
last_complete = dwc->completed;
last_used = chan->cookie;
@@ -889,8 +903,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
cfghi = dws->cfg_hi;
- cfglo = dws->cfg_lo;
+ cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
}
+
+ cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
+
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -1126,23 +1143,23 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
case DMA_TO_DEVICE:
desc->lli.dar = dws->tx_reg;
desc->lli.sar = buf_addr + (period_len * i);
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
| DWC_CTLL_SRC_INC
- | DWC_CTLL_FC_M2P
+ | DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
break;
case DMA_FROM_DEVICE:
desc->lli.dar = buf_addr + (period_len * i);
desc->lli.sar = dws->rx_reg;
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_INC
| DWC_CTLL_SRC_FIX
- | DWC_CTLL_FC_P2M
+ | DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
break;
default:
@@ -1307,7 +1324,17 @@ static int __init dw_probe(struct platform_device *pdev)
dwc->chan.device = &dw->dma;
dwc->chan.cookie = dwc->completed = 1;
dwc->chan.chan_id = i;
- list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
+ if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+ list_add_tail(&dwc->chan.device_node,
+ &dw->dma.channels);
+ else
+ list_add(&dwc->chan.device_node, &dw->dma.channels);
+
+ /* 7 is highest priority & 0 is lowest. */
+ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+ dwc->priority = 7 - i;
+ else
+ dwc->priority = i;
dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
spin_lock_init(&dwc->lock);
@@ -1335,6 +1362,8 @@ static int __init dw_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+ if (pdata->is_private)
+ dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
dw->dma.dev = &pdev->dev;
dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
dw->dma.device_free_chan_resources = dwc_free_chan_resources;
@@ -1447,7 +1476,7 @@ static int __init dw_init(void)
{
return platform_driver_probe(&dw_driver, dw_probe);
}
-module_init(dw_init);
+subsys_initcall(dw_init);
static void __exit dw_exit(void)
{
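
Editor's note, not part of the patch: among the dw_dmac changes, dwc_prep_dma_memcpy() now also considers 64-bit alignment, choosing the largest power-of-two width that divides source address, destination address and length alike. A standalone sketch of that selection, with the width expressed as a log2 of the byte count to match how the diff assigns src_width/dst_width:

static unsigned int max_xfer_width(unsigned long src, unsigned long dst,
				   unsigned long len)
{
	unsigned long bits = src | dst | len;

	if (!(bits & 7))
		return 3;	/* everything 8-byte aligned: 64-bit beats */
	if (!(bits & 3))
		return 2;	/* 32-bit */
	if (!(bits & 1))
		return 1;	/* 16-bit */
	return 0;		/* fall back to byte transfers */
}

/* e.g. src = 0x1000, dst = 0x2008, len = 64 -> 3; len = 6 -> 1 */
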
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index d9a939f67f46..720f821527f8 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -86,6 +86,7 @@ struct dw_dma_regs {
#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
+#define DWC_CTLL_FC(n) ((n) << 20)
#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
@@ -101,6 +102,8 @@ struct dw_dma_regs {
#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
+#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
@@ -134,6 +137,7 @@ struct dw_dma_chan {
struct dma_chan chan;
void __iomem *ch_regs;
u8 mask;
+ u8 priority;
spinlock_t lock;
@@ -155,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
}
#define channel_readl(dwc, name) \
- __raw_readl(&(__dwc_regs(dwc)->name))
+ readl(&(__dwc_regs(dwc)->name))
#define channel_writel(dwc, name, val) \
- __raw_writel((val), &(__dwc_regs(dwc)->name))
+ writel((val), &(__dwc_regs(dwc)->name))
static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
{
@@ -181,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
}
#define dma_readl(dw, name) \
- __raw_readl(&(__dw_regs(dw)->name))
+ readl(&(__dw_regs(dw)->name))
#define dma_writel(dw, name, val) \
- __raw_writel((val), &(__dw_regs(dw)->name))
+ writel((val), &(__dw_regs(dw)->name))
#define channel_set_bit(dw, reg, mask) \
dma_writel(dw, reg, ((mask) << 8) | (mask))
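
Editor's note, not part of the patch: the two new CFG_LO definitions carry the per-channel priority introduced in dw_dmac.c; the field occupies bits 7:5, so the driver clears the mask before OR-ing in the channel's value. A self-contained sketch of that bit manipulation, mirroring the dwc_alloc_chan_resources() hunk above:

#define CH_PRIOR_MASK	(0x7u << 5)			/* mirrors DWC_CFGL_CH_PRIOR_MASK */
#define CH_PRIOR(x)	(((unsigned int)(x) & 0x7u) << 5)

static unsigned int apply_channel_priority(unsigned int cfglo, unsigned int prio)
{
	/* drop whatever priority the platform data carried, keep the rest */
	return (cfglo & ~CH_PRIOR_MASK) | CH_PRIOR(prio);
}
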
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e3854a8f0de0..6b396759e7f5 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,35 +37,16 @@
#include "fsldma.h"
-static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+#define chan_dbg(chan, fmt, arg...) \
+ dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
+#define chan_err(chan, fmt, arg...) \
+ dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
-static void dma_init(struct fsldma_chan *chan)
-{
- /* Reset the channel */
- DMA_OUT(chan, &chan->regs->mr, 0, 32);
+static const char msg_ld_oom[] = "No free memory for link descriptor";
- switch (chan->feature & FSL_DMA_IP_MASK) {
- case FSL_DMA_IP_85XX:
- /* Set the channel to below modes:
- * EIE - Error interrupt enable
- * EOSIE - End of segments interrupt enable (basic mode)
- * EOLNIE - End of links interrupt enable
- * BWC - Bandwidth sharing among channels
- */
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
- | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
- | FSL_DMA_MR_EOSIE, 32);
- break;
- case FSL_DMA_IP_83XX:
- /* Set the channel to below modes:
- * EOTIE - End-of-transfer interrupt enable
- * PRC_RM - PCI read multiple
- */
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
- | FSL_DMA_MR_PRC_RM, 32);
- break;
- }
-}
+/*
+ * Register Helpers
+ */
static void set_sr(struct fsldma_chan *chan, u32 val)
{
@@ -77,14 +58,38 @@ static u32 get_sr(struct fsldma_chan *chan)
return DMA_IN(chan, &chan->regs->sr, 32);
}
+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
+{
+ DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+}
+
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
+{
+ return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+}
+
+static u32 get_bcr(struct fsldma_chan *chan)
+{
+ return DMA_IN(chan, &chan->regs->bcr, 32);
+}
+
+/*
+ * Descriptor Helpers
+ */
+
static void set_desc_cnt(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, u32 count)
{
hw->count = CPU_TO_DMA(chan, count, 32);
}
+static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
+{
+ return DMA_TO_CPU(chan, desc->hw.count, 32);
+}
+
static void set_desc_src(struct fsldma_chan *chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t src)
+ struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
u64 snoop_bits;
@@ -93,8 +98,18 @@ static void set_desc_src(struct fsldma_chan *chan,
hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}
+static dma_addr_t get_desc_src(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ u64 snoop_bits;
+
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
+ return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
+}
+
static void set_desc_dst(struct fsldma_chan *chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t dst)
+ struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
u64 snoop_bits;
@@ -103,8 +118,18 @@ static void set_desc_dst(struct fsldma_chan *chan,
hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}
+static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ u64 snoop_bits;
+
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
+ return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
+}
+
static void set_desc_next(struct fsldma_chan *chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t next)
+ struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
u64 snoop_bits;
@@ -113,24 +138,46 @@ static void set_desc_next(struct fsldma_chan *chan,
hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}
-static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
+static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
- DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
-}
+ u64 snoop_bits;
-static dma_addr_t get_cdar(struct fsldma_chan *chan)
-{
- return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
-}
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ ? FSL_DMA_SNEN : 0;
-static dma_addr_t get_ndar(struct fsldma_chan *chan)
-{
- return DMA_IN(chan, &chan->regs->ndar, 64);
+ desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+ DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+ | snoop_bits, 64);
}
-static u32 get_bcr(struct fsldma_chan *chan)
+/*
+ * DMA Engine Hardware Control Helpers
+ */
+
+static void dma_init(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->bcr, 32);
+ /* Reset the channel */
+ DMA_OUT(chan, &chan->regs->mr, 0, 32);
+
+ switch (chan->feature & FSL_DMA_IP_MASK) {
+ case FSL_DMA_IP_85XX:
+ /* Set the channel to below modes:
+ * EIE - Error interrupt enable
+ * EOLNIE - End of links interrupt enable
+ * BWC - Bandwidth sharing among channels
+ */
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
+ | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
+ break;
+ case FSL_DMA_IP_83XX:
+ /* Set the channel to below modes:
+ * EOTIE - End-of-transfer interrupt enable
+ * PRC_RM - PCI read multiple
+ */
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
+ | FSL_DMA_MR_PRC_RM, 32);
+ break;
+ }
}
static int dma_is_idle(struct fsldma_chan *chan)
@@ -139,25 +186,32 @@ static int dma_is_idle(struct fsldma_chan *chan)
return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
+/*
+ * Start the DMA controller
+ *
+ * Preconditions:
+ * - the CDAR register must point to the start descriptor
+ * - the MRn[CS] bit must be cleared
+ */
static void dma_start(struct fsldma_chan *chan)
{
u32 mode;
mode = DMA_IN(chan, &chan->regs->mr, 32);
- if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
- if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
- DMA_OUT(chan, &chan->regs->bcr, 0, 32);
- mode |= FSL_DMA_MR_EMP_EN;
- } else {
- mode &= ~FSL_DMA_MR_EMP_EN;
- }
+ if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+ DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+ mode |= FSL_DMA_MR_EMP_EN;
+ } else {
+ mode &= ~FSL_DMA_MR_EMP_EN;
}
- if (chan->feature & FSL_DMA_CHAN_START_EXT)
+ if (chan->feature & FSL_DMA_CHAN_START_EXT) {
mode |= FSL_DMA_MR_EMS_EN;
- else
+ } else {
+ mode &= ~FSL_DMA_MR_EMS_EN;
mode |= FSL_DMA_MR_CS;
+ }
DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
@@ -167,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan)
u32 mode;
int i;
+ /* read the mode register */
mode = DMA_IN(chan, &chan->regs->mr, 32);
- mode |= FSL_DMA_MR_CA;
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
- mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+ /*
+ * The 85xx controller supports channel abort, which will stop
+ * the current transfer. On 83xx, this bit is the transfer error
+ * mask bit, which should not be changed.
+ */
+ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+ mode |= FSL_DMA_MR_CA;
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
+
+ mode &= ~FSL_DMA_MR_CA;
+ }
+
+ /* stop the DMA controller */
+ mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ /* wait for the DMA controller to become idle */
for (i = 0; i < 100; i++) {
if (dma_is_idle(chan))
return;
@@ -182,20 +249,7 @@ static void dma_halt(struct fsldma_chan *chan)
}
if (!dma_is_idle(chan))
- dev_err(chan->dev, "DMA halt timeout!\n");
-}
-
-static void set_ld_eol(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- u64 snoop_bits;
-
- snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
- ? FSL_DMA_SNEN : 0;
-
- desc->hw.next_ln_addr = CPU_TO_DMA(chan,
- DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
- | snoop_bits, 64);
+ chan_err(chan, "DMA halt timeout!\n");
}
/**
@@ -321,8 +375,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
-static void append_ld_queue(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
+static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
@@ -363,8 +416,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = chan->common.cookie;
list_for_each_entry(child, &desc->tx_list, node) {
cookie++;
- if (cookie < 0)
- cookie = 1;
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;
child->async_tx.cookie = cookie;
}
@@ -385,15 +438,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
*
* Return - The descriptor allocated. NULL for failed.
*/
-static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
- struct fsldma_chan *chan)
+static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc;
dma_addr_t pdesc;
desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
if (!desc) {
- dev_dbg(chan->dev, "out of memory for link desc\n");
+ chan_dbg(chan, "out of memory for link descriptor\n");
return NULL;
}
@@ -403,10 +455,13 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
desc->async_tx.tx_submit = fsl_dma_tx_submit;
desc->async_tx.phys = pdesc;
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p allocated\n", desc);
+#endif
+
return desc;
}
-
/**
* fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
* @chan : Freescale DMA channel
@@ -427,13 +482,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
 * We need the descriptor to be aligned to 32 bytes
 * to meet the FSL DMA specification requirement.
*/
- chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
- chan->dev,
+ chan->desc_pool = dma_pool_create(chan->name, chan->dev,
sizeof(struct fsl_desc_sw),
__alignof__(struct fsl_desc_sw), 0);
if (!chan->desc_pool) {
- dev_err(chan->dev, "unable to allocate channel %d "
- "descriptor pool\n", chan->id);
+ chan_err(chan, "unable to allocate descriptor pool\n");
return -ENOMEM;
}
@@ -455,6 +508,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan,
list_for_each_entry_safe(desc, _desc, list, node) {
list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p free\n", desc);
+#endif
dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
}
@@ -466,6 +522,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
list_for_each_entry_safe_reverse(desc, _desc, list, node) {
list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p free\n", desc);
+#endif
dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
}
@@ -479,7 +538,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
struct fsldma_chan *chan = to_fsl_chan(dchan);
unsigned long flags;
- dev_dbg(chan->dev, "Free all channel resources.\n");
+ chan_dbg(chan, "free all channel resources\n");
spin_lock_irqsave(&chan->desc_lock, flags);
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
@@ -502,7 +561,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, msg_ld_oom);
+ chan_err(chan, "%s\n", msg_ld_oom);
return NULL;
}
@@ -512,14 +571,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &new->tx_list);
- /* Set End-of-link to the last link descriptor of new list*/
+ /* Set End-of-link to the last link descriptor of new list */
set_ld_eol(chan, new);
return &new->async_tx;
}
-static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
- struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
+static struct dma_async_tx_descriptor *
+fsl_dma_prep_memcpy(struct dma_chan *dchan,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, unsigned long flags)
{
struct fsldma_chan *chan;
@@ -539,12 +599,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
/* Allocate the link descriptor from DMA pool */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, msg_ld_oom);
+ chan_err(chan, "%s\n", msg_ld_oom);
goto fail;
}
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "new link desc alloc %p\n", new);
-#endif
copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
@@ -572,7 +629,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
new->async_tx.flags = flags; /* client is in control of this ack */
new->async_tx.cookie = -EBUSY;
- /* Set End-of-link to the last link descriptor of new list*/
+ /* Set End-of-link to the last link descriptor of new list */
set_ld_eol(chan, new);
return &first->async_tx;
@@ -627,12 +684,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
/* allocate and populate the descriptor */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, msg_ld_oom);
+ chan_err(chan, "%s\n", msg_ld_oom);
goto fail;
}
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "new link desc alloc %p\n", new);
-#endif
set_desc_cnt(chan, &new->hw, len);
set_desc_src(chan, &new->hw, src);
@@ -744,14 +798,15 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
switch (cmd) {
case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
/* Halt the DMA engine */
dma_halt(chan);
- spin_lock_irqsave(&chan->desc_lock, flags);
-
/* Remove and free all of the descriptors in the LD queue */
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
+ chan->idle = true;
spin_unlock_irqrestore(&chan->desc_lock, flags);
return 0;
@@ -789,140 +844,87 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
}
/**
- * fsl_dma_update_completed_cookie - Update the completed cookie.
- * @chan : Freescale DMA channel
- *
- * CONTEXT: hardirq
- */
-static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
-{
- struct fsl_desc_sw *desc;
- unsigned long flags;
- dma_cookie_t cookie;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
-
- if (list_empty(&chan->ld_running)) {
- dev_dbg(chan->dev, "no running descriptors\n");
- goto out_unlock;
- }
-
- /* Get the last descriptor, update the cookie to that */
- desc = to_fsl_desc(chan->ld_running.prev);
- if (dma_is_idle(chan))
- cookie = desc->async_tx.cookie;
- else {
- cookie = desc->async_tx.cookie - 1;
- if (unlikely(cookie < DMA_MIN_COOKIE))
- cookie = DMA_MAX_COOKIE;
- }
-
- chan->completed_cookie = cookie;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-}
-
-/**
- * fsldma_desc_status - Check the status of a descriptor
+ * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
* @chan: Freescale DMA channel
- * @desc: DMA SW descriptor
- *
- * This function will return the status of the given descriptor
- */
-static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- return dma_async_is_complete(desc->async_tx.cookie,
- chan->completed_cookie,
- chan->common.cookie);
-}
-
-/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
+ * @desc: descriptor to cleanup and free
*
- * This function clean up the ld_queue of DMA channel.
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, and then
+ * free the descriptor.
*/
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
+static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
{
- struct fsl_desc_sw *desc, *_desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
-
- dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
- list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
- dma_async_tx_callback callback;
- void *callback_param;
-
- if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
- break;
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+ struct device *dev = chan->common.device->dev;
+ dma_addr_t src = get_desc_src(chan, desc);
+ dma_addr_t dst = get_desc_dst(chan, desc);
+ u32 len = get_desc_cnt(chan, desc);
+
+ /* Run the link descriptor callback function */
+ if (txd->callback) {
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p callback\n", desc);
+#endif
+ txd->callback(txd->callback_param);
+ }
- /* Remove from the list of running transactions */
- list_del(&desc->node);
+ /* Run any dependencies */
+ dma_run_dependencies(txd);
- /* Run the link descriptor callback function */
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback) {
- spin_unlock_irqrestore(&chan->desc_lock, flags);
- dev_dbg(chan->dev, "LD %p callback\n", desc);
- callback(callback_param);
- spin_lock_irqsave(&chan->desc_lock, flags);
- }
+ /* Unmap the dst buffer, if requested */
+ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
+ }
- /* Run any dependencies, then free the descriptor */
- dma_run_dependencies(&desc->async_tx);
- dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+ /* Unmap the src buffer, if requested */
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
}
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p free\n", desc);
+#endif
+ dma_pool_free(chan->desc_pool, desc, txd->phys);
}
/**
* fsl_chan_xfer_ld_queue - transfer any pending transactions
* @chan : Freescale DMA channel
*
- * This will make sure that any pending transactions will be run.
- * If the DMA controller is idle, it will be started. Otherwise,
- * the DMA controller's interrupt handler will start any pending
- * transactions when it becomes idle.
+ * HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
*/
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
/*
* If the list of pending descriptors is empty, then we
* don't need to do any work at all
*/
if (list_empty(&chan->ld_pending)) {
- dev_dbg(chan->dev, "no pending LDs\n");
- goto out_unlock;
+ chan_dbg(chan, "no pending LDs\n");
+ return;
}
/*
- * The DMA controller is not idle, which means the interrupt
- * handler will start any queued transactions when it runs
- * at the end of the current transaction
+ * The DMA controller is not idle, which means that the interrupt
+ * handler will start any queued transactions when it runs after
+ * this transaction finishes
*/
- if (!dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
- goto out_unlock;
+ if (!chan->idle) {
+ chan_dbg(chan, "DMA controller still busy\n");
+ return;
}
/*
- * TODO:
- * make sure the dma_halt() function really un-wedges the
- * controller as much as possible
- */
- dma_halt(chan);
-
- /*
* If there are some link descriptors which have not been
* transferred, we need to start the controller
*/
@@ -931,18 +933,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
* Move all elements from the queue of pending transactions
* onto the list of running transactions
*/
+ chan_dbg(chan, "idle, starting controller\n");
desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
/*
+ * The 85xx DMA controller doesn't clear the channel start bit
+ * automatically at the end of a transfer. Therefore we must clear
+ * it in software before starting the transfer.
+ */
+ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode &= ~FSL_DMA_MR_CS;
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ }
+
+ /*
* Program the descriptor's address into the DMA controller,
* then start the DMA transaction
*/
set_cdar(chan, desc->async_tx.phys);
- dma_start(chan);
+ get_cdar(chan);
-out_unlock:
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ dma_start(chan);
+ chan->idle = false;
}
/**
@@ -952,7 +968,11 @@ out_unlock:
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
fsl_chan_xfer_ld_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
@@ -964,16 +984,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
struct dma_tx_state *txstate)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- dma_cookie_t last_used;
dma_cookie_t last_complete;
+ dma_cookie_t last_used;
+ unsigned long flags;
- fsl_chan_ld_cleanup(chan);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- last_used = dchan->cookie;
last_complete = chan->completed_cookie;
+ last_used = dchan->cookie;
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -984,21 +1006,20 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
struct fsldma_chan *chan = data;
- int update_cookie = 0;
- int xfer_ld_q = 0;
u32 stat;
/* save and clear the status register */
stat = get_sr(chan);
set_sr(chan, stat);
- dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
+ chan_dbg(chan, "irq: stat = 0x%x\n", stat);
+ /* check that this was really our device */
stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
if (!stat)
return IRQ_NONE;
if (stat & FSL_DMA_SR_TE)
- dev_err(chan->dev, "Transfer Error!\n");
+ chan_err(chan, "Transfer Error!\n");
/*
* Programming Error
@@ -1006,29 +1027,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 * trigger a PE interrupt.
*/
if (stat & FSL_DMA_SR_PE) {
- dev_dbg(chan->dev, "irq: Programming Error INT\n");
- if (get_bcr(chan) == 0) {
- /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
- * Now, update the completed cookie, and continue the
- * next uncompleted transfer.
- */
- update_cookie = 1;
- xfer_ld_q = 1;
- }
+ chan_dbg(chan, "irq: Programming Error INT\n");
stat &= ~FSL_DMA_SR_PE;
- }
-
- /*
- * If the link descriptor segment transfer finishes,
- * we will recycle the used descriptor.
- */
- if (stat & FSL_DMA_SR_EOSI) {
- dev_dbg(chan->dev, "irq: End-of-segments INT\n");
- dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
- (unsigned long long)get_cdar(chan),
- (unsigned long long)get_ndar(chan));
- stat &= ~FSL_DMA_SR_EOSI;
- update_cookie = 1;
+ if (get_bcr(chan) != 0)
+ chan_err(chan, "Programming Error!\n");
}
/*
@@ -1036,10 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 * and start the next transfer if it exists.
*/
if (stat & FSL_DMA_SR_EOCDI) {
- dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
+ chan_dbg(chan, "irq: End-of-Chain link INT\n");
stat &= ~FSL_DMA_SR_EOCDI;
- update_cookie = 1;
- xfer_ld_q = 1;
}
/*
@@ -1048,27 +1048,79 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* prepare next transfer.
*/
if (stat & FSL_DMA_SR_EOLNI) {
- dev_dbg(chan->dev, "irq: End-of-link INT\n");
+ chan_dbg(chan, "irq: End-of-link INT\n");
stat &= ~FSL_DMA_SR_EOLNI;
- xfer_ld_q = 1;
}
- if (update_cookie)
- fsl_dma_update_completed_cookie(chan);
- if (xfer_ld_q)
- fsl_chan_xfer_ld_queue(chan);
+ /* check that the DMA controller is really idle */
+ if (!dma_is_idle(chan))
+ chan_err(chan, "irq: controller not idle!\n");
+
+ /* check that we handled all of the bits */
if (stat)
- dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
+ chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
- dev_dbg(chan->dev, "irq: Exit\n");
+ /*
+ * Schedule the tasklet to handle all cleanup of the current
+ * transaction. It will start a new transaction if there is
+ * one pending.
+ */
tasklet_schedule(&chan->tasklet);
+ chan_dbg(chan, "irq: Exit\n");
return IRQ_HANDLED;
}
static void dma_do_tasklet(unsigned long data)
{
struct fsldma_chan *chan = (struct fsldma_chan *)data;
- fsl_chan_ld_cleanup(chan);
+ struct fsl_desc_sw *desc, *_desc;
+ LIST_HEAD(ld_cleanup);
+ unsigned long flags;
+
+ chan_dbg(chan, "tasklet entry\n");
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
+ /* update the cookie if we have some descriptors to cleanup */
+ if (!list_empty(&chan->ld_running)) {
+ dma_cookie_t cookie;
+
+ desc = to_fsl_desc(chan->ld_running.prev);
+ cookie = desc->async_tx.cookie;
+
+ chan->completed_cookie = cookie;
+ chan_dbg(chan, "completed_cookie=%d\n", cookie);
+ }
+
+ /*
+ * move the descriptors to a temporary list so we can drop the lock
+ * during the entire cleanup operation
+ */
+ list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+ /* the hardware is now idle and ready for more */
+ chan->idle = true;
+
+ /*
+ * Start any pending transactions automatically
+ *
+ * In the ideal case, we keep the DMA controller busy while we go
+ * ahead and free the descriptors below.
+ */
+ fsl_chan_xfer_ld_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
+
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+
+ /* Run all cleanup for this descriptor */
+ fsldma_cleanup_descriptor(chan, desc);
+ }
+
+ chan_dbg(chan, "tasklet exit\n");
}
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
@@ -1116,7 +1168,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
chan = fdev->chan[i];
if (chan && chan->irq != NO_IRQ) {
- dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
+ chan_dbg(chan, "free per-channel IRQ\n");
free_irq(chan->irq, chan);
}
}
@@ -1143,19 +1195,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
continue;
if (chan->irq == NO_IRQ) {
- dev_err(fdev->dev, "no interrupts property defined for "
- "DMA channel %d. Please fix your "
- "device tree\n", chan->id);
+ chan_err(chan, "interrupts property missing in device tree\n");
ret = -ENODEV;
goto out_unwind;
}
- dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
+ chan_dbg(chan, "request per-channel IRQ\n");
ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
"fsldma-chan", chan);
if (ret) {
- dev_err(fdev->dev, "unable to request IRQ for DMA "
- "channel %d\n", chan->id);
+ chan_err(chan, "unable to request per-channel IRQ\n");
goto out_unwind;
}
}
@@ -1230,6 +1279,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
fdev->chan[chan->id] = chan;
tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+ snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
/* Initialize the channel */
dma_init(chan);
@@ -1250,6 +1300,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
spin_lock_init(&chan->desc_lock);
INIT_LIST_HEAD(&chan->ld_pending);
INIT_LIST_HEAD(&chan->ld_running);
+ chan->idle = true;
chan->common.device = &fdev->common;
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ba9f403c0fbe..9cb5aa57c677 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -102,8 +102,8 @@ struct fsl_desc_sw {
} __attribute__((aligned(32)));
struct fsldma_chan_regs {
- u32 mr; /* 0x00 - Mode Register */
- u32 sr; /* 0x04 - Status Register */
+ u32 mr; /* 0x00 - Mode Register */
+ u32 sr; /* 0x04 - Status Register */
u64 cdar; /* 0x08 - Current descriptor address register */
u64 sar; /* 0x10 - Source Address Register */
u64 dar; /* 0x18 - Destination Address Register */
@@ -135,6 +135,7 @@ struct fsldma_device {
#define FSL_DMA_CHAN_START_EXT 0x00002000
struct fsldma_chan {
+ char name[8]; /* Channel name */
struct fsldma_chan_regs __iomem *regs;
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
@@ -147,6 +148,7 @@ struct fsldma_chan {
int id; /* Raw id of this channel */
struct tasklet_struct tasklet;
u32 feature;
+ bool idle; /* DMA controller is idle */
void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
new file mode 100644
index 000000000000..88aad4f54002
--- /dev/null
+++ b/drivers/dma/mxs-dma.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Refer to drivers/dma/imx-sdma.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+
+#include <asm/irq.h>
+#include <mach/mxs.h>
+#include <mach/dma.h>
+#include <mach/common.h>
+
+/*
+ * NOTE: The term "PIO" throughout the mxs-dma implementation means
+ * the PIO mode of the mxs apbh-dma and apbx-dma. In this mode, the
+ * DMA engine can program the controller registers of peripheral devices.
+ */
+
+#define MXS_DMA_APBH 0
+#define MXS_DMA_APBX 1
+#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH)
+
+#define APBH_VERSION_LATEST 3
+#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST)
+
+#define HW_APBHX_CTRL0 0x000
+#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
+#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
+#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
+#define BP_APBH_CTRL0_RESET_CHANNEL 16
+#define HW_APBHX_CTRL1 0x010
+#define HW_APBHX_CTRL2 0x020
+#define HW_APBHX_CHANNEL_CTRL 0x030
+#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
+#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800)
+#define HW_APBX_VERSION 0x800
+#define BP_APBHX_VERSION_MAJOR 24
+#define HW_APBHX_CHn_NXTCMDAR(n) \
+ (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA(n) \
+ (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
+
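As a quick sanity check on the register map above, the per-channel offsets are plain arithmetic on the channel number. A user-space sketch of HW_APBHX_CHn_NXTCMDAR(), with dma_is_apbh()/apbh_is_old() replaced by plain parameters (that simplification is mine, not the driver's):

#include <stdio.h>

/* mirrors HW_APBHX_CHn_NXTCMDAR() above, with the globals made explicit */
static unsigned int nxtcmdar(int is_apbh, int is_old_apbh, int n)
{
	return ((is_apbh && is_old_apbh) ? 0x050 : 0x110) + n * 0x70;
}

int main(void)
{
	printf("old APBH, chan 2: 0x%03x\n", nxtcmdar(1, 1, 2));	/* 0x130 */
	printf("APBX,     chan 2: 0x%03x\n", nxtcmdar(0, 0, 2));	/* 0x1f0 */
	return 0;
}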
+/*
+ * ccw bits definitions
+ *
+ * COMMAND: 0..1 (2)
+ * CHAIN: 2 (1)
+ * IRQ: 3 (1)
+ * NAND_LOCK: 4 (1) - not implemented
+ * NAND_WAIT4READY: 5 (1) - not implemented
+ * DEC_SEM: 6 (1)
+ * WAIT4END: 7 (1)
+ * HALT_ON_TERMINATE: 8 (1)
+ * TERMINATE_FLUSH: 9 (1)
+ * RESERVED: 10..11 (2)
+ * PIO_NUM: 12..15 (4)
+ */
+#define BP_CCW_COMMAND 0
+#define BM_CCW_COMMAND (3 << 0)
+#define CCW_CHAIN (1 << 2)
+#define CCW_IRQ (1 << 3)
+#define CCW_DEC_SEM (1 << 6)
+#define CCW_WAIT4END (1 << 7)
+#define CCW_HALT_ON_TERM (1 << 8)
+#define CCW_TERM_FLUSH (1 << 9)
+#define BP_CCW_PIO_NUM 12
+#define BM_CCW_PIO_NUM (0xf << 12)
+
+#define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field)
+
+#define MXS_DMA_CMD_NO_XFER 0
+#define MXS_DMA_CMD_WRITE 1
+#define MXS_DMA_CMD_READ 2
+#define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */
+
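One ccw control word is just these fields OR'd together. A user-space sketch packing the PIO-only case that mxs_dma_prep_slave_sg() builds for DMA_NONE (the defines are copied from above; the concrete values in main() are only an example):

#include <stdio.h>
#include <stdint.h>

#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)

int main(void)
{
	uint32_t bits = 0;

	/* a PIO command carrying 3 PIO words */
	bits |= CCW_IRQ | CCW_DEC_SEM | CCW_WAIT4END;
	bits |= BF_CCW(3, PIO_NUM);
	bits |= BF_CCW(0 /* MXS_DMA_CMD_NO_XFER */, COMMAND);

	printf("ccw bits = 0x%04x\n", (unsigned int)bits);	/* 0x30c8 */
	return 0;
}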
+struct mxs_dma_ccw {
+ u32 next;
+ u16 bits;
+ u16 xfer_bytes;
+#define MAX_XFER_BYTES 0xff00
+ u32 bufaddr;
+#define MXS_PIO_WORDS 16
+ u32 pio_words[MXS_PIO_WORDS];
+};
+
+#define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
+
+struct mxs_dma_chan {
+ struct mxs_dma_engine *mxs_dma;
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor desc;
+ struct tasklet_struct tasklet;
+ int chan_irq;
+ struct mxs_dma_ccw *ccw;
+ dma_addr_t ccw_phys;
+ dma_cookie_t last_completed;
+ enum dma_status status;
+ unsigned int flags;
+#define MXS_DMA_SG_LOOP (1 << 0)
+};
+
+#define MXS_DMA_CHANNELS 16
+#define MXS_DMA_CHANNELS_MASK 0xffff
+
+struct mxs_dma_engine {
+ int dev_id;
+ unsigned int version;
+ void __iomem *base;
+ struct clk *clk;
+ struct dma_device dma_device;
+ struct device_dma_parameters dma_parms;
+ struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
+};
+
+static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ if (dma_is_apbh() && apbh_is_old())
+ writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ else
+ writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+}
+
+static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ /* set cmd_addr up */
+ writel(mxs_chan->ccw_phys,
+ mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
+
+ /* enable apbh channel clock */
+ if (dma_is_apbh()) {
+ if (apbh_is_old())
+ writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+ }
+
+ /* write 1 to SEMA to kick off the channel */
+ writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
+}
+
+static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ /* disable apbh channel clock */
+ if (dma_is_apbh()) {
+ if (apbh_is_old())
+ writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ }
+
+ mxs_chan->status = DMA_SUCCESS;
+}
+
+static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ /* freeze the channel */
+ if (dma_is_apbh() && apbh_is_old())
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+
+ mxs_chan->status = DMA_PAUSED;
+}
+
+static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ /* unfreeze the channel */
+ if (dma_is_apbh() && apbh_is_old())
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
+
+ mxs_chan->status = DMA_IN_PROGRESS;
+}
+
+static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
+{
+ dma_cookie_t cookie = mxs_chan->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ mxs_chan->chan.cookie = cookie;
+ mxs_chan->desc.cookie = cookie;
+
+ return cookie;
+}
+
+static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct mxs_dma_chan, chan);
+}
+
+static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
+
+ mxs_dma_enable_chan(mxs_chan);
+
+ return mxs_dma_assign_cookie(mxs_chan);
+}
+
+static void mxs_dma_tasklet(unsigned long data)
+{
+ struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
+
+ if (mxs_chan->desc.callback)
+ mxs_chan->desc.callback(mxs_chan->desc.callback_param);
+}
+
+static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
+{
+ struct mxs_dma_engine *mxs_dma = dev_id;
+ u32 stat1, stat2;
+
+ /* completion status */
+ stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
+ stat1 &= MXS_DMA_CHANNELS_MASK;
+ writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);
+
+ /* error status */
+ stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
+ writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);
+
+ /*
+ * When both the completion and error-of-termination bits are set at
+ * the same time, we do not take it as an error. IOW, it only becomes
+ * an error we need to handle here if it is either (1) a bus error or
+ * (2) a termination error with no completion.
+ */
+ stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
+ (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
+
+ /* combine error and completion status for checking */
+ stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
+ while (stat1) {
+ int channel = fls(stat1) - 1;
+ struct mxs_dma_chan *mxs_chan =
+ &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];
+
+ if (channel >= MXS_DMA_CHANNELS) {
+ dev_dbg(mxs_dma->dma_device.dev,
+ "%s: error in channel %d\n", __func__,
+ channel - MXS_DMA_CHANNELS);
+ mxs_chan->status = DMA_ERROR;
+ mxs_dma_reset_chan(mxs_chan);
+ } else {
+ if (mxs_chan->flags & MXS_DMA_SG_LOOP)
+ mxs_chan->status = DMA_IN_PROGRESS;
+ else
+ mxs_chan->status = DMA_SUCCESS;
+ }
+
+ stat1 &= ~(1 << channel);
+
+ if (mxs_chan->status == DMA_SUCCESS)
+ mxs_chan->last_completed = mxs_chan->desc.cookie;
+
+ /* schedule tasklet on this channel */
+ tasklet_schedule(&mxs_chan->tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
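The error folding in the handler above is easier to see with concrete bits. A user-space sketch of just that expression, assuming the layout implied by the comment (bit n of stat1 = completion for channel n, bit n of stat2 = error IRQ for channel n, bit n+16 of stat2 = "the error was a bus error"); everything else here is made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define MXS_DMA_CHANNELS	16

static uint32_t real_errors(uint32_t stat1, uint32_t stat2)
{
	return ((stat2 >> MXS_DMA_CHANNELS) & stat2) |		/* (1) bus error */
	       (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1);	/* (2) terminated, no completion */
}

int main(void)
{
	/*
	 * channel 0 terminated without completing: a real error;
	 * channel 1 terminated but also completed: not an error
	 */
	printf("0x%08x\n", (unsigned int)real_errors(0x2, 0x3));	/* 0x00000001 */
	return 0;
}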
+
+static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_data *data = chan->private;
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int ret;
+
+ if (!data)
+ return -EINVAL;
+
+ mxs_chan->chan_irq = data->chan_irq;
+
+ mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+ &mxs_chan->ccw_phys, GFP_KERNEL);
+ if (!mxs_chan->ccw) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ memset(mxs_chan->ccw, 0, PAGE_SIZE);
+
+ ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+ 0, "mxs-dma", mxs_dma);
+ if (ret)
+ goto err_irq;
+
+ ret = clk_enable(mxs_dma->clk);
+ if (ret)
+ goto err_clk;
+
+ mxs_dma_reset_chan(mxs_chan);
+
+ dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
+ mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
+
+ /* the descriptor is ready */
+ async_tx_ack(&mxs_chan->desc);
+
+ return 0;
+
+err_clk:
+ free_irq(mxs_chan->chan_irq, mxs_dma);
+err_irq:
+ dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+ mxs_chan->ccw, mxs_chan->ccw_phys);
+err_alloc:
+ return ret;
+}
+
+static void mxs_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+ mxs_dma_disable_chan(mxs_chan);
+
+ free_irq(mxs_chan->chan_irq, mxs_dma);
+
+ dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+ mxs_chan->ccw, mxs_chan->ccw_phys);
+
+ clk_disable(mxs_dma->clk);
+}
+
+static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long append)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ struct mxs_dma_ccw *ccw;
+ struct scatterlist *sg;
+ int i, j;
+ u32 *pio;
+ static int idx;
+
+ if (mxs_chan->status == DMA_IN_PROGRESS && !append)
+ return NULL;
+
+ if (sg_len + (append ? idx : 0) > NUM_CCW) {
+ dev_err(mxs_dma->dma_device.dev,
+ "maximum number of sg exceeded: %d > %d\n",
+ sg_len, NUM_CCW);
+ goto err_out;
+ }
+
+ mxs_chan->status = DMA_IN_PROGRESS;
+ mxs_chan->flags = 0;
+
+ /*
+ * If the sg list is prepared with the append flag set, it will be
+ * appended to the last prepared sg list.
+ */
+ if (append) {
+ BUG_ON(idx < 1);
+ ccw = &mxs_chan->ccw[idx - 1];
+ ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
+ ccw->bits |= CCW_CHAIN;
+ ccw->bits &= ~CCW_IRQ;
+ ccw->bits &= ~CCW_DEC_SEM;
+ ccw->bits &= ~CCW_WAIT4END;
+ } else {
+ idx = 0;
+ }
+
+ if (direction == DMA_NONE) {
+ ccw = &mxs_chan->ccw[idx++];
+ pio = (u32 *) sgl;
+
+ for (j = 0; j < sg_len;)
+ ccw->pio_words[j++] = *pio++;
+
+ ccw->bits = 0;
+ ccw->bits |= CCW_IRQ;
+ ccw->bits |= CCW_DEC_SEM;
+ ccw->bits |= CCW_WAIT4END;
+ ccw->bits |= CCW_HALT_ON_TERM;
+ ccw->bits |= CCW_TERM_FLUSH;
+ ccw->bits |= BF_CCW(sg_len, PIO_NUM);
+ ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
+ } else {
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (sg->length > MAX_XFER_BYTES) {
+ dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
+ sg->length, MAX_XFER_BYTES);
+ goto err_out;
+ }
+
+ ccw = &mxs_chan->ccw[idx++];
+
+ ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
+ ccw->bufaddr = sg->dma_address;
+ ccw->xfer_bytes = sg->length;
+
+ ccw->bits = 0;
+ ccw->bits |= CCW_CHAIN;
+ ccw->bits |= CCW_HALT_ON_TERM;
+ ccw->bits |= CCW_TERM_FLUSH;
+ ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
+ COMMAND);
+
+ if (i + 1 == sg_len) {
+ ccw->bits &= ~CCW_CHAIN;
+ ccw->bits |= CCW_IRQ;
+ ccw->bits |= CCW_DEC_SEM;
+ ccw->bits |= CCW_WAIT4END;
+ }
+ }
+ }
+
+ return &mxs_chan->desc;
+
+err_out:
+ mxs_chan->status = DMA_ERROR;
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int num_periods = buf_len / period_len;
+ int i = 0, buf = 0;
+
+ if (mxs_chan->status == DMA_IN_PROGRESS)
+ return NULL;
+
+ mxs_chan->status = DMA_IN_PROGRESS;
+ mxs_chan->flags |= MXS_DMA_SG_LOOP;
+
+ if (num_periods > NUM_CCW) {
+ dev_err(mxs_dma->dma_device.dev,
+ "maximum number of sg exceeded: %d > %d\n",
+ num_periods, NUM_CCW);
+ goto err_out;
+ }
+
+ if (period_len > MAX_XFER_BYTES) {
+ dev_err(mxs_dma->dma_device.dev,
+ "maximum period size exceeded: %zu > %d\n",
+ period_len, MAX_XFER_BYTES);
+ goto err_out;
+ }
+
+ while (buf < buf_len) {
+ struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];
+
+ if (i + 1 == num_periods)
+ ccw->next = mxs_chan->ccw_phys;
+ else
+ ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);
+
+ ccw->bufaddr = dma_addr;
+ ccw->xfer_bytes = period_len;
+
+ ccw->bits = 0;
+ ccw->bits |= CCW_CHAIN;
+ ccw->bits |= CCW_IRQ;
+ ccw->bits |= CCW_HALT_ON_TERM;
+ ccw->bits |= CCW_TERM_FLUSH;
+ ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
+
+ dma_addr += period_len;
+ buf += period_len;
+
+ i++;
+ }
+
+ return &mxs_chan->desc;
+
+err_out:
+ mxs_chan->status = DMA_ERROR;
+ return NULL;
+}
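Structurally, the cyclic case above differs from the slave-sg path only in the ccw linkage: the last descriptor points back at the first, so the chain never terminates until DMA_TERMINATE_ALL. A user-space sketch of that ring layout (the DMA address and CCW_SIZE are hypothetical stand-ins for ccw_phys and sizeof(struct mxs_dma_ccw)):

#include <stdio.h>
#include <stddef.h>

#define CCW_SIZE	0x50	/* hypothetical sizeof(struct mxs_dma_ccw) */

int main(void)
{
	unsigned long ccw_phys = 0x40000000UL;	/* hypothetical DMA address */
	size_t buf_len = 4096, period_len = 1024;
	int num_periods = buf_len / period_len;
	int i;

	for (i = 0; i < num_periods; i++) {
		unsigned long next = (i + 1 == num_periods)
			? ccw_phys			/* wrap back to the first ccw */
			: ccw_phys + CCW_SIZE * (i + 1);

		printf("ccw[%d].next = 0x%lx\n", i, next);
	}
	return 0;
}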
+
+static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ int ret = 0;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ mxs_dma_disable_chan(mxs_chan);
+ break;
+ case DMA_PAUSE:
+ mxs_dma_pause_chan(mxs_chan);
+ break;
+ case DMA_RESUME:
+ mxs_dma_resume_chan(mxs_chan);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ dma_cookie_t last_used;
+
+ last_used = chan->cookie;
+ dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0);
+
+ return mxs_chan->status;
+}
+
+static void mxs_dma_issue_pending(struct dma_chan *chan)
+{
+ /*
+ * Nothing to do. We only have a single descriptor.
+ */
+}
+
+static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
+{
+ int ret;
+
+ ret = clk_enable(mxs_dma->clk);
+ if (ret)
+ goto err_out;
+
+ ret = mxs_reset_block(mxs_dma->base);
+ if (ret)
+ goto err_out;
+
+ /* only major version matters */
+ mxs_dma->version = readl(mxs_dma->base +
+ ((mxs_dma->dev_id == MXS_DMA_APBX) ?
+ HW_APBX_VERSION : HW_APBH_VERSION)) >>
+ BP_APBHX_VERSION_MAJOR;
+
+ /* enable apbh burst */
+ if (dma_is_apbh()) {
+ writel(BM_APBH_CTRL0_APB_BURST_EN,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ writel(BM_APBH_CTRL0_APB_BURST8_EN,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ }
+
+ /* enable irq for all the channels */
+ writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
+ mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
+
+ clk_disable(mxs_dma->clk);
+
+ return 0;
+
+err_out:
+ return ret;
+}
+
+static int __init mxs_dma_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(pdev);
+ struct mxs_dma_engine *mxs_dma;
+ struct resource *iores;
+ int ret, i;
+
+ mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
+ if (!mxs_dma)
+ return -ENOMEM;
+
+ mxs_dma->dev_id = id_entry->driver_data;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!request_mem_region(iores->start, resource_size(iores),
+ pdev->name)) {
+ ret = -EBUSY;
+ goto err_request_region;
+ }
+
+ mxs_dma->base = ioremap(iores->start, resource_size(iores));
+ if (!mxs_dma->base) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ mxs_dma->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mxs_dma->clk)) {
+ ret = PTR_ERR(mxs_dma->clk);
+ goto err_clk;
+ }
+
+ dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
+
+ INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
+
+ /* Initialize channel parameters */
+ for (i = 0; i < MXS_DMA_CHANNELS; i++) {
+ struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
+
+ mxs_chan->mxs_dma = mxs_dma;
+ mxs_chan->chan.device = &mxs_dma->dma_device;
+
+ tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
+ (unsigned long) mxs_chan);
+
+
+ list_add_tail(&mxs_chan->chan.device_node,
+ &mxs_dma->dma_device.channels);
+ }
+
+ ret = mxs_dma_init(mxs_dma);
+ if (ret)
+ goto err_init;
+
+ mxs_dma->dma_device.dev = &pdev->dev;
+
+ /* mxs_dma supports a maximum sg segment size of MAX_XFER_BYTES (65280) bytes */
+ mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
+ dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
+
+ mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
+ mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
+ mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
+ mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
+ mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
+ mxs_dma->dma_device.device_control = mxs_dma_control;
+ mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;
+
+ ret = dma_async_device_register(&mxs_dma->dma_device);
+ if (ret) {
+ dev_err(mxs_dma->dma_device.dev, "unable to register\n");
+ goto err_init;
+ }
+
+ dev_info(mxs_dma->dma_device.dev, "initialized\n");
+
+ return 0;
+
+err_init:
+ clk_put(mxs_dma->clk);
+err_clk:
+ iounmap(mxs_dma->base);
+err_ioremap:
+ release_mem_region(iores->start, resource_size(iores));
+err_request_region:
+ kfree(mxs_dma);
+ return ret;
+}
+
+static struct platform_device_id mxs_dma_type[] = {
+ {
+ .name = "mxs-dma-apbh",
+ .driver_data = MXS_DMA_APBH,
+ }, {
+ .name = "mxs-dma-apbx",
+ .driver_data = MXS_DMA_APBX,
+ }, {
+ /* sentinel, terminates the id table */
+ }
+};
+
+static struct platform_driver mxs_dma_driver = {
+ .driver = {
+ .name = "mxs-dma",
+ },
+ .id_table = mxs_dma_type,
+};
+
+static int __init mxs_dma_module_init(void)
+{
+ return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
+}
+subsys_initcall(mxs_dma_module_init);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1c38418ae61f..8d8fef1480a9 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -82,7 +82,7 @@ struct pch_dma_regs {
u32 dma_sts1;
u32 reserved2;
u32 reserved3;
- struct pch_dma_desc_regs desc[0];
+ struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
struct pch_dma_desc {
@@ -124,7 +124,7 @@ struct pch_dma {
struct pci_pool *pool;
struct pch_dma_regs regs;
struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
- struct pch_dma_chan channels[0];
+ struct pch_dma_chan channels[MAX_CHAN_NR];
};
#define PCH_DMA_CTL0 0x00
@@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
dma_cookie_t cookie;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
cookie = pdc_assign_cookie(pd_chan, desc);
if (list_empty(&pd_chan->active_list)) {
@@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
list_add_tail(&desc->desc_node, &pd_chan->queue);
}
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
return 0;
}
@@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
struct pch_dma *pd = to_pd(chan->device);
dma_addr_t addr;
- desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr);
+ desc = pci_pool_alloc(pd->pool, flags, &addr);
if (desc) {
memset(desc, 0, sizeof(struct pch_dma_desc));
INIT_LIST_HEAD(&desc->tx_list);
@@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
struct pch_dma_desc *ret = NULL;
int i;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
i++;
if (async_tx_test_ack(&desc->txd)) {
@@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
}
dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
}
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
if (!ret) {
ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
if (ret) {
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
pd_chan->descs_allocated++;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
} else {
dev_err(chan2dev(&pd_chan->chan),
"failed to alloc desc\n");
@@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan,
struct pch_dma_desc *desc)
{
if (desc) {
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
list_splice_init(&desc->tx_list, &pd_chan->free_list);
list_add(&desc->desc_node, &pd_chan->free_list);
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
}
}
@@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan)
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
if (pdc_is_idle(pd_chan)) {
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
pdc_advance_work(pd_chan);
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
}
}
@@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
goto err_desc_get;
}
-
if (!first) {
first = desc;
} else {
@@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_unlock_bh(&pd_chan->lock);
-
return 0;
}
static void pdc_tasklet(unsigned long data)
{
struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
+ unsigned long flags;
if (!pdc_is_idle(pd_chan)) {
dev_err(chan2dev(&pd_chan->chan),
@@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data)
return;
}
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irqsave(&pd_chan->lock, flags);
if (test_and_clear_bit(0, &pd_chan->err_status))
pdc_handle_error(pd_chan);
else
pdc_advance_work(pd_chan);
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irqrestore(&pd_chan->lock, flags);
}
static irqreturn_t pd_irq(int irq, void *devid)
@@ -694,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid)
return ret;
}
+#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
struct pch_dma_chan *pd_chan;
@@ -771,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev)
return 0;
}
+#endif
static int __devinit pch_dma_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 7c50f6dfd3f4..6abe1ec1f2ce 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -657,7 +657,7 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
}
static int __devinit
-pl330_probe(struct amba_device *adev, struct amba_id *id)
+pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
struct dma_pl330_dmac *pdmac;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 28720d3103c4..6451b581a70b 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -750,7 +750,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
return;
}
- /* Find the first not transferred desciptor */
+ /* Find the first not transferred descriptor */
list_for_each_entry(desc, &sh_chan->ld_queue, node)
if (desc->mark == DESC_SUBMITTED) {
dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 6e1d46a65d0e..af955de035f4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -68,6 +68,7 @@ enum d40_command {
* @base: Pointer to memory area when the pre_alloc_lli's are not large
* enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
* pre_alloc_lli is used.
+ * @dma_addr: DMA address, if mapped
* @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
* @pre_alloc_lli: Pre allocated area for the most common case of transfers,
* one buffer to one buffer.
@@ -75,6 +76,7 @@ enum d40_command {
struct d40_lli_pool {
void *base;
int size;
+ dma_addr_t dma_addr;
/* Space for dst and src, plus an extra for padding */
u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
@@ -94,7 +96,6 @@ struct d40_lli_pool {
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
- * @is_hw_linked: true if this job will automatically be continued for
* the previous one.
*
* This descriptor is used for both logical and physical transfers.
@@ -114,7 +115,7 @@ struct d40_desc {
struct list_head node;
bool is_in_client_list;
- bool is_hw_linked;
+ bool cyclic;
};
/**
@@ -130,6 +131,7 @@ struct d40_desc {
*/
struct d40_lcla_pool {
void *base;
+ dma_addr_t dma_addr;
void *base_unaligned;
int pages;
spinlock_t lock;
@@ -303,9 +305,37 @@ struct d40_reg_val {
unsigned int val;
};
-static int d40_pool_lli_alloc(struct d40_desc *d40d,
- int lli_len, bool is_log)
+static struct device *chan2dev(struct d40_chan *d40c)
{
+ return &d40c->chan.dev->device;
+}
+
+static bool chan_is_physical(struct d40_chan *chan)
+{
+ return chan->log_num == D40_PHY_CHAN;
+}
+
+static bool chan_is_logical(struct d40_chan *chan)
+{
+ return !chan_is_physical(chan);
+}
+
+static void __iomem *chan_base(struct d40_chan *chan)
+{
+ return chan->base->virtbase + D40_DREG_PCBASE +
+ chan->phy_chan->num * D40_DREG_PCDELTA;
+}
+
+#define d40_err(dev, format, arg...) \
+ dev_err(dev, "[%s] " format, __func__, ## arg)
+
+#define chan_err(d40c, format, arg...) \
+ d40_err(chan2dev(d40c), format, ## arg)
+
+static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
+ int lli_len)
+{
+ bool is_log = chan_is_logical(d40c);
u32 align;
void *base;
@@ -319,7 +349,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
d40d->lli_pool.base = NULL;
} else {
- d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
+ d40d->lli_pool.size = lli_len * 2 * align;
base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
d40d->lli_pool.base = base;
@@ -329,22 +359,37 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
}
if (is_log) {
- d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
- align);
- d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
- align);
+ d40d->lli_log.src = PTR_ALIGN(base, align);
+ d40d->lli_log.dst = d40d->lli_log.src + lli_len;
+
+ d40d->lli_pool.dma_addr = 0;
} else {
- d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
- align);
- d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
- align);
+ d40d->lli_phy.src = PTR_ALIGN(base, align);
+ d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
+
+ d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
+ d40d->lli_phy.src,
+ d40d->lli_pool.size,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(d40c->base->dev,
+ d40d->lli_pool.dma_addr)) {
+ kfree(d40d->lli_pool.base);
+ d40d->lli_pool.base = NULL;
+ d40d->lli_pool.dma_addr = 0;
+ return -ENOMEM;
+ }
}
return 0;
}
-static void d40_pool_lli_free(struct d40_desc *d40d)
+static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
+ if (d40d->lli_pool.dma_addr)
+ dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+
kfree(d40d->lli_pool.base);
d40d->lli_pool.base = NULL;
d40d->lli_pool.size = 0;
@@ -391,7 +436,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
int i;
int ret = -EINVAL;
- if (d40c->log_num == D40_PHY_CHAN)
+ if (chan_is_physical(d40c))
return 0;
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
@@ -430,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
- d40_pool_lli_free(d);
+ d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
@@ -450,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
+ d40_pool_lli_free(d40c, d40d);
d40_lcla_free_all(d40c, d40d);
kmem_cache_free(d40c->base->desc_slab, d40d);
}
@@ -459,57 +505,128 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
list_add_tail(&desc->node, &d40c->active);
}
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
- int curr_lcla = -EINVAL, next_lcla;
+ struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
+ struct d40_phy_lli *lli_src = desc->lli_phy.src;
+ void __iomem *base = chan_base(chan);
+
+ writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
+ writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
+ writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
+ writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
+
+ writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
+ writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
+ writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
+ writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
+}
- if (d40c->log_num == D40_PHY_CHAN) {
- d40_phy_lli_write(d40c->base->virtbase,
- d40c->phy_chan->num,
- d40d->lli_phy.dst,
- d40d->lli_phy.src);
- d40d->lli_current = d40d->lli_len;
- } else {
+static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
+{
+ struct d40_lcla_pool *pool = &chan->base->lcla_pool;
+ struct d40_log_lli_bidir *lli = &desc->lli_log;
+ int lli_current = desc->lli_current;
+ int lli_len = desc->lli_len;
+ bool cyclic = desc->cyclic;
+ int curr_lcla = -EINVAL;
+ int first_lcla = 0;
+ bool linkback;
- if ((d40d->lli_len - d40d->lli_current) > 1)
- curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+ /*
+ * We may have partially running cyclic transfers, in case we didn't get
+ * enough LCLA entries.
+ */
+ linkback = cyclic && lli_current == 0;
- d40_log_lli_lcpa_write(d40c->lcpa,
- &d40d->lli_log.dst[d40d->lli_current],
- &d40d->lli_log.src[d40d->lli_current],
- curr_lcla);
+ /*
+ * For linkback, we need one LCLA even with only one link, because we
+ * can't link back to the one in LCPA space
+ */
+ if (linkback || (lli_len - lli_current > 1)) {
+ curr_lcla = d40_lcla_alloc_one(chan, desc);
+ first_lcla = curr_lcla;
+ }
- d40d->lli_current++;
- for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
- struct d40_log_lli *lcla;
+ /*
+ * For linkback, we normally load the LCPA in the loop since we need to
+ * link it to the second LCLA and not the first. However, if we
+ * couldn't even get a first LCLA, then we have to run in LCPA and
+ * reload manually.
+ */
+ if (!linkback || curr_lcla == -EINVAL) {
+ unsigned int flags = 0;
- if (d40d->lli_current + 1 < d40d->lli_len)
- next_lcla = d40_lcla_alloc_one(d40c, d40d);
- else
- next_lcla = -EINVAL;
+ if (curr_lcla == -EINVAL)
+ flags |= LLI_TERM_INT;
- lcla = d40c->base->lcla_pool.base +
- d40c->phy_chan->num * 1024 +
- 8 * curr_lcla * 2;
+ d40_log_lli_lcpa_write(chan->lcpa,
+ &lli->dst[lli_current],
+ &lli->src[lli_current],
+ curr_lcla,
+ flags);
+ lli_current++;
+ }
- d40_log_lli_lcla_write(lcla,
- &d40d->lli_log.dst[d40d->lli_current],
- &d40d->lli_log.src[d40d->lli_current],
- next_lcla);
+ if (curr_lcla < 0)
+ goto out;
- (void) dma_map_single(d40c->base->dev, lcla,
- 2 * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
+ for (; lli_current < lli_len; lli_current++) {
+ unsigned int lcla_offset = chan->phy_chan->num * 1024 +
+ 8 * curr_lcla * 2;
+ struct d40_log_lli *lcla = pool->base + lcla_offset;
+ unsigned int flags = 0;
+ int next_lcla;
- curr_lcla = next_lcla;
+ if (lli_current + 1 < lli_len)
+ next_lcla = d40_lcla_alloc_one(chan, desc);
+ else
+ next_lcla = linkback ? first_lcla : -EINVAL;
- if (curr_lcla == -EINVAL) {
- d40d->lli_current++;
- break;
- }
+ if (cyclic || next_lcla == -EINVAL)
+ flags |= LLI_TERM_INT;
+
+ if (linkback && curr_lcla == first_lcla) {
+ /* First link goes in both LCPA and LCLA */
+ d40_log_lli_lcpa_write(chan->lcpa,
+ &lli->dst[lli_current],
+ &lli->src[lli_current],
+ next_lcla, flags);
+ }
+
+ /*
+ * One unused LCLA in the cyclic case if the very first
+ * next_lcla fails...
+ */
+ d40_log_lli_lcla_write(lcla,
+ &lli->dst[lli_current],
+ &lli->src[lli_current],
+ next_lcla, flags);
+
+ dma_sync_single_range_for_device(chan->base->dev,
+ pool->dma_addr, lcla_offset,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ curr_lcla = next_lcla;
+
+ if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
+ lli_current++;
+ break;
}
}
+
+out:
+ desc->lli_current = lli_current;
+}
+
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ if (chan_is_physical(d40c)) {
+ d40_phy_lli_load(d40c, d40d);
+ d40d->lli_current = d40d->lli_len;
+ } else
+ d40_log_lli_to_lcxa(d40c, d40d);
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
@@ -543,18 +660,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
return d;
}
-static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
-{
- struct d40_desc *d;
-
- if (list_empty(&d40c->queue))
- return NULL;
- list_for_each_entry(d, &d40c->queue, node)
- if (list_is_last(&d->node, &d40c->queue))
- break;
- return d;
-}
-
static int d40_psize_2_burst_size(bool is_log, int psize)
{
if (is_log) {
@@ -666,9 +771,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
}
if (i == D40_SUSPEND_MAX_IT) {
- dev_err(&d40c->chan.dev->device,
- "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
- __func__, d40c->phy_chan->num, d40c->log_num,
+ chan_err(d40c,
+ "unable to suspend the chl %d (log: %d) status %x\n",
+ d40c->phy_chan->num, d40c->log_num,
status);
dump_stack();
ret = -EBUSY;
@@ -701,17 +806,45 @@ static void d40_term_all(struct d40_chan *d40c)
d40c->busy = false;
}
+static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
+ u32 event, int reg)
+{
+ void __iomem *addr = chan_base(d40c) + reg;
+ int tries;
+
+ if (!enable) {
+ writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+ | ~D40_EVENTLINE_MASK(event), addr);
+ return;
+ }
+
+ /*
+ * The hardware sometimes doesn't register the enable when src and dst
+ * event lines are active on the same logical channel. Retry to ensure
+ * it does. Usually only one retry is sufficient.
+ */
+ tries = 100;
+ while (--tries) {
+ writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+ | ~D40_EVENTLINE_MASK(event), addr);
+
+ if (readl(addr) & D40_EVENTLINE_MASK(event))
+ break;
+ }
+
+ if (tries != 99)
+ dev_dbg(chan2dev(d40c),
+ "[%s] workaround enable S%cLNK (%d tries)\n",
+ __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+ 100 - tries);
+
+ WARN_ON(!tries);
+}
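The workaround above boils down to a bounded write-and-verify loop. A sketch of that skeleton in isolation; reg, val and ready_mask are placeholders, not driver symbols:

#include <linux/io.h>
#include <linux/types.h>

/* Illustration of the retry idiom only; not part of the patch. */
static bool write_and_verify(void __iomem *reg, u32 val, u32 ready_mask)
{
	int tries = 100;

	while (--tries) {
		writel(val, reg);
		if (readl(reg) & ready_mask)	/* did the write stick? */
			return true;
	}

	return false;	/* hardware never latched the enable */
}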
+
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
- u32 val;
unsigned long flags;
- /* Notice, that disable requires the physical channel to be stopped */
- if (do_enable)
- val = D40_ACTIVATE_EVENTLINE;
- else
- val = D40_DEACTIVATE_EVENTLINE;
-
spin_lock_irqsave(&d40c->phy_chan->lock, flags);
/* Enable event line connected to device (or memcpy) */
@@ -719,20 +852,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- writel((val << D40_EVENTLINE_POS(event)) |
- ~D40_EVENTLINE_MASK(event),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
+ __d40_config_set_event(d40c, do_enable, event,
+ D40_CHAN_REG_SSLNK);
}
+
if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- writel((val << D40_EVENTLINE_POS(event)) |
- ~D40_EVENTLINE_MASK(event),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
+ __d40_config_set_event(d40c, do_enable, event,
+ D40_CHAN_REG_SDLNK);
}
spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
@@ -740,15 +868,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
+ void __iomem *chanbase = chan_base(d40c);
u32 val;
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
+ val = readl(chanbase + D40_CHAN_REG_SSLNK);
+ val |= readl(chanbase + D40_CHAN_REG_SDLNK);
- val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
return val;
}
@@ -771,7 +896,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c)
= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
};
- if (d40c->log_num == D40_PHY_CHAN)
+ if (chan_is_physical(d40c))
return phy_map[d40c->dma_cfg.mode_opt];
else
return log_map[d40c->dma_cfg.mode_opt];
@@ -785,7 +910,7 @@ static void d40_config_write(struct d40_chan *d40c)
/* Odd addresses are even addresses + 4 */
addr_base = (d40c->phy_chan->num % 2) * 4;
/* Setup channel mode to logical or physical */
- var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
+ var = ((u32)(chan_is_logical(d40c)) + 1) <<
D40_CHAN_POS(d40c->phy_chan->num);
writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
@@ -794,30 +919,18 @@ static void d40_config_write(struct d40_chan *d40c)
writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
+ int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
+ & D40_SREG_ELEM_LOG_LIDX_MASK;
+ void __iomem *chanbase = chan_base(d40c);
+
/* Set default config for CFG reg */
- writel(d40c->src_def_cfg,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSCFG);
- writel(d40c->dst_def_cfg,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDCFG);
+ writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
+ writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
/* Set LIDX for lcla */
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDELT);
-
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSELT);
-
+ writel(lidx, chanbase + D40_CHAN_REG_SSELT);
+ writel(lidx, chanbase + D40_CHAN_REG_SDELT);
}
}
@@ -825,15 +938,15 @@ static u32 d40_residue(struct d40_chan *d40c)
{
u32 num_elt;
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
>> D40_MEM_LCSP2_ECNT_POS;
- else
- num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDELT) &
- D40_SREG_ELEM_PHY_ECNT_MASK) >>
- D40_SREG_ELEM_PHY_ECNT_POS;
+ else {
+ u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
+ num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
+ >> D40_SREG_ELEM_PHY_ECNT_POS;
+ }
+
return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
@@ -841,20 +954,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
{
bool is_link;
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
else
- is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK) &
- D40_SREG_LNK_PHYS_LNK_MASK;
+ is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
+ & D40_SREG_LNK_PHYS_LNK_MASK;
+
return is_link;
}
-static int d40_pause(struct dma_chan *chan)
+static int d40_pause(struct d40_chan *d40c)
{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
int res = 0;
unsigned long flags;
@@ -865,7 +975,7 @@ static int d40_pause(struct dma_chan *chan)
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res == 0) {
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
d40_config_set_event(d40c, false);
/* Resume the other logical channels if any */
if (d40_chan_has_events(d40c))
@@ -878,10 +988,8 @@ static int d40_pause(struct dma_chan *chan)
return res;
}
-static int d40_resume(struct dma_chan *chan)
+static int d40_resume(struct d40_chan *d40c)
{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
int res = 0;
unsigned long flags;
@@ -891,7 +999,7 @@ static int d40_resume(struct dma_chan *chan)
spin_lock_irqsave(&d40c->lock, flags);
if (d40c->base->rev == 0)
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
res = d40_channel_execute_command(d40c,
D40_DMA_SUSPEND_REQ);
goto no_suspend;
@@ -900,7 +1008,7 @@ static int d40_resume(struct dma_chan *chan)
/* If bytes left to transfer or linked tx resume job */
if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
d40_config_set_event(d40c, true);
res = d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -911,75 +1019,20 @@ no_suspend:
return res;
}
-static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
+static int d40_terminate_all(struct d40_chan *chan)
{
- /* TODO: Write */
-}
-
-static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
-{
- struct d40_desc *d40d_prev = NULL;
- int i;
- u32 val;
-
- if (!list_empty(&d40c->queue))
- d40d_prev = d40_last_queued(d40c);
- else if (!list_empty(&d40c->active))
- d40d_prev = d40_first_active_get(d40c);
-
- if (!d40d_prev)
- return;
-
- /* Here we try to join this job with previous jobs */
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
-
- /* Figure out which link we're currently transmitting */
- for (i = 0; i < d40d_prev->lli_len; i++)
- if (val == d40d_prev->lli_phy.src[i].reg_lnk)
- break;
-
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
-
- if (i == (d40d_prev->lli_len - 1) && val > 0) {
- /* Change the current one */
- writel(virt_to_phys(d40d->lli_phy.src),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
- writel(virt_to_phys(d40d->lli_phy.dst),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
-
- d40d->is_hw_linked = true;
-
- } else if (i < d40d_prev->lli_len) {
- (void) dma_unmap_single(d40c->base->dev,
- virt_to_phys(d40d_prev->lli_phy.src),
- d40d_prev->lli_pool.size,
- DMA_TO_DEVICE);
+ unsigned long flags;
+ int ret = 0;
- /* Keep the settings */
- val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
- ~D40_SREG_LNK_PHYS_LNK_MASK;
- d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
- val | virt_to_phys(d40d->lli_phy.src);
+ ret = d40_pause(chan);
+ if (!ret && chan_is_physical(chan))
+ ret = d40_channel_execute_command(chan, D40_DMA_STOP);
- val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
- ~D40_SREG_LNK_PHYS_LNK_MASK;
- d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
- val | virt_to_phys(d40d->lli_phy.dst);
+ spin_lock_irqsave(&chan->lock, flags);
+ d40_term_all(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
- (void) dma_map_single(d40c->base->dev,
- d40d_prev->lli_phy.src,
- d40d_prev->lli_pool.size,
- DMA_TO_DEVICE);
- d40d->is_hw_linked = true;
- }
+ return ret;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -990,8 +1043,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
unsigned long flags;
- (void) d40_pause(&d40c->chan);
-
spin_lock_irqsave(&d40c->lock, flags);
d40c->chan.cookie++;
@@ -1001,17 +1052,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
d40d->txd.cookie = d40c->chan.cookie;
- if (d40c->log_num == D40_PHY_CHAN)
- d40_tx_submit_phy(d40c, d40d);
- else
- d40_tx_submit_log(d40c, d40d);
-
d40_desc_queue(d40c, d40d);
spin_unlock_irqrestore(&d40c->lock, flags);
- (void) d40_resume(&d40c->chan);
-
return tx->cookie;
}
@@ -1020,7 +1064,7 @@ static int d40_start(struct d40_chan *d40c)
if (d40c->base->rev == 0) {
int err;
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
err = d40_channel_execute_command(d40c,
D40_DMA_SUSPEND_REQ);
if (err)
@@ -1028,7 +1072,7 @@ static int d40_start(struct d40_chan *d40c)
}
}
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
d40_config_set_event(d40c, true);
return d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -1051,21 +1095,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
/* Add to active queue */
d40_desc_submit(d40c, d40d);
- /*
- * If this job is already linked in hw,
- * do not submit it.
- */
-
- if (!d40d->is_hw_linked) {
- /* Initiate DMA job */
- d40_desc_load(d40c, d40d);
+ /* Initiate DMA job */
+ d40_desc_load(d40c, d40d);
- /* Start dma job */
- err = d40_start(d40c);
+ /* Start dma job */
+ err = d40_start(d40c);
- if (err)
- return NULL;
- }
+ if (err)
+ return NULL;
}
return d40d;
@@ -1082,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40d == NULL)
return;
- d40_lcla_free_all(d40c, d40d);
+ if (d40d->cyclic) {
+ /*
+ * If this was a partially loaded list, we need to reload
+ * it, but only when the list has completed. We need to check
+ * for done because the interrupt will hit for every link, and
+ * not just the last one.
+ */
+ if (d40d->lli_current < d40d->lli_len
+ && !d40_tx_is_linked(d40c)
+ && !d40_residue(d40c)) {
+ d40_lcla_free_all(d40c, d40d);
+ d40_desc_load(d40c, d40d);
+ (void) d40_start(d40c);
- if (d40d->lli_current < d40d->lli_len) {
- d40_desc_load(d40c, d40d);
- /* Start dma job */
- (void) d40_start(d40c);
- return;
- }
+ if (d40d->lli_current == d40d->lli_len)
+ d40d->lli_current = 0;
+ }
+ } else {
+ d40_lcla_free_all(d40c, d40d);
- if (d40_queue_start(d40c) == NULL)
- d40c->busy = false;
+ if (d40d->lli_current < d40d->lli_len) {
+ d40_desc_load(d40c, d40d);
+ /* Start dma job */
+ (void) d40_start(d40c);
+ return;
+ }
+
+ if (d40_queue_start(d40c) == NULL)
+ d40c->busy = false;
+ }
d40c->pending_tx++;
tasklet_schedule(&d40c->tasklet);
@@ -1111,11 +1167,11 @@ static void dma_tasklet(unsigned long data)
/* Get first active entry from list */
d40d = d40_first_active_get(d40c);
-
if (d40d == NULL)
goto err;
- d40c->completed = d40d->txd.cookie;
+ if (!d40d->cyclic)
+ d40c->completed = d40d->txd.cookie;
/*
* If terminating a channel pending_tx is set to zero.
@@ -1130,16 +1186,18 @@ static void dma_tasklet(unsigned long data)
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;
- if (async_tx_test_ack(&d40d->txd)) {
- d40_pool_lli_free(d40d);
- d40_desc_remove(d40d);
- d40_desc_free(d40c, d40d);
- } else {
- if (!d40d->is_in_client_list) {
+ if (!d40d->cyclic) {
+ if (async_tx_test_ack(&d40d->txd)) {
+ d40_pool_lli_free(d40c, d40d);
d40_desc_remove(d40d);
- d40_lcla_free_all(d40c, d40d);
- list_add_tail(&d40d->node, &d40c->client);
- d40d->is_in_client_list = true;
+ d40_desc_free(d40c, d40d);
+ } else {
+ if (!d40d->is_in_client_list) {
+ d40_desc_remove(d40d);
+ d40_lcla_free_all(d40c, d40d);
+ list_add_tail(&d40d->node, &d40c->client);
+ d40d->is_in_client_list = true;
+ }
}
}
@@ -1216,9 +1274,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
if (!il[row].is_error)
dma_tc_handle(d40c);
else
- dev_err(base->dev,
- "[%s] IRQ chan: %ld offset %d idx %d\n",
- __func__, chan, il[row].offset, idx);
+ d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
+ chan, il[row].offset, idx);
spin_unlock(&d40c->lock);
}
@@ -1237,8 +1294,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
if (!conf->dir) {
- dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
- __func__);
+ chan_err(d40c, "Invalid direction.\n");
res = -EINVAL;
}
@@ -1246,46 +1302,40 @@ static int d40_validate_conf(struct d40_chan *d40c,
d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
d40c->runtime_addr == 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Invalid TX channel address (%d)\n",
- __func__, conf->dst_dev_type);
+ chan_err(d40c, "Invalid TX channel address (%d)\n",
+ conf->dst_dev_type);
res = -EINVAL;
}
if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
d40c->runtime_addr == 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Invalid RX channel address (%d)\n",
- __func__, conf->src_dev_type);
+ chan_err(d40c, "Invalid RX channel address (%d)\n",
+ conf->src_dev_type);
res = -EINVAL;
}
if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
dst_event_group == STEDMA40_DEV_DST_MEMORY) {
- dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
- __func__);
+ chan_err(d40c, "Invalid dst\n");
res = -EINVAL;
}
if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
src_event_group == STEDMA40_DEV_SRC_MEMORY) {
- dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
- __func__);
+ chan_err(d40c, "Invalid src\n");
res = -EINVAL;
}
if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
- dev_err(&d40c->chan.dev->device,
- "[%s] No event line\n", __func__);
+ chan_err(d40c, "No event line\n");
res = -EINVAL;
}
if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
(src_event_group != dst_event_group)) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Invalid event group\n", __func__);
+ chan_err(d40c, "Invalid event group\n");
res = -EINVAL;
}
@@ -1294,9 +1344,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
* DMAC HW supports it. Will be added to this driver,
* in case any dma client requires it.
*/
- dev_err(&d40c->chan.dev->device,
- "[%s] periph to periph not supported\n",
- __func__);
+ chan_err(d40c, "periph to periph not supported\n");
res = -EINVAL;
}
@@ -1309,9 +1357,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
* src (burst x width) == dst (burst x width)
*/
- dev_err(&d40c->chan.dev->device,
- "[%s] src (burst x width) != dst (burst x width)\n",
- __func__);
+ chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
res = -EINVAL;
}
@@ -1514,8 +1560,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
} else {
- dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
- __func__);
+ chan_err(d40c, "No memcpy\n");
return -EINVAL;
}
@@ -1540,21 +1585,19 @@ static int d40_free_dma(struct d40_chan *d40c)
/* Release client owned descriptors */
if (!list_empty(&d40c->client))
list_for_each_entry_safe(d, _d, &d40c->client, node) {
- d40_pool_lli_free(d);
+ d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
d40_desc_free(d40c, d);
}
if (phy == NULL) {
- dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
- __func__);
+ chan_err(d40c, "phy == null\n");
return -EINVAL;
}
if (phy->allocated_src == D40_ALLOC_FREE &&
phy->allocated_dst == D40_ALLOC_FREE) {
- dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
- __func__);
+ chan_err(d40c, "channel already free\n");
return -EINVAL;
}
@@ -1566,19 +1609,17 @@ static int d40_free_dma(struct d40_chan *d40c)
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
is_src = true;
} else {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unknown direction\n", __func__);
+ chan_err(d40c, "Unknown direction\n");
return -EINVAL;
}
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res) {
- dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
- __func__);
+ chan_err(d40c, "suspend failed\n");
return res;
}
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
/* Release logical channel, deactivate the event line */
d40_config_set_event(d40c, false);
@@ -1594,9 +1635,8 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c,
D40_DMA_RUN);
if (res) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Executing RUN command\n",
- __func__);
+ chan_err(d40c,
+ "Executing RUN command\n");
return res;
}
}
@@ -1609,8 +1649,7 @@ static int d40_free_dma(struct d40_chan *d40c)
/* Release physical channel */
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to stop channel\n", __func__);
+ chan_err(d40c, "Failed to stop channel\n");
return res;
}
d40c->phy_chan = NULL;
@@ -1622,6 +1661,7 @@ static int d40_free_dma(struct d40_chan *d40c)
static bool d40_is_paused(struct d40_chan *d40c)
{
+ void __iomem *chanbase = chan_base(d40c);
bool is_paused = false;
unsigned long flags;
void __iomem *active_reg;
@@ -1630,7 +1670,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
spin_lock_irqsave(&d40c->lock, flags);
- if (d40c->log_num == D40_PHY_CHAN) {
+ if (chan_is_physical(d40c)) {
if (d40c->phy_chan->num % 2 == 0)
active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
else
@@ -1648,17 +1688,12 @@ static bool d40_is_paused(struct d40_chan *d40c)
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
+ status = readl(chanbase + D40_CHAN_REG_SDLNK);
} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
+ status = readl(chanbase + D40_CHAN_REG_SSLNK);
} else {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unknown direction\n", __func__);
+ chan_err(d40c, "Unknown direction\n");
goto _exit;
}
@@ -1688,114 +1723,184 @@ static u32 stedma40_residue(struct dma_chan *chan)
return bytes_left;
}
-struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
- struct scatterlist *sgl_dst,
- struct scatterlist *sgl_src,
- unsigned int sgl_len,
- unsigned long dma_flags)
+static int
+d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
+ struct scatterlist *sg_src, struct scatterlist *sg_dst,
+ unsigned int sg_len, dma_addr_t src_dev_addr,
+ dma_addr_t dst_dev_addr)
{
- int res;
- struct d40_desc *d40d;
- struct d40_chan *d40c = container_of(chan, struct d40_chan,
- chan);
- unsigned long flags;
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct stedma40_half_channel_info *src_info = &cfg->src_info;
+ struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
+ int ret;
- if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unallocated channel.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ ret = d40_log_sg_to_lli(sg_src, sg_len,
+ src_dev_addr,
+ desc->lli_log.src,
+ chan->log_def.lcsp1,
+ src_info->data_width,
+ dst_info->data_width);
- spin_lock_irqsave(&d40c->lock, flags);
- d40d = d40_desc_get(d40c);
+ ret = d40_log_sg_to_lli(sg_dst, sg_len,
+ dst_dev_addr,
+ desc->lli_log.dst,
+ chan->log_def.lcsp3,
+ dst_info->data_width,
+ src_info->data_width);
- if (d40d == NULL)
+ return ret < 0 ? ret : 0;
+}
+
+static int
+d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
+ struct scatterlist *sg_src, struct scatterlist *sg_dst,
+ unsigned int sg_len, dma_addr_t src_dev_addr,
+ dma_addr_t dst_dev_addr)
+{
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct stedma40_half_channel_info *src_info = &cfg->src_info;
+ struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
+ unsigned long flags = 0;
+ int ret;
+
+ if (desc->cyclic)
+ flags |= LLI_CYCLIC | LLI_TERM_INT;
+
+ ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
+ desc->lli_phy.src,
+ virt_to_phys(desc->lli_phy.src),
+ chan->src_def_cfg,
+ src_info, dst_info, flags);
+
+ ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
+ desc->lli_phy.dst,
+ virt_to_phys(desc->lli_phy.dst),
+ chan->dst_def_cfg,
+ dst_info, src_info, flags);
+
+ dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
+ desc->lli_pool.size, DMA_TO_DEVICE);
+
+ return ret < 0 ? ret : 0;
+}
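A recurring change in this patch is visible here: the LLI pool is no longer dma_map_single()'d on every submit; it is mapped once and only synced when the CPU rewrites descriptors. A sketch of that map-once / sync-per-write pattern, with lli_pool, pool_map() and pool_push() as made-up names:

#include <linux/dma-mapping.h>

struct lli_pool {
	struct device *dev;
	void *cpu;		/* CPU-side descriptor memory */
	dma_addr_t dma;		/* bus address, valid after pool_map() */
	size_t size;
};

static int pool_map(struct lli_pool *p)
{
	p->dma = dma_map_single(p->dev, p->cpu, p->size, DMA_TO_DEVICE);
	return dma_mapping_error(p->dev, p->dma) ? -ENOMEM : 0;
}

static void pool_push(struct lli_pool *p, size_t off, size_t len)
{
	/* CPU wrote descriptors in [off, off + len); hand them to the HW. */
	dma_sync_single_range_for_device(p->dev, p->dma, off, len,
					 DMA_TO_DEVICE);
}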
+
+
+static struct d40_desc *
+d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
+ unsigned int sg_len, unsigned long dma_flags)
+{
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct d40_desc *desc;
+ int ret;
+
+ desc = d40_desc_get(chan);
+ if (!desc)
+ return NULL;
+
+ desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
+ cfg->dst_info.data_width);
+ if (desc->lli_len < 0) {
+ chan_err(chan, "Unaligned size\n");
goto err;
+ }
- d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
+ ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
+ if (ret < 0) {
+ chan_err(chan, "Could not allocate lli\n");
goto err;
}
- d40d->lli_current = 0;
- d40d->txd.flags = dma_flags;
- if (d40c->log_num != D40_PHY_CHAN) {
+ desc->lli_current = 0;
+ desc->txd.flags = dma_flags;
+ desc->txd.tx_submit = d40_tx_submit;
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
+ dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
- (void) d40_log_sg_to_lli(sgl_src,
- sgl_len,
- d40d->lli_log.src,
- d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
-
- (void) d40_log_sg_to_lli(sgl_dst,
- sgl_len,
- d40d->lli_log.dst,
- d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width);
- } else {
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
+ return desc;
+
+err:
+ d40_desc_free(chan, desc);
+ return NULL;
+}
+
+static dma_addr_t
+d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+{
+ struct stedma40_platform_data *plat = chan->base->plat_data;
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ dma_addr_t addr;
- res = d40_phy_sg_to_lli(sgl_src,
- sgl_len,
- 0,
- d40d->lli_phy.src,
- virt_to_phys(d40d->lli_phy.src),
- d40c->src_def_cfg,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.psize);
+ if (chan->runtime_addr)
+ return chan->runtime_addr;
- if (res < 0)
- goto err;
+ if (direction == DMA_FROM_DEVICE)
+ addr = plat->dev_rx[cfg->src_dev_type];
+ else if (direction == DMA_TO_DEVICE)
+ addr = plat->dev_tx[cfg->dst_dev_type];
- res = d40_phy_sg_to_lli(sgl_dst,
- sgl_len,
- 0,
- d40d->lli_phy.dst,
- virt_to_phys(d40d->lli_phy.dst),
- d40c->dst_def_cfg,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.psize);
+ return addr;
+}
- if (res < 0)
- goto err;
+static struct dma_async_tx_descriptor *
+d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
+ struct scatterlist *sg_dst, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long dma_flags)
+{
+ struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
+ dma_addr_t src_dev_addr = 0;
+ dma_addr_t dst_dev_addr = 0;
+ struct d40_desc *desc;
+ unsigned long flags;
+ int ret;
- (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
- d40d->lli_pool.size, DMA_TO_DEVICE);
+ if (!chan->phy_chan) {
+ chan_err(chan, "Cannot prepare unallocated channel\n");
+ return NULL;
}
- dma_async_tx_descriptor_init(&d40d->txd, chan);
- d40d->txd.tx_submit = d40_tx_submit;
+ spin_lock_irqsave(&chan->lock, flags);
- spin_unlock_irqrestore(&d40c->lock, flags);
+ desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
+ if (desc == NULL)
+ goto err;
+
+ if (sg_next(&sg_src[sg_len - 1]) == sg_src)
+ desc->cyclic = true;
+
+ if (direction != DMA_NONE) {
+ dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
+
+ if (direction == DMA_FROM_DEVICE)
+ src_dev_addr = dev_addr;
+ else if (direction == DMA_TO_DEVICE)
+ dst_dev_addr = dev_addr;
+ }
+
+ if (chan_is_logical(chan))
+ ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
+ sg_len, src_dev_addr, dst_dev_addr);
+ else
+ ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
+ sg_len, src_dev_addr, dst_dev_addr);
+
+ if (ret) {
+ chan_err(chan, "Failed to prepare %s sg job: %d\n",
+ chan_is_logical(chan) ? "log" : "phy", ret);
+ goto err;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return &desc->txd;
- return &d40d->txd;
err:
- if (d40d)
- d40_desc_free(d40c, d40d);
- spin_unlock_irqrestore(&d40c->lock, flags);
+ if (desc)
+ d40_desc_free(chan, desc);
+ spin_unlock_irqrestore(&chan->lock, flags);
return NULL;
}
-EXPORT_SYMBOL(stedma40_memcpy_sg);
bool stedma40_filter(struct dma_chan *chan, void *data)
{
@@ -1818,6 +1923,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
}
EXPORT_SYMBOL(stedma40_filter);
+static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
+{
+ bool realtime = d40c->dma_cfg.realtime;
+ bool highprio = d40c->dma_cfg.high_priority;
+ u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
+ u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
+ u32 event = D40_TYPE_TO_EVENT(dev_type);
+ u32 group = D40_TYPE_TO_GROUP(dev_type);
+ u32 bit = 1 << event;
+
+ /* Destination event lines are stored in the upper halfword */
+ if (!src)
+ bit <<= 16;
+
+ writel(bit, d40c->base->virtbase + prioreg + group * 4);
+ writel(bit, d40c->base->virtbase + rtreg + group * 4);
+}
+
+static void d40_set_prio_realtime(struct d40_chan *d40c)
+{
+ if (d40c->base->rev < 3)
+ return;
+
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
+
+ if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
+}
+
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
@@ -1834,9 +1971,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (!d40c->configured) {
err = d40_config_memcpy(d40c);
if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to configure memcpy channel\n",
- __func__);
+ chan_err(d40c, "Failed to configure memcpy channel\n");
goto fail;
}
}
@@ -1844,16 +1979,17 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
err = d40_allocate_channel(d40c);
if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to allocate channel\n", __func__);
+ chan_err(d40c, "Failed to allocate channel\n");
goto fail;
}
/* Fill in basic CFG register values */
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
- &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
+ &d40c->dst_def_cfg, chan_is_logical(d40c));
- if (d40c->log_num != D40_PHY_CHAN) {
+ d40_set_prio_realtime(d40c);
+
+ if (chan_is_logical(d40c)) {
d40_log_cfg(&d40c->dma_cfg,
&d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
@@ -1886,8 +2022,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
unsigned long flags;
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Cannot free unallocated channel\n", __func__);
+ chan_err(d40c, "Cannot free unallocated channel\n");
return;
}
@@ -1897,8 +2032,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
err = d40_free_dma(d40c);
if (err)
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to free channel\n", __func__);
+ chan_err(d40c, "Failed to free channel\n");
spin_unlock_irqrestore(&d40c->lock, flags);
}
@@ -1908,251 +2042,31 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
size_t size,
unsigned long dma_flags)
{
- struct d40_desc *d40d;
- struct d40_chan *d40c = container_of(chan, struct d40_chan,
- chan);
- unsigned long flags;
-
- if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel is not allocated.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- spin_lock_irqsave(&d40c->lock, flags);
- d40d = d40_desc_get(d40c);
-
- if (d40d == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Descriptor is NULL\n", __func__);
- goto err;
- }
+ struct scatterlist dst_sg;
+ struct scatterlist src_sg;
- d40d->txd.flags = dma_flags;
- d40d->lli_len = d40_size_2_dmalen(size,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
- goto err;
- }
+ sg_init_table(&dst_sg, 1);
+ sg_init_table(&src_sg, 1);
+ sg_dma_address(&dst_sg) = dst;
+ sg_dma_address(&src_sg) = src;
- dma_async_tx_descriptor_init(&d40d->txd, chan);
+ sg_dma_len(&dst_sg) = size;
+ sg_dma_len(&src_sg) = size;
- d40d->txd.tx_submit = d40_tx_submit;
-
- if (d40c->log_num != D40_PHY_CHAN) {
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
- d40d->lli_current = 0;
-
- if (d40_log_buf_to_lli(d40d->lli_log.src,
- src,
- size,
- d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- true) == NULL)
- goto err;
-
- if (d40_log_buf_to_lli(d40d->lli_log.dst,
- dst,
- size,
- d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- true) == NULL)
- goto err;
-
- } else {
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
-
- if (d40_phy_buf_to_lli(d40d->lli_phy.src,
- src,
- size,
- d40c->dma_cfg.src_info.psize,
- 0,
- d40c->src_def_cfg,
- true,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- false) == NULL)
- goto err;
-
- if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
- dst,
- size,
- d40c->dma_cfg.dst_info.psize,
- 0,
- d40c->dst_def_cfg,
- true,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- false) == NULL)
- goto err;
-
- (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
- d40d->lli_pool.size, DMA_TO_DEVICE);
- }
-
- spin_unlock_irqrestore(&d40c->lock, flags);
- return &d40d->txd;
-
-err:
- if (d40d)
- d40_desc_free(d40c, d40d);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return NULL;
+ return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}
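d40_prep_memcpy() is now just a thin wrapper that dresses the copy up as a one-entry scatterlist pair for d40_prep_sg(). A hedged usage sketch, not taken from the patch, of how a dmaengine client ends up in this path; it assumes chan was obtained with dma_request_channel() and that dst/src are already DMA-mapped:

#include <linux/dmaengine.h>

/* Illustration only; issue_memcpy() is a made-up helper. */
static dma_cookie_t issue_memcpy(struct dma_chan *chan, dma_addr_t dst,
				 dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx)
		return -EBUSY;

	cookie = tx->tx_submit(tx);	/* queues the descriptor */
	dma_async_issue_pending(chan);	/* kicks the channel */

	return cookie;
}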
static struct dma_async_tx_descriptor *
-d40_prep_sg(struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long dma_flags)
+d40_prep_memcpy_sg(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long dma_flags)
{
if (dst_nents != src_nents)
return NULL;
- return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
-}
-
-static int d40_prep_slave_sg_log(struct d40_desc *d40d,
- struct d40_chan *d40c,
- struct scatterlist *sgl,
- unsigned int sg_len,
- enum dma_data_direction direction,
- unsigned long dma_flags)
-{
- dma_addr_t dev_addr = 0;
- int total_size;
-
- d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
- return -EINVAL;
- }
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- return -ENOMEM;
- }
-
- d40d->lli_current = 0;
-
- if (direction == DMA_FROM_DEVICE)
- if (d40c->runtime_addr)
- dev_addr = d40c->runtime_addr;
- else
- dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
- else if (direction == DMA_TO_DEVICE)
- if (d40c->runtime_addr)
- dev_addr = d40c->runtime_addr;
- else
- dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
-
- else
- return -EINVAL;
-
- total_size = d40_log_sg_to_dev(sgl, sg_len,
- &d40d->lli_log,
- &d40c->log_def,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- direction,
- dev_addr);
-
- if (total_size < 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
- struct d40_chan *d40c,
- struct scatterlist *sgl,
- unsigned int sgl_len,
- enum dma_data_direction direction,
- unsigned long dma_flags)
-{
- dma_addr_t src_dev_addr;
- dma_addr_t dst_dev_addr;
- int res;
-
- d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
- return -EINVAL;
- }
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- return -ENOMEM;
- }
-
- d40d->lli_current = 0;
-
- if (direction == DMA_FROM_DEVICE) {
- dst_dev_addr = 0;
- if (d40c->runtime_addr)
- src_dev_addr = d40c->runtime_addr;
- else
- src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
- } else if (direction == DMA_TO_DEVICE) {
- if (d40c->runtime_addr)
- dst_dev_addr = d40c->runtime_addr;
- else
- dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
- src_dev_addr = 0;
- } else
- return -EINVAL;
-
- res = d40_phy_sg_to_lli(sgl,
- sgl_len,
- src_dev_addr,
- d40d->lli_phy.src,
- virt_to_phys(d40d->lli_phy.src),
- d40c->src_def_cfg,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.psize);
- if (res < 0)
- return res;
-
- res = d40_phy_sg_to_lli(sgl,
- sgl_len,
- dst_dev_addr,
- d40d->lli_phy.dst,
- virt_to_phys(d40d->lli_phy.dst),
- d40c->dst_def_cfg,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.psize);
- if (res < 0)
- return res;
-
- (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
- d40d->lli_pool.size, DMA_TO_DEVICE);
- return 0;
+ return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
@@ -2161,52 +2075,40 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
enum dma_data_direction direction,
unsigned long dma_flags)
{
- struct d40_desc *d40d;
- struct d40_chan *d40c = container_of(chan, struct d40_chan,
- chan);
- unsigned long flags;
- int err;
-
- if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Cannot prepare unallocated channel\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+ return NULL;
- spin_lock_irqsave(&d40c->lock, flags);
- d40d = d40_desc_get(d40c);
+ return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
+}
- if (d40d == NULL)
- goto err;
+static struct dma_async_tx_descriptor *
+dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_data_direction direction)
+{
+ unsigned int periods = buf_len / period_len;
+ struct dma_async_tx_descriptor *txd;
+ struct scatterlist *sg;
+ int i;
- if (d40c->log_num != D40_PHY_CHAN)
- err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
- direction, dma_flags);
- else
- err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
- direction, dma_flags);
- if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to prepare %s slave sg job: %d\n",
- __func__,
- d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
- goto err;
+ sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+ for (i = 0; i < periods; i++) {
+ sg_dma_address(&sg[i]) = dma_addr;
+ sg_dma_len(&sg[i]) = period_len;
+ dma_addr += period_len;
}
- d40d->txd.flags = dma_flags;
+ sg[periods].offset = 0;
+ sg[periods].length = 0;
+ sg[periods].page_link =
+ ((unsigned long)sg | 0x01) & ~0x02;
- dma_async_tx_descriptor_init(&d40d->txd, chan);
+ txd = d40_prep_sg(chan, sg, sg, periods, direction,
+ DMA_PREP_INTERRUPT);
- d40d->txd.tx_submit = d40_tx_submit;
+ kfree(sg);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return &d40d->txd;
-
-err:
- if (d40d)
- d40_desc_free(d40c, d40d);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return NULL;
+ return txd;
}
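The spare scatterlist entry above is turned into a chain link back to sg[0]: bit 0 of page_link marks a chain entry and bit 1 (the end marker) stays cleared, so sg_next() of the last period wraps around to the first one, which is exactly the sg_next(last) == first test d40_prep_sg() uses to flag the descriptor as cyclic. A standalone sketch of the same ring construction (illustration only; build_ring() is a made-up helper):

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *build_ring(dma_addr_t buf, size_t period_len,
				      unsigned int periods)
{
	/* kcalloc() zeroes the table, so no entry carries the end marker. */
	struct scatterlist *sg = kcalloc(periods + 1, sizeof(*sg), GFP_KERNEL);
	unsigned int i;

	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = buf + i * period_len;
		sg_dma_len(&sg[i]) = period_len;
	}

	/* Chain entry: points back at the head, end bit left clear. */
	sg[periods].page_link = ((unsigned long)sg | 0x01) & ~0x02;

	return sg;
}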
static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2219,9 +2121,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
int ret;
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Cannot read status of unallocated channel\n",
- __func__);
+ chan_err(d40c, "Cannot read status of unallocated channel\n");
return -EINVAL;
}
@@ -2245,8 +2145,7 @@ static void d40_issue_pending(struct dma_chan *chan)
unsigned long flags;
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel is not allocated!\n", __func__);
+ chan_err(d40c, "Channel is not allocated!\n");
return;
}
@@ -2339,7 +2238,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
return;
}
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
if (config_maxburst >= 16)
psize = STEDMA40_PSIZE_LOG_16;
else if (config_maxburst >= 8)
@@ -2372,7 +2271,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
/* Fill in register values */
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
else
d40_phy_cfg(cfg, &d40c->src_def_cfg,
@@ -2393,25 +2292,20 @@ static void d40_set_runtime_config(struct dma_chan *chan,
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
- unsigned long flags;
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel is not allocated!\n", __func__);
+ chan_err(d40c, "Channel is not allocated!\n");
return -EINVAL;
}
switch (cmd) {
case DMA_TERMINATE_ALL:
- spin_lock_irqsave(&d40c->lock, flags);
- d40_term_all(d40c);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return 0;
+ return d40_terminate_all(d40c);
case DMA_PAUSE:
- return d40_pause(chan);
+ return d40_pause(d40c);
case DMA_RESUME:
- return d40_resume(chan);
+ return d40_resume(d40c);
case DMA_SLAVE_CONFIG:
d40_set_runtime_config(chan,
(struct dma_slave_config *) arg);
@@ -2456,6 +2350,35 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
}
}
+static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
+{
+ if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
+ dev->device_prep_slave_sg = d40_prep_slave_sg;
+
+ if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
+ dev->device_prep_dma_memcpy = d40_prep_memcpy;
+
+ /*
+ * This controller can only access addresses at even
+ * 32-bit boundaries, i.e. 2^2
+ */
+ dev->copy_align = 2;
+ }
+
+ if (dma_has_cap(DMA_SG, dev->cap_mask))
+ dev->device_prep_dma_sg = d40_prep_memcpy_sg;
+
+ if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
+ dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
+
+ dev->device_alloc_chan_resources = d40_alloc_chan_resources;
+ dev->device_free_chan_resources = d40_free_chan_resources;
+ dev->device_issue_pending = d40_issue_pending;
+ dev->device_tx_status = d40_tx_status;
+ dev->device_control = d40_control;
+ dev->dev = base->dev;
+}
+
static int __init d40_dmaengine_init(struct d40_base *base,
int num_reserved_chans)
{
@@ -2466,23 +2389,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_slave.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
- base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
- base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_dma_sg = d40_prep_sg;
- base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_slave.device_tx_status = d40_tx_status;
- base->dma_slave.device_issue_pending = d40_issue_pending;
- base->dma_slave.device_control = d40_control;
- base->dma_slave.dev = base->dev;
+ d40_ops_init(base, &base->dma_slave);
err = dma_async_device_register(&base->dma_slave);
if (err) {
- dev_err(base->dev,
- "[%s] Failed to register slave channels\n",
- __func__);
+ d40_err(base->dev, "Failed to register slave channels\n");
goto failure1;
}
@@ -2491,29 +2405,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
- dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
-
- base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
- base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_dma_sg = d40_prep_sg;
- base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_memcpy.device_tx_status = d40_tx_status;
- base->dma_memcpy.device_issue_pending = d40_issue_pending;
- base->dma_memcpy.device_control = d40_control;
- base->dma_memcpy.dev = base->dev;
- /*
- * This controller can only access address at even
- * 32bit boundaries, i.e. 2^2
- */
- base->dma_memcpy.copy_align = 2;
+ dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
+
+ d40_ops_init(base, &base->dma_memcpy);
err = dma_async_device_register(&base->dma_memcpy);
if (err) {
- dev_err(base->dev,
- "[%s] Failed to regsiter memcpy only channels\n",
- __func__);
+ d40_err(base->dev,
+ "Failed to register memcpy only channels\n");
goto failure2;
}
@@ -2523,24 +2423,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_both.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
- dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
-
- base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_both.device_free_chan_resources = d40_free_chan_resources;
- base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_dma_sg = d40_prep_sg;
- base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_both.device_tx_status = d40_tx_status;
- base->dma_both.device_issue_pending = d40_issue_pending;
- base->dma_both.device_control = d40_control;
- base->dma_both.dev = base->dev;
- base->dma_both.copy_align = 2;
+ dma_cap_set(DMA_SG, base->dma_both.cap_mask);
+ dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
+
+ d40_ops_init(base, &base->dma_both);
err = dma_async_device_register(&base->dma_both);
if (err) {
- dev_err(base->dev,
- "[%s] Failed to register logical and physical capable channels\n",
- __func__);
+ d40_err(base->dev,
+ "Failed to register logical and physical capable channels\n");
goto failure3;
}
return 0;
@@ -2616,9 +2507,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
/*
* D40_DREG_PERIPHID2 Depends on HW revision:
- * MOP500/HREF ED has 0x0008,
+ * DB8500ed has 0x0008,
* ? has 0x0018,
- * HREF V1 has 0x0028
+ * DB8500v1 has 0x0028
+ * DB8500v2 has 0x0038
*/
{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
@@ -2642,8 +2534,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "[%s] No matching clock found\n",
- __func__);
+ d40_err(&pdev->dev, "No matching clock found\n");
goto failure;
}
@@ -2666,9 +2557,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
if (dma_id_regs[i].val !=
readl(virtbase + dma_id_regs[i].reg)) {
- dev_err(&pdev->dev,
- "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
- __func__,
+ d40_err(&pdev->dev,
+ "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
dma_id_regs[i].val,
dma_id_regs[i].reg,
readl(virtbase + dma_id_regs[i].reg));
@@ -2681,9 +2571,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
D40_HW_DESIGNER) {
- dev_err(&pdev->dev,
- "[%s] Unknown designer! Got %x wanted %x\n",
- __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
+ d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
+ val & D40_DREG_PERIPHID2_DESIGNER_MASK,
D40_HW_DESIGNER);
goto failure;
}
@@ -2713,7 +2602,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
sizeof(struct d40_chan), GFP_KERNEL);
if (base == NULL) {
- dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
+ d40_err(&pdev->dev, "Out of memory\n");
goto failure;
}
@@ -2860,6 +2749,7 @@ static void __init d40_hw_init(struct d40_base *base)
static int __init d40_lcla_allocate(struct d40_base *base)
{
+ struct d40_lcla_pool *pool = &base->lcla_pool;
unsigned long *page_list;
int i, j;
int ret = 0;
@@ -2885,9 +2775,8 @@ static int __init d40_lcla_allocate(struct d40_base *base)
base->lcla_pool.pages);
if (!page_list[i]) {
- dev_err(base->dev,
- "[%s] Failed to allocate %d pages.\n",
- __func__, base->lcla_pool.pages);
+ d40_err(base->dev, "Failed to allocate %d pages.\n",
+ base->lcla_pool.pages);
for (j = 0; j < i; j++)
free_pages(page_list[j], base->lcla_pool.pages);
@@ -2925,6 +2814,15 @@ static int __init d40_lcla_allocate(struct d40_base *base)
LCLA_ALIGNMENT);
}
+ pool->dma_addr = dma_map_single(base->dev, pool->base,
+ SZ_1K * base->num_phy_chans,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(base->dev, pool->dma_addr)) {
+ pool->dma_addr = 0;
+ ret = -ENOMEM;
+ goto failure;
+ }
+
writel(virt_to_phys(base->lcla_pool.base),
base->virtbase + D40_DREG_LCLA);
failure:
@@ -2957,9 +2855,7 @@ static int __init d40_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
if (!res) {
ret = -ENOENT;
- dev_err(&pdev->dev,
- "[%s] No \"lcpa\" memory resource\n",
- __func__);
+ d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
goto failure;
}
base->lcpa_size = resource_size(res);
@@ -2968,9 +2864,9 @@ static int __init d40_probe(struct platform_device *pdev)
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O lcpa") == NULL) {
ret = -EBUSY;
- dev_err(&pdev->dev,
- "[%s] Failed to request LCPA region 0x%x-0x%x\n",
- __func__, res->start, res->end);
+ d40_err(&pdev->dev,
+ "Failed to request LCPA region 0x%x-0x%x\n",
+ res->start, res->end);
goto failure;
}
@@ -2986,16 +2882,13 @@ static int __init d40_probe(struct platform_device *pdev)
base->lcpa_base = ioremap(res->start, resource_size(res));
if (!base->lcpa_base) {
ret = -ENOMEM;
- dev_err(&pdev->dev,
- "[%s] Failed to ioremap LCPA region\n",
- __func__);
+ d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto failure;
}
ret = d40_lcla_allocate(base);
if (ret) {
- dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
- __func__);
+ d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
goto failure;
}
@@ -3004,9 +2897,8 @@ static int __init d40_probe(struct platform_device *pdev)
base->irq = platform_get_irq(pdev, 0);
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
-
if (ret) {
- dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
+ d40_err(&pdev->dev, "No IRQ defined\n");
goto failure;
}
@@ -3025,6 +2917,12 @@ failure:
kmem_cache_destroy(base->desc_slab);
if (base->virtbase)
iounmap(base->virtbase);
+
+ if (base->lcla_pool.dma_addr)
+ dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
+ SZ_1K * base->num_phy_chans,
+ DMA_TO_DEVICE);
+
if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
free_pages((unsigned long)base->lcla_pool.base,
base->lcla_pool.pages);
@@ -3049,7 +2947,7 @@ failure:
kfree(base);
}
- dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
+ d40_err(&pdev->dev, "probe failed\n");
return ret;
}
@@ -3060,7 +2958,7 @@ static struct platform_driver d40_driver = {
},
};
-int __init stedma40_init(void)
+static int __init stedma40_init(void)
{
return platform_driver_probe(&d40_driver, d40_probe);
}
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 0b096a38322d..cad9e1daedff 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -125,13 +125,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
static int d40_phy_fill_lli(struct d40_phy_lli *lli,
dma_addr_t data,
u32 data_size,
- int psize,
dma_addr_t next_lli,
u32 reg_cfg,
- bool term_int,
- u32 data_width,
- bool is_device)
+ struct stedma40_half_channel_info *info,
+ unsigned int flags)
{
+ bool addr_inc = flags & LLI_ADDR_INC;
+ bool term_int = flags & LLI_TERM_INT;
+ unsigned int data_width = info->data_width;
+ int psize = info->psize;
int num_elems;
if (psize == STEDMA40_PSIZE_PHY_1)
@@ -154,7 +156,7 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
* Distance to next element sized entry.
* Usually the size of the element unless you want gaps.
*/
- if (!is_device)
+ if (addr_inc)
lli->reg_elt |= (0x1 << data_width) <<
D40_SREG_ELEM_PHY_EIDX_POS;
@@ -198,47 +200,51 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
return seg_max;
}
-struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
- dma_addr_t addr,
- u32 size,
- int psize,
- dma_addr_t lli_phys,
- u32 reg_cfg,
- bool term_int,
- u32 data_width1,
- u32 data_width2,
- bool is_device)
+static struct d40_phy_lli *
+d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
+ dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
+ struct stedma40_half_channel_info *info,
+ struct stedma40_half_channel_info *otherinfo,
+ unsigned long flags)
{
+ bool lastlink = flags & LLI_LAST_LINK;
+ bool addr_inc = flags & LLI_ADDR_INC;
+ bool term_int = flags & LLI_TERM_INT;
+ bool cyclic = flags & LLI_CYCLIC;
int err;
dma_addr_t next = lli_phys;
int size_rest = size;
int size_seg = 0;
+ /*
+ * This piece may be split up based on d40_seg_size(); we only want the
+ * term int on the last part.
+ */
+ if (term_int)
+ flags &= ~LLI_TERM_INT;
+
do {
- size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+ size_seg = d40_seg_size(size_rest, info->data_width,
+ otherinfo->data_width);
size_rest -= size_seg;
- if (term_int && size_rest == 0)
- next = 0;
+ if (size_rest == 0 && term_int)
+ flags |= LLI_TERM_INT;
+
+ if (size_rest == 0 && lastlink)
+ next = cyclic ? first_phys : 0;
else
next = ALIGN(next + sizeof(struct d40_phy_lli),
D40_LLI_ALIGN);
- err = d40_phy_fill_lli(lli,
- addr,
- size_seg,
- psize,
- next,
- reg_cfg,
- !next,
- data_width1,
- is_device);
+ err = d40_phy_fill_lli(lli, addr, size_seg, next,
+ reg_cfg, info, flags);
if (err)
goto err;
lli++;
- if (!is_device)
+ if (addr_inc)
addr += size_seg;
} while (size_rest);
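The do/while above splits one buffer into several physical LLIs and must leave the terminal-interrupt flag on the last fragment only. A userspace sketch of that rule, with seg_size() standing in for d40_seg_size() and SEG_MAX a placeholder limit:

/* Illustration only -- not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define SEG_MAX		0x10000		/* placeholder per-link byte limit */
#define FLAG_TERM_INT	0x1u

static int seg_size(int rest)
{
	return rest > SEG_MAX ? SEG_MAX : rest;
}

int main(void)
{
	int rest = 3 * SEG_MAX + 123;		/* arbitrary buffer length */
	unsigned int flags = FLAG_TERM_INT;	/* caller asked for an irq */
	bool term_int = flags & FLAG_TERM_INT;
	int i = 0;

	flags &= ~FLAG_TERM_INT;		/* never on intermediate links */

	do {
		int seg = seg_size(rest);

		rest -= seg;
		if (rest == 0 && term_int)
			flags |= FLAG_TERM_INT;	/* only the final fragment */

		printf("fragment %d: %d bytes, term_int=%u\n",
		       i++, seg, flags & FLAG_TERM_INT);
	} while (rest);

	return 0;
}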
@@ -254,39 +260,35 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
struct d40_phy_lli *lli_sg,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width1,
- u32 data_width2,
- int psize)
+ struct stedma40_half_channel_info *info,
+ struct stedma40_half_channel_info *otherinfo,
+ unsigned long flags)
{
int total_size = 0;
int i;
struct scatterlist *current_sg = sg;
- dma_addr_t dst;
struct d40_phy_lli *lli = lli_sg;
dma_addr_t l_phys = lli_phys;
+ if (!target)
+ flags |= LLI_ADDR_INC;
+
for_each_sg(sg, current_sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(current_sg);
+ unsigned int len = sg_dma_len(current_sg);
+ dma_addr_t dst = target ?: sg_addr;
total_size += sg_dma_len(current_sg);
- if (target)
- dst = target;
- else
- dst = sg_phys(current_sg);
+ if (i == sg_len - 1)
+ flags |= LLI_TERM_INT | LLI_LAST_LINK;
l_phys = ALIGN(lli_phys + (lli - lli_sg) *
sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
- lli = d40_phy_buf_to_lli(lli,
- dst,
- sg_dma_len(current_sg),
- psize,
- l_phys,
- reg_cfg,
- sg_len - 1 == i,
- data_width1,
- data_width2,
- target == dst);
+ lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
+ reg_cfg, info, otherinfo, flags);
+
if (lli == NULL)
return -EINVAL;
}
@@ -295,45 +297,22 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
}
-void d40_phy_lli_write(void __iomem *virtbase,
- u32 phy_chan_num,
- struct d40_phy_lli *lli_dst,
- struct d40_phy_lli *lli_src)
-{
-
- writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
- writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
- writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
- writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
-
- writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
- writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
- writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
- writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
-
-}
-
/* DMA logical lli operations */
static void d40_log_lli_link(struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next)
+ int next, unsigned int flags)
{
+ bool interrupt = flags & LLI_TERM_INT;
u32 slos = 0;
u32 dlos = 0;
if (next != -EINVAL) {
slos = next * 2;
dlos = next * 2 + 1;
- } else {
+ }
+
+ if (interrupt) {
lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
}
@@ -348,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst,
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next)
+ int next, unsigned int flags)
{
- d40_log_lli_link(lli_dst, lli_src, next);
+ d40_log_lli_link(lli_dst, lli_src, next, flags);
writel(lli_src->lcsp02, &lcpa[0].lcsp0);
writel(lli_src->lcsp13, &lcpa[0].lcsp1);
@@ -361,9 +340,9 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next)
+ int next, unsigned int flags)
{
- d40_log_lli_link(lli_dst, lli_src, next);
+ d40_log_lli_link(lli_dst, lli_src, next, flags);
writel(lli_src->lcsp02, &lcla[0].lcsp02);
writel(lli_src->lcsp13, &lcla[0].lcsp13);
@@ -375,8 +354,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
dma_addr_t data, u32 data_size,
u32 reg_cfg,
u32 data_width,
- bool addr_inc)
+ unsigned int flags)
{
+ bool addr_inc = flags & LLI_ADDR_INC;
+
lli->lcsp13 = reg_cfg;
/* The number of elements to transfer */
@@ -395,67 +376,15 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
}
-int d40_log_sg_to_dev(struct scatterlist *sg,
- int sg_len,
- struct d40_log_lli_bidir *lli,
- struct d40_def_lcsp *lcsp,
- u32 src_data_width,
- u32 dst_data_width,
- enum dma_data_direction direction,
- dma_addr_t dev_addr)
-{
- int total_size = 0;
- struct scatterlist *current_sg = sg;
- int i;
- struct d40_log_lli *lli_src = lli->src;
- struct d40_log_lli *lli_dst = lli->dst;
-
- for_each_sg(sg, current_sg, sg_len, i) {
- total_size += sg_dma_len(current_sg);
-
- if (direction == DMA_TO_DEVICE) {
- lli_src =
- d40_log_buf_to_lli(lli_src,
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- dst_data_width,
- true);
- lli_dst =
- d40_log_buf_to_lli(lli_dst,
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- src_data_width,
- false);
- } else {
- lli_dst =
- d40_log_buf_to_lli(lli_dst,
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- src_data_width,
- true);
- lli_src =
- d40_log_buf_to_lli(lli_src,
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- dst_data_width,
- false);
- }
- }
- return total_size;
-}
-
-struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
dma_addr_t addr,
int size,
u32 lcsp13, /* src or dst*/
u32 data_width1,
u32 data_width2,
- bool addr_inc)
+ unsigned int flags)
{
+ bool addr_inc = flags & LLI_ADDR_INC;
struct d40_log_lli *lli = lli_sg;
int size_rest = size;
int size_seg = 0;
@@ -468,7 +397,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
addr,
size_seg,
lcsp13, data_width1,
- addr_inc);
+ flags);
if (addr_inc)
addr += size_seg;
lli++;
@@ -479,6 +408,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
+ dma_addr_t dev_addr,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
u32 data_width1, u32 data_width2)
@@ -487,14 +417,24 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
struct scatterlist *current_sg = sg;
int i;
struct d40_log_lli *lli = lli_sg;
+ unsigned long flags = 0;
+
+ if (!dev_addr)
+ flags |= LLI_ADDR_INC;
for_each_sg(sg, current_sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(current_sg);
+ unsigned int len = sg_dma_len(current_sg);
+ dma_addr_t addr = dev_addr ?: sg_addr;
+
total_size += sg_dma_len(current_sg);
- lli = d40_log_buf_to_lli(lli,
- sg_phys(current_sg),
- sg_dma_len(current_sg),
+
+ lli = d40_log_buf_to_lli(lli, addr, len,
lcsp13,
- data_width1, data_width2, true);
+ data_width1,
+ data_width2,
+ flags);
}
+
return total_size;
}
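With d40_log_sg_to_dev removed, both halves of a mem-to-dev transfer now go through this single helper; a minimal sketch of the calling convention (illustrative only: 'lli', 'lcsp', the widths and 'dev_fifo_addr' stand in for driver state set up in ste_dma40.c):
/* memory side: dev_addr == 0, so LLI_ADDR_INC is set internally */
total = d40_log_sg_to_lli(sgl, sg_len, 0, lli->src,
			  lcsp->lcsp1, src_width, dst_width);
/* device side: fixed FIFO address, no address increment */
total = d40_log_sg_to_lli(sgl, sg_len, dev_fifo_addr, lli->dst,
			  lcsp->lcsp3, dst_width, src_width);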
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9cc43495bea2..195ee65ee7f3 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -163,6 +163,22 @@
#define D40_DREG_LCEIS1 0x0B4
#define D40_DREG_LCEIS2 0x0B8
#define D40_DREG_LCEIS3 0x0BC
+#define D40_DREG_PSEG1 0x110
+#define D40_DREG_PSEG2 0x114
+#define D40_DREG_PSEG3 0x118
+#define D40_DREG_PSEG4 0x11C
+#define D40_DREG_PCEG1 0x120
+#define D40_DREG_PCEG2 0x124
+#define D40_DREG_PCEG3 0x128
+#define D40_DREG_PCEG4 0x12C
+#define D40_DREG_RSEG1 0x130
+#define D40_DREG_RSEG2 0x134
+#define D40_DREG_RSEG3 0x138
+#define D40_DREG_RSEG4 0x13C
+#define D40_DREG_RCEG1 0x140
+#define D40_DREG_RCEG2 0x144
+#define D40_DREG_RCEG3 0x148
+#define D40_DREG_RCEG4 0x14C
#define D40_DREG_STFU 0xFC8
#define D40_DREG_ICFG 0xFCC
#define D40_DREG_PERIPHID0 0xFE0
@@ -277,6 +293,13 @@ struct d40_def_lcsp {
/* Physical channels */
+enum d40_lli_flags {
+ LLI_ADDR_INC = 1 << 0,
+ LLI_TERM_INT = 1 << 1,
+ LLI_CYCLIC = 1 << 2,
+ LLI_LAST_LINK = 1 << 3,
+};
+
void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
u32 *src_cfg,
u32 *dst_cfg,
@@ -292,46 +315,15 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
struct d40_phy_lli *lli,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width1,
- u32 data_width2,
- int psize);
-
-struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
- dma_addr_t data,
- u32 data_size,
- int psize,
- dma_addr_t next_lli,
- u32 reg_cfg,
- bool term_int,
- u32 data_width1,
- u32 data_width2,
- bool is_device);
-
-void d40_phy_lli_write(void __iomem *virtbase,
- u32 phy_chan_num,
- struct d40_phy_lli *lli_dst,
- struct d40_phy_lli *lli_src);
+ struct stedma40_half_channel_info *info,
+ struct stedma40_half_channel_info *otherinfo,
+ unsigned long flags);
/* Logical channels */
-struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
- dma_addr_t addr,
- int size,
- u32 lcsp13, /* src or dst*/
- u32 data_width1, u32 data_width2,
- bool addr_inc);
-
-int d40_log_sg_to_dev(struct scatterlist *sg,
- int sg_len,
- struct d40_log_lli_bidir *lli,
- struct d40_def_lcsp *lcsp,
- u32 src_data_width,
- u32 dst_data_width,
- enum dma_data_direction direction,
- dma_addr_t dev_addr);
-
int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
+ dma_addr_t dev_addr,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
u32 data_width1, u32 data_width2);
@@ -339,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next);
+ int next, unsigned int flags);
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next);
+ int next, unsigned int flags);
#endif /* STE_DMA40_LLI_H */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 3b88a4e7c98a..d2c75feff7df 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -27,6 +27,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/timb_dma.h>
@@ -629,7 +630,7 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
desc_node)
list_move(&td_desc->desc_node, &td_chan->free_list);
- /* now tear down the runnning */
+ /* now tear down the running */
__td_finish(td_chan);
spin_unlock_bh(&td_chan->lock);
@@ -684,7 +685,7 @@ static irqreturn_t td_irq(int irq, void *devid)
static int __devinit td_probe(struct platform_device *pdev)
{
- struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma_platform_data *pdata = mfd_get_data(pdev);
struct timb_dma *td;
struct resource *iomem;
int irq;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index fe70a341bd8b..fac1a2002e67 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,7 +7,7 @@
menuconfig EDAC
bool "EDAC (Error Detection And Correction) reporting"
depends on HAS_IOMEM
- depends on X86 || PPC
+ depends on X86 || PPC || TILE
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
@@ -282,4 +282,12 @@ config EDAC_CPC925
a companion chip to the PowerPC 970 family of
processors.
+config EDAC_TILE
+ tristate "Tilera Memory Controller"
+ depends on EDAC_MM_EDAC && TILE
+ default y
+ help
+ Support for error detection and correction on the
+ Tilera memory controller.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index ba2898b3639b..3e239133e29e 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
+obj-$(CONFIG_EDAC_TILE) += tile_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 23e03554f0d3..0be30e978c85 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -25,59 +25,12 @@ static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;
/*
- * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
- * later.
- */
-static int ddr2_dbam_revCG[] = {
- [0] = 32,
- [1] = 64,
- [2] = 128,
- [3] = 256,
- [4] = 512,
- [5] = 1024,
- [6] = 2048,
-};
-
-static int ddr2_dbam_revD[] = {
- [0] = 32,
- [1] = 64,
- [2 ... 3] = 128,
- [4] = 256,
- [5] = 512,
- [6] = 256,
- [7] = 512,
- [8 ... 9] = 1024,
- [10] = 2048,
-};
-
-static int ddr2_dbam[] = { [0] = 128,
- [1] = 256,
- [2 ... 4] = 512,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-static int ddr3_dbam[] = { [0] = -1,
- [1] = 256,
- [2] = 512,
- [3 ... 4] = -1,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
* or higher value'.
*
*FIXME: Produce a better mapping/linearisation.
*/
-
-
struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
@@ -107,6 +60,79 @@ struct scrubrate {
{ 0x00, 0UL}, /* scrubbing off */
};
+static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 *val, const char *func)
+{
+ int err = 0;
+
+ err = pci_read_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error reading F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func)
+{
+ int err = 0;
+
+ err = pci_write_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error writing to F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+/*
+ *
+ * Depending on the family, F2 DCT reads need special handling:
+ *
+ * K8: has a single DCT only
+ *
+ * F10h: each DCT has its own set of regs
+ * DCT0 -> F2x040..
+ * DCT1 -> F2x140..
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
+ *
+ */
+static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ if (addr >= 0x100)
+ return -EINVAL;
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ u32 reg = 0;
+ u8 dct = 0;
+
+ if (addr >= 0x140 && addr <= 0x1a0) {
+ dct = 1;
+ addr -= 0x100;
+ }
+
+ amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+ reg &= 0xfffffffe;
+ reg |= dct;
+ amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
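A worked example of the indirection these helpers hide (illustrative only, not part of the patch):
/*
 * amd64_read_dct_pci_cfg(pvt, 0x148, &val) on F15h sets
 * F1x10C[DctCfgSel] to 1, rewrites the offset to 0x048 and performs a
 * plain F2 read; on F10h the same call reads F2x148 directly, and on
 * K8 any offset >= 0x100 returns -EINVAL since there is only one DCT.
 */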
/*
* Memory scrubber control interface. For K8, memory scrubbing is handled by
* hardware and can involve L2 cache, dcache as well as the main memory. With
@@ -156,7 +182,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+ pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
if (scrubval)
return scrubrates[i].bandwidth;
@@ -167,8 +193,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
+ u32 min_scrubrate = 0x5;
+
+ if (boot_cpu_data.x86 == 0xf)
+ min_scrubrate = 0x0;
- return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
+ return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
@@ -177,7 +207,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
u32 scrubval = 0;
int i, retval = -EINVAL;
- amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
scrubval = scrubval & 0x001F;
@@ -192,63 +222,14 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
return retval;
}
-/* Map from a CSROW entry to the mask entry that operates on it */
-static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
-{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
- return csrow;
- else
- return csrow >> 1;
-}
-
-/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
-static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
-{
- if (dct == 0)
- return pvt->dcsb0[csrow];
- else
- return pvt->dcsb1[csrow];
-}
-
-/*
- * Return the 'mask' address the i'th CS entry. This function is needed because
- * there number of DCSM registers on Rev E and prior vs Rev F and later is
- * different.
- */
-static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
-{
- if (dct == 0)
- return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
- else
- return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
-}
-
-
/*
- * In *base and *limit, pass back the full 40-bit base and limit physical
- * addresses for the node given by node_id. This information is obtained from
- * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
- * base and limit addresses are of type SysAddr, as defined at the start of
- * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
- * in the address range they represent.
+ * returns true if the SysAddr given by sys_addr matches the
+ * DRAM base/limit associated with node_id
*/
-static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
- u64 *base, u64 *limit)
+static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
+ unsigned nid)
{
- *base = pvt->dram_base[node_id];
- *limit = pvt->dram_limit[node_id];
-}
-
-/*
- * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
- * with node_id
- */
-static int amd64_base_limit_match(struct amd64_pvt *pvt,
- u64 sys_addr, int node_id)
-{
- u64 base, limit, addr;
-
- amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+ u64 addr;
/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
* all ones if the most significant implemented address bit is 1.
@@ -258,7 +239,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt,
*/
addr = sys_addr & 0x000000ffffffffffull;
- return (addr >= base) && (addr <= limit);
+ return ((addr >= get_dram_base(pvt, nid)) &&
+ (addr <= get_dram_limit(pvt, nid)));
}
/*
@@ -271,7 +253,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
u64 sys_addr)
{
struct amd64_pvt *pvt;
- int node_id;
+ unsigned node_id;
u32 intlv_en, bits;
/*
@@ -285,10 +267,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
* registers. Therefore we arbitrarily choose to read it from the
* register for node 0.
*/
- intlv_en = pvt->dram_IntlvEn[0];
+ intlv_en = dram_intlv_en(pvt, 0);
if (intlv_en == 0) {
- for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
+ for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
if (amd64_base_limit_match(pvt, sys_addr, node_id))
goto found;
}
@@ -305,10 +287,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
bits = (((u32) sys_addr) >> 12) & intlv_en;
for (node_id = 0; ; ) {
- if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
+ if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
break; /* intlv_sel field matches */
- if (++node_id >= DRAM_REG_COUNT)
+ if (++node_id >= DRAM_RANGES)
goto err_no_match;
}
@@ -321,7 +303,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
}
found:
- return edac_mc_find(node_id);
+ return edac_mc_find((int)node_id);
err_no_match:
debugf2("sys_addr 0x%lx doesn't match any node\n",
@@ -331,37 +313,50 @@ err_no_match:
}
/*
- * Extract the DRAM CS base address from selected csrow register.
+ * compute the CS base address of the @csrow on the DRAM controller @dct.
+ * For details see F2x[5C:40] in the processor's BKDG
*/
-static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
+static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
+ u64 *base, u64 *mask)
{
- return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
- pvt->dcs_shift;
-}
+ u64 csbase, csmask, base_bits, mask_bits;
+ u8 addr_shift;
-/*
- * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
- */
-static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
-{
- u64 dcsm_bits, other_bits;
- u64 mask;
-
- /* Extract bits from DRAM CS Mask. */
- dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow];
+ base_bits = GENMASK(21, 31) | GENMASK(9, 15);
+ mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
+ addr_shift = 4;
+ } else {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow >> 1];
+ addr_shift = 8;
- other_bits = pvt->dcsm_mask;
- other_bits = ~(other_bits << pvt->dcs_shift);
+ if (boot_cpu_data.x86 == 0x15)
+ base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
+ else
+ base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);
+ }
- /*
- * The extracted bits from DCSM belong in the spaces represented by
- * the cleared bits in other_bits.
- */
- mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+ *base = (csbase & base_bits) << addr_shift;
- return mask;
+ *mask = ~0ULL;
+ /* poke holes for the csmask */
+ *mask &= ~(mask_bits << addr_shift);
+ /* OR them in */
+ *mask |= (csmask & mask_bits) << addr_shift;
}
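For orientation, a worked example of the bit positions above (illustrative; GENMASK(lo, hi) is the driver-local low..high mask used throughout this patch):
/*
 * Rev F and later: GENMASK(19, 28) | GENMASK(5, 13) selects DCSB/DCSM
 * bits [28:19] and [13:5]; shifted by addr_shift == 8 they land at
 * address bits [36:27] and [21:13], the same ranges described by the
 * open-coded constants removed further down.
 */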
+#define for_each_chip_select(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].b_cnt; i++)
+
+#define chip_select_base(i, dct, pvt) \
+ pvt->csels[dct].csbases[i]
+
+#define for_each_chip_select_mask(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -374,19 +369,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
pvt = mci->pvt_info;
- /*
- * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
- * base/mask register pair, test the condition shown near the start of
- * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
- */
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
-
- /* This DRAM chip select is disabled on this node */
- if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+ for_each_chip_select(csrow, 0, pvt) {
+ if (!csrow_enabled(csrow, 0, pvt))
continue;
- base = base_from_dct_base(pvt, csrow);
- mask = ~mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
+
+ mask = ~mask;
if ((input_addr & mask) == (base & mask)) {
debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
@@ -396,7 +385,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
return csrow;
}
}
-
debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
(unsigned long)input_addr, pvt->mc_node_id);
@@ -404,19 +392,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
}
/*
- * Return the base value defined by the DRAM Base register for the node
- * represented by mci. This function returns the full 40-bit value despite the
- * fact that the register only stores bits 39-24 of the value. See section
- * 3.4.4.1 (BKDG #26094, K8, revA-E)
- */
-static inline u64 get_dram_base(struct mem_ctl_info *mci)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
-
- return pvt->dram_base[pvt->mc_node_id];
-}
-
-/*
* Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
* for the node represented by mci. Info is passed back in *hole_base,
* *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
@@ -445,14 +420,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
return 1;
}
- /* only valid for Fam10h */
- if (boot_cpu_data.x86 == 0x10 &&
- (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+ /* valid for Fam10h and above */
+ if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
- if ((pvt->dhar & DHAR_VALID) == 0) {
+ if (!dhar_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
pvt->mc_node_id);
return 1;
@@ -476,15 +450,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
* addresses in the hole so that they start at 0x100000000.
*/
- base = dhar_base(pvt->dhar);
+ base = dhar_base(pvt);
*hole_base = base;
*hole_size = (0x1ull << 32) - base;
if (boot_cpu_data.x86 > 0xf)
- *hole_offset = f10_dhar_offset(pvt->dhar);
+ *hole_offset = f10_dhar_offset(pvt);
else
- *hole_offset = k8_dhar_offset(pvt->dhar);
+ *hole_offset = k8_dhar_offset(pvt);
debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
pvt->mc_node_id, (unsigned long)*hole_base,
@@ -525,10 +499,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
*/
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
+ struct amd64_pvt *pvt = mci->pvt_info;
u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
int ret = 0;
- dram_base = get_dram_base(mci);
+ dram_base = get_dram_base(pvt, pvt->mc_node_id);
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
&hole_size);
@@ -556,7 +531,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
* section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
* Programmer's Manual Volume 1 Application Programming.
*/
- dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+ dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
"DramAddr 0x%lx\n", (unsigned long)sys_addr,
@@ -592,9 +567,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
* See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
* concerning translating a DramAddr to an InputAddr.
*/
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
- input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
- (dram_addr & 0xfff);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
+ input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
+ (dram_addr & 0xfff);
debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
intlv_shift, (unsigned long)dram_addr,
@@ -628,7 +603,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
struct amd64_pvt *pvt;
- int node_id, intlv_shift;
+ unsigned node_id, intlv_shift;
u64 bits, dram_addr;
u32 intlv_sel;
@@ -642,10 +617,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
*/
pvt = mci->pvt_info;
node_id = pvt->mc_node_id;
- BUG_ON((node_id < 0) || (node_id > 7));
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+ BUG_ON(node_id > 7);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
if (intlv_shift == 0) {
debugf1(" InputAddr 0x%lx translates to DramAddr of "
"same value\n", (unsigned long)input_addr);
@@ -653,10 +628,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
return input_addr;
}
- bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
- (input_addr & 0xfff);
+ bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
+ (input_addr & 0xfff);
- intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+ intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
dram_addr = bits + (intlv_sel << 12);
debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
@@ -673,7 +648,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+ u64 hole_base, hole_offset, hole_size, base, sys_addr;
int ret = 0;
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
@@ -691,7 +666,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
}
}
- amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+ base = get_dram_base(pvt, pvt->mc_node_id);
sys_addr = dram_addr + base;
/*
@@ -736,13 +711,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
u64 base, mask;
pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
+ BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
- base = base_from_dct_base(pvt, csrow);
- mask = mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
*input_addr_min = base & ~mask;
- *input_addr_max = base | mask | pvt->dcs_mask_notused;
+ *input_addr_max = base | mask;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
@@ -775,18 +749,13 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
-static u16 extract_syndrome(struct err_regs *err)
-{
- return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
-}
-
/*
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
- int bit;
+ u8 bit;
enum dev_type edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
@@ -799,8 +768,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
return edac_cap;
}
-
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
@@ -813,8 +781,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
debugf1(" PAR/ERR parity: %s\n",
(dclr & BIT(8)) ? "enabled" : "disabled");
- debugf1(" DCT 128bit mode width: %s\n",
- (dclr & BIT(11)) ? "128b" : "64b");
+ if (boot_cpu_data.x86 == 0x10)
+ debugf1(" DCT 128bit mode width: %s\n",
+ (dclr & BIT(11)) ? "128b" : "64b");
debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
(dclr & BIT(12)) ? "yes" : "no",
@@ -824,16 +793,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
}
/* Display and decode various NB registers for debug purposes. */
-static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+static void dump_misc_regs(struct amd64_pvt *pvt)
{
debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
debugf1(" NB two channel DRAM capable: %s\n",
- (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+ (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
amd64_dump_dramcfg_low(pvt->dclr0, 0);
@@ -841,130 +810,84 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
"offset: 0x%08x\n",
- pvt->dhar,
- dhar_base(pvt->dhar),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
- : f10_dhar_offset(pvt->dhar));
+ pvt->dhar, dhar_base(pvt),
+ (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
+ : f10_dhar_offset(pvt));
- debugf1(" DramHoleValid: %s\n",
- (pvt->dhar & DHAR_VALID) ? "yes" : "no");
+ debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- amd64_debug_display_dimm_sizes(0, pvt);
+ amd64_debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
if (boot_cpu_data.x86 == 0xf)
return;
- amd64_debug_display_dimm_sizes(1, pvt);
+ amd64_debug_display_dimm_sizes(pvt, 1);
- amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
+ amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
amd64_dump_dramcfg_low(pvt->dclr1, 1);
}
-/* Read in both of DBAM registers */
-static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
-{
- amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
-
- if (boot_cpu_data.x86 >= 0x10)
- amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
-}
-
/*
- * NOTE: CPU Revision Dependent code: Rev E and Rev F
- *
- * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
- * set the shift factor for the DCSB and DCSM values.
- *
- * ->dcs_mask_notused, RevE:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of section
- * 3.5.4 (p. 84).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
- * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
- * gaps.
- *
- * ->dcs_mask_notused, RevF and later:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of NPT section
- * 4.5.4 (p. 87).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [36:27] and [21:13].
- *
- * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
- * which are all bits in the above-mentioned gaps.
+ * see BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
*/
-static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+static void prep_chip_selects(struct amd64_pvt *pvt)
{
-
if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
- pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_E_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 8;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
} else {
- pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 4;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
/*
- * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
-static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int cs, reg;
+ int cs;
- amd64_set_dct_base_and_mask(pvt);
+ prep_chip_selects(pvt);
- for (cs = 0; cs < pvt->cs_count; cs++) {
- reg = K8_DCSB0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
+ for_each_chip_select(cs, 0, pvt) {
+ int reg0 = DCSB0 + (cs * 4);
+ int reg1 = DCSB1 + (cs * 4);
+ u32 *base0 = &pvt->csels[0].csbases[cs];
+ u32 *base1 = &pvt->csels[1].csbases[cs];
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's base */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSB1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsb1[cs]))
- debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb1[cs], reg);
- } else {
- pvt->dcsb1[cs] = 0;
- }
+ cs, *base0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
+ debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, reg1);
}
- for (cs = 0; cs < pvt->num_dcsm; cs++) {
- reg = K8_DCSM0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
+ for_each_chip_select_mask(cs, 0, pvt) {
+ int reg0 = DCSM0 + (cs * 4);
+ int reg1 = DCSM1 + (cs * 4);
+ u32 *mask0 = &pvt->csels[0].csmasks[cs];
+ u32 *mask1 = &pvt->csels[1].csmasks[cs];
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's mask */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSM1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsm1[cs]))
- debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm1[cs], reg);
- } else {
- pvt->dcsm1[cs] = 0;
- }
+ cs, *mask0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
+ debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, reg1);
}
}
@@ -972,7 +895,10 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
enum mem_type type;
- if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+ /* F15h supports only DDR3 */
+ if (boot_cpu_data.x86 >= 0x15)
+ type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
+ else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
if (pvt->dchr0 & DDR3_MODE)
type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
else
@@ -986,26 +912,14 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
return type;
}
-/*
- * Read the DRAM Configuration Low register. It differs between CG, D & E revs
- * and the later RevF memory controllers (DDR vs DDR2)
- *
- * Return:
- * number of memory channels in operation
- * Pass back:
- * contents of the DCL0_LOW register
- */
+/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
- int flag, err = 0;
-
- err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- if (err)
- return err;
+ int flag;
if (pvt->ext_model >= K8_REV_F)
/* RevF (NPT) and later */
- flag = pvt->dclr0 & F10_WIDTH_128;
+ flag = pvt->dclr0 & WIDTH_128;
else
/* RevE and earlier */
flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -1016,55 +930,47 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
return (flag) ? 2 : 1;
}
-/* extract the ERROR ADDRESS for the K8 CPUs */
-static u64 k8_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
+static u64 get_error_address(struct mce *m)
{
- return (((u64) (info->nbeah & 0xff)) << 32) +
- (info->nbeal & ~0x03);
+ u8 start_bit = 1;
+ u8 end_bit = 47;
+
+ if (boot_cpu_data.x86 == 0xf) {
+ start_bit = 3;
+ end_bit = 39;
+ }
+
+ return m->addr & GENMASK(start_bit, end_bit);
}
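A quick sanity check of the masking (illustrative only):
/*
 * An MC4_ADDR value of 0xdeadbeef is trimmed to 0xdeadbee8 on K8
 * (bits [39:3]) and to 0xdeadbeee on F10h and later (bits [47:1]).
 */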
-/*
- * Read the Base and Limit registers for K8 based Memory controllers; extract
- * fields from the 'raw' reg into separate data fields
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
- */
-static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
- u32 low;
- u32 off = dram << 3; /* 8 bytes between DRAM entries */
+ int off = range << 3;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
- /* Extract parts into separate data entries */
- pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
- pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
- pvt->dram_rw_en[dram] = (low & 0x3);
+ if (boot_cpu_data.x86 == 0xf)
+ return;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
+ if (!dram_rw(pvt, range))
+ return;
- /*
- * Extract parts into separate data entries. Limit is the HIGHEST memory
- * location of the region, so lower 24 bits need to be all ones
- */
- pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
- pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
- pvt->dram_DstNode[dram] = (low & 0x7);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
}
-static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info, u64 sys_addr)
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct mem_ctl_info *src_mci;
+ struct amd64_pvt *pvt = mci->pvt_info;
int channel, csrow;
u32 page, offset;
- u16 syndrome;
-
- syndrome = extract_syndrome(err_info);
/* CHIPKILL enabled */
- if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
+ if (pvt->nbcfg & NBCFG_CHIPKILL) {
channel = get_channel_from_ecc_syndrome(mci, syndrome);
if (channel < 0) {
/*
@@ -1113,18 +1019,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
}
}
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr2_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
- if (pvt->ext_model >= K8_REV_F)
- dbam_map = ddr2_dbam;
- else if (pvt->ext_model >= K8_REV_D)
- dbam_map = ddr2_dbam_revD;
+ if (i <= 2)
+ shift = i;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam_revCG;
+ shift = (i + 1) >> 1;
- return dbam_map[cs_mode];
+ return 128 << (shift + !!dct_width);
+}
+
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
+{
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ if (pvt->ext_model >= K8_REV_F) {
+ WARN_ON(cs_mode > 11);
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+ }
+ else if (pvt->ext_model >= K8_REV_D) {
+ WARN_ON(cs_mode > 10);
+
+ if (cs_mode == 3 || cs_mode == 8)
+ return 32 << (cs_mode - 1);
+ else
+ return 32 << cs_mode;
+ }
+ else {
+ WARN_ON(cs_mode > 6);
+ return 32 << cs_mode;
+ }
}
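Spot-checking the closed form against the ddr2_dbam[] table removed at the top of this file (illustrative only):
/*
 * ddr2_cs_size(cs_mode, false): 0 -> 128, 1 -> 256, 2..4 -> 512,
 * 5..6 -> 1024, 7..8 -> 2048, 9..10 -> 4096, 11 -> 8192 (MB), exactly
 * the removed ddr2_dbam[] entries; a 128-bit DCT (WIDTH_128 in DCLR)
 * doubles each value via the !!dct_width term.
 */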
/*
@@ -1135,17 +1064,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
* Pass back:
* contents of the DCL0_LOW register
*/
-static int f10_early_channel_count(struct amd64_pvt *pvt)
+static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
- int dbams[] = { DBAM0, DBAM1 };
int i, j, channels = 0;
- u32 dbam;
- /* If we are in 128 bit mode, then we are using 2 channels */
- if (pvt->dclr0 & F10_WIDTH_128) {
- channels = 2;
- return channels;
- }
+ /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
+ if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+ return 2;
/*
* Need to check if in unganged mode: In such, there are 2 channels,
@@ -1162,9 +1087,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
* is more than just one DIMM present in unganged mode. Need to check
* both controllers since DIMMs can be placed in either one.
*/
- for (i = 0; i < ARRAY_SIZE(dbams); i++) {
- if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
- goto err_reg;
+ for (i = 0; i < 2; i++) {
+ u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
for (j = 0; j < 4; j++) {
if (DBAM_DIMM(j, dbam) > 0) {
@@ -1180,216 +1104,191 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
amd64_info("MCT channel count: %d\n", channels);
return channels;
-
-err_reg:
- return -1;
-
}
-static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr3_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
+ int cs_size = 0;
- if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
- dbam_map = ddr3_dbam;
+ if (i == 0 || i == 3 || i == 4)
+ cs_size = -1;
+ else if (i <= 2)
+ shift = i;
+ else if (i == 12)
+ shift = 7;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam;
+ shift = (i + 1) >> 1;
+
+ if (cs_size != -1)
+ cs_size = (128 * (1 << !!dct_width)) << shift;
- return dbam_map[cs_mode];
+ return cs_size;
}
-static u64 f10_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- return (((u64) (info->nbeah & 0xffff)) << 32) +
- (info->nbeal & ~0x01);
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ WARN_ON(cs_mode > 11);
+
+ if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+ return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
+ else
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
- * Read the Base and Limit registers for F10 based Memory controllers. Extract
- * fields from the 'raw' reg into separate data fields.
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
+ * F15h supports only 64bit DCT interfaces
*/
-static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
-
- low_offset = K8_DRAM_BASE_LOW + (dram << 3);
- high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
-
- /* read the 'raw' DRAM BASE Address register */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);
-
- /* Extract parts into separate data entries */
- pvt->dram_rw_en[dram] = (low_base & 0x3);
-
- if (pvt->dram_rw_en[dram] == 0)
- return;
-
- pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
-
- pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
- (((u64)low_base & 0xFFFF0000) << 8);
-
- low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
- high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
-
- /* read the 'raw' LIMIT registers */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);
-
- pvt->dram_DstNode[dram] = (low_limit & 0x7);
- pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
+ WARN_ON(cs_mode > 12);
- /*
- * Extract address values and form a LIMIT address. Limit is the HIGHEST
- * memory location of the region, so low 24 bits need to be all ones.
- */
- pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
- (((u64) low_limit & 0xFFFF0000) << 8) |
- 0x00FFFFFF;
+ return ddr3_cs_size(cs_mode, false);
}
-static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
+static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
- if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
- &pvt->dram_ctl_select_low)) {
- debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
- "High range addresses at: 0x%x\n",
- pvt->dram_ctl_select_low,
- dct_sel_baseaddr(pvt));
+ if (boot_cpu_data.x86 == 0xf)
+ return;
+
+ if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
+ debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
+ pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
- debugf0(" DCT mode: %s, All DCTs on: %s\n",
- (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
- (dct_dram_enabled(pvt) ? "yes" : "no"));
+ debugf0(" DCTs operate in %s mode.\n",
+ (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
if (!dct_ganging_enabled(pvt))
debugf0(" Address range split per DCT: %s\n",
(dct_high_range_enabled(pvt) ? "yes" : "no"));
- debugf0(" DCT data interleave for ECC: %s, "
+ debugf0(" data interleave for ECC: %s, "
"DRAM cleared since last warm reset: %s\n",
(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
(dct_memory_cleared(pvt) ? "yes" : "no"));
- debugf0(" DCT channel interleave: %s, "
- "DCT interleave bits selector: 0x%x\n",
+ debugf0(" channel interleave: %s, "
+ "interleave bits selector: 0x%x\n",
(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
dct_sel_interleave_addr(pvt));
}
- amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
- &pvt->dram_ctl_select_high);
+ amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
- * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
+ * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
* Interleaving Modes.
*/
-static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
- int hi_range_sel, u32 intlv_en)
+static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ bool hi_range_sel, u8 intlv_en)
{
- u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+ u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
if (dct_ganging_enabled(pvt))
- cs = 0;
- else if (hi_range_sel)
- cs = dct_sel_high;
- else if (dct_interleave_enabled(pvt)) {
- /*
- * see F2x110[DctSelIntLvAddr] - channel interleave mode
- */
- if (dct_sel_interleave_addr(pvt) == 0)
- cs = sys_addr >> 6 & 1;
- else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
- temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+ return 0;
- if (dct_sel_interleave_addr(pvt) & 1)
- cs = (sys_addr >> 9 & 1) ^ temp;
- else
- cs = (sys_addr >> 6 & 1) ^ temp;
- } else if (intlv_en & 4)
- cs = sys_addr >> 15 & 1;
- else if (intlv_en & 2)
- cs = sys_addr >> 14 & 1;
- else if (intlv_en & 1)
- cs = sys_addr >> 13 & 1;
- else
- cs = sys_addr >> 12 & 1;
- } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
- cs = ~dct_sel_high & 1;
- else
- cs = 0;
+ if (hi_range_sel)
+ return dct_sel_high;
- return cs;
-}
+ /*
+ * see F2x110[DctSelIntLvAddr] - channel interleave mode
+ */
+ if (dct_interleave_enabled(pvt)) {
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
-static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
-{
- if (intlv_en == 1)
- return 1;
- else if (intlv_en == 3)
- return 2;
- else if (intlv_en == 7)
- return 3;
+ /* return DCT select function: 0=DCT0, 1=DCT1 */
+ if (!intlv_addr)
+ return sys_addr >> 6 & 1;
+
+ if (intlv_addr & 0x2) {
+ u8 shift = intlv_addr & 0x1 ? 9 : 6;
+ u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+
+ return ((sys_addr >> shift) & 1) ^ temp;
+ }
+
+ return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
+ }
+
+ if (dct_high_range_enabled(pvt))
+ return ~dct_sel_high & 1;
return 0;
}
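The DctSelIntLvAddr cases above, spelled out (illustrative only):
/*
 * 0x0: DCTs alternate on sys_addr bit 6; 0x2 and 0x3 XOR bit 6 or 9
 * with the parity of sys_addr[20:16] ("hash 6" / "hash 9"); 0x1 uses
 * bit (12 + hweight8(intlv_en)), e.g. bit 13 when interleaving across
 * two nodes (intlv_en == 1).
 */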
-/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
-static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
- u32 dct_sel_base_addr,
- u64 dct_sel_base_off,
- u32 hole_valid, u32 hole_off,
- u64 dram_base)
+/* Convert the sys_addr to the normalized DCT address */
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, bool hi_rng,
+ u32 dct_sel_base_addr)
{
u64 chan_off;
+ u64 dram_base = get_dram_base(pvt, range);
+ u64 hole_off = f10_dhar_offset(pvt);
+ u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
- if (hi_range_sel) {
- if (!(dct_sel_base_addr & 0xFFFF0000) &&
- hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ if (hi_rng) {
+ /*
+ * if
+ * base address of high range is below 4Gb
+ * (bits [47:27] at [31:11])
+ * DRAM address space on this DCT is hoisted above 4Gb &&
+ * sys_addr > 4Gb
+ *
+ * remove hole offset from sys_addr
+ * else
+ * remove high range offset from sys_addr
+ */
+ if ((!(dct_sel_base_addr >> 16) ||
+ dct_sel_base_addr < dhar_base(pvt)) &&
+ dhar_valid(pvt) &&
+ (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
chan_off = dct_sel_base_off;
} else {
- if (hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ /*
+ * if
+ * we have a valid hole &&
+ * sys_addr > 4Gb
+ *
+ * remove hole
+ * else
+ * remove dram base to normalize to DCT address
+ */
+ if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
- chan_off = dram_base & 0xFFFFF8000000ULL;
+ chan_off = dram_base;
}
- return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
- (chan_off & 0x0000FFFFFF800000ULL);
+ return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
}
-/* Hack for the time being - Can we get this from BIOS?? */
-#define CH0SPARE_RANK 0
-#define CH1SPARE_RANK 1
-
/*
* checks if the csrow passed in is marked as SPARED, if so returns the new
* spare row
*/
-static inline int f10_process_possible_spare(int csrow,
- u32 cs, struct amd64_pvt *pvt)
-{
- u32 swap_done;
- u32 bad_dram_cs;
-
- /* Depending on channel, isolate respective SPARING info */
- if (cs) {
- swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH1SPARE_RANK;
- } else {
- swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH0SPARE_RANK;
+static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
+{
+ int tmp_cs;
+
+ if (online_spare_swap_done(pvt, dct) &&
+ csrow == online_spare_bad_dramcs(pvt, dct)) {
+
+ for_each_chip_select(tmp_cs, dct, pvt) {
+ if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
+ csrow = tmp_cs;
+ break;
+ }
+ }
}
return csrow;
}
@@ -1402,11 +1301,11 @@ static inline int f10_process_possible_spare(int csrow,
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
-static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- u32 cs_base, cs_mask;
+ u64 cs_base, cs_mask;
int cs_found = -EINVAL;
int csrow;
@@ -1416,39 +1315,25 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
pvt = mci->pvt_info;
- debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
-
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
+ debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
- cs_base = amd64_get_dct_base(pvt, cs, csrow);
- if (!(cs_base & K8_DCSB_CS_ENABLE))
+ for_each_chip_select(csrow, dct, pvt) {
+ if (!csrow_enabled(csrow, dct, pvt))
continue;
- /*
- * We have an ENABLED CSROW, Isolate just the MASK bits of the
- * target: [28:19] and [13:5], which map to [36:27] and [21:13]
- * of the actual address.
- */
- cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
-
- /*
- * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
- * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
- */
- cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+ get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
- debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
- csrow, cs_base, cs_mask);
+ debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
+ csrow, cs_base, cs_mask);
- cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+ cs_mask = ~cs_mask;
- debugf1(" Final CSMask=0x%x\n", cs_mask);
- debugf1(" (InputAddr & ~CSMask)=0x%x "
- "(CSBase & ~CSMask)=0x%x\n",
- (in_addr & ~cs_mask), (cs_base & ~cs_mask));
+ debugf1(" (InputAddr & ~CSMask)=0x%llx "
+ "(CSBase & ~CSMask)=0x%llx\n",
+ (in_addr & cs_mask), (cs_base & cs_mask));
- if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
- cs_found = f10_process_possible_spare(csrow, cs, pvt);
+ if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+ cs_found = f10_process_possible_spare(pvt, dct, csrow);
debugf1(" MATCH csrow=%d\n", cs_found);
break;
@@ -1457,38 +1342,75 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
return cs_found;
}
-/* For a given @dram_range, check if @sys_addr falls within it. */
-static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
- u64 sys_addr, int *nid, int *chan_sel)
+/*
+ * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
+ * swapped with a region located at the bottom of memory so that the GPU can use
+ * the interleaved region and thus two channels.
+ */
+static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
- int node_id, cs_found = -EINVAL, high_range = 0;
- u32 intlv_en, intlv_sel, intlv_shift, hole_off;
- u32 hole_valid, tmp, dct_sel_base, channel;
- u64 dram_base, chan_addr, dct_sel_base_off;
+ u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
- dram_base = pvt->dram_base[dram_range];
- intlv_en = pvt->dram_IntlvEn[dram_range];
+ if (boot_cpu_data.x86 == 0x10) {
+ /* only revC3 and revE have that feature */
+ if (boot_cpu_data.x86_model < 4 ||
+ (boot_cpu_data.x86_model < 0xa &&
+ boot_cpu_data.x86_mask < 3))
+ return sys_addr;
+ }
- node_id = pvt->dram_DstNode[dram_range];
- intlv_sel = pvt->dram_IntlvSel[dram_range];
+ amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
- debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
- dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
+ if (!(swap_reg & 0x1))
+ return sys_addr;
- /*
- * This assumes that one node's DHAR is the same as all the other
- * nodes' DHAR.
- */
- hole_off = (pvt->dhar & 0x0000FF80);
- hole_valid = (pvt->dhar & 0x1);
- dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+ swap_base = (swap_reg >> 3) & 0x7f;
+ swap_limit = (swap_reg >> 11) & 0x7f;
+ rgn_size = (swap_reg >> 20) & 0x7f;
+ tmp_addr = sys_addr >> 27;
- debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
- hole_off, hole_valid, intlv_sel);
+ if (!(sys_addr >> 34) &&
+ (((tmp_addr >= swap_base) &&
+ (tmp_addr <= swap_limit)) ||
+ (tmp_addr < rgn_size)))
+ return sys_addr ^ (u64)swap_base << 27;
+
+ return sys_addr;
+}
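A worked example of the XOR swap (illustrative only, assuming swap_limit >= 0x10 and a nonzero rgn_size):
/*
 * With swap_base == 0x10 the swapped window starts at 16 * 128M = 2G:
 * an error address of 0x2000 in the bottom region becomes
 * 0x2000 ^ (0x10 << 27) = 0x80002000, and 0x80002000 maps back to
 * 0x2000, so the two 128M-granular windows trade places below 16G.
 */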
+
+/* For a given @dram_range, check if @sys_addr falls within it. */
+static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, int *nid, int *chan_sel)
+{
+ int cs_found = -EINVAL;
+ u64 chan_addr;
+ u32 dct_sel_base;
+ u8 channel;
+ bool high_range = false;
+
+ u8 node_id = dram_dst_node(pvt, range);
+ u8 intlv_en = dram_intlv_en(pvt, range);
+ u32 intlv_sel = dram_intlv_sel(pvt, range);
+
+ debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
+
+ if (dhar_valid(pvt) &&
+ dhar_base(pvt) <= sys_addr &&
+ sys_addr < BIT_64(32)) {
+ amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
+ sys_addr);
+ return -EINVAL;
+ }
if (intlv_en &&
- (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+ (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
+ amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
+ intlv_en, intlv_sel);
return -EINVAL;
+ }
+
+ sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
dct_sel_base = dct_sel_baseaddr(pvt);
@@ -1499,38 +1421,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
if (dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt) &&
((sys_addr >> 27) >= (dct_sel_base >> 11)))
- high_range = 1;
-
- channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
+ high_range = true;
- chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
- dct_sel_base_off, hole_valid,
- hole_off, dram_base);
+ channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
- intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
+ chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
+ high_range, dct_sel_base);
- /* remove Node ID (in case of memory interleaving) */
- tmp = chan_addr & 0xFC0;
+ /* Remove node interleaving, see F1x120 */
+ if (intlv_en)
+ chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
+ (chan_addr & 0xfff);
- chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
-
- /* remove channel interleave and hash */
+ /* remove channel interleave */
if (dct_interleave_enabled(pvt) &&
!dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt)) {
- if (dct_sel_interleave_addr(pvt) != 1)
- chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
- else {
- tmp = chan_addr & 0xFC0;
- chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
- | tmp;
- }
+
+ if (dct_sel_interleave_addr(pvt) != 1) {
+ if (dct_sel_interleave_addr(pvt) == 0x3)
+ /* hash 9 */
+ chan_addr = ((chan_addr >> 10) << 9) |
+ (chan_addr & 0x1ff);
+ else
+ /* A[6] or hash 6 */
+ chan_addr = ((chan_addr >> 7) << 6) |
+ (chan_addr & 0x3f);
+ } else
+ /* A[12] */
+ chan_addr = ((chan_addr >> 13) << 12) |
+ (chan_addr & 0xfff);
}
- debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
- chan_addr, (u32)(chan_addr >> 8));
+ debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
- cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+ cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
if (cs_found >= 0) {
*nid = node_id;
@@ -1539,23 +1464,21 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
return cs_found;
}
-static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
+static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
int *node, int *chan_sel)
{
- int dram_range, cs_found = -EINVAL;
- u64 dram_base, dram_limit;
+ int cs_found = -EINVAL;
+ unsigned range;
- for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
+ for (range = 0; range < DRAM_RANGES; range++) {
- if (!pvt->dram_rw_en[dram_range])
+ if (!dram_rw(pvt, range))
continue;
- dram_base = pvt->dram_base[dram_range];
- dram_limit = pvt->dram_limit[dram_range];
+ if ((get_dram_base(pvt, range) <= sys_addr) &&
+ (get_dram_limit(pvt, range) >= sys_addr)) {
- if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
-
- cs_found = f10_match_to_this_node(pvt, dram_range,
+ cs_found = f1x_match_to_this_node(pvt, range,
sys_addr, node,
chan_sel);
if (cs_found >= 0)
@@ -1572,16 +1495,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
* The @sys_addr is usually an error address received from the hardware
* (MCX_ADDR).
*/
-static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info,
- u64 sys_addr)
+static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 page, offset;
int nid, csrow, chan = 0;
- u16 syndrome;
- csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+ csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
@@ -1590,14 +1511,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
error_address_to_page_and_offset(sys_addr, &page, &offset);
- syndrome = extract_syndrome(err_info);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
* this point.
*/
- if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
+ if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
if (chan >= 0)
@@ -1614,16 +1533,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
/*
  * debug routine to display the memory sizes of all logical DIMMs and their
- * CSROWs as well
+ * CSROWs
*/
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, factor = 0;
- u32 dbam;
- u32 *dcsb;
+ u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+ u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
if (boot_cpu_data.x86 == 0xf) {
- if (pvt->dclr0 & F10_WIDTH_128)
+ if (pvt->dclr0 & WIDTH_128)
factor = 1;
/* K8 families < revF not supported yet */
@@ -1634,7 +1553,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
}
dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
- dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
+ dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
+ : pvt->csels[0].csbases;
debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
@@ -1644,12 +1564,14 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
- if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
- size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2] & DCSB_CS_ENABLE)
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
size1 = 0;
- if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
- size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
dimm * 2, size0 << factor,
@@ -1664,10 +1586,9 @@ static struct amd64_family_type amd64_family_types[] = {
.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
.ops = {
.early_channel_count = k8_early_channel_count,
- .get_error_address = k8_get_error_address,
- .read_dram_base_limit = k8_read_dram_base_limit,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
.dbam_to_cs = k8_dbam_to_chip_select,
+ .read_dct_pci_cfg = k8_read_dct_pci_cfg,
}
},
[F10_CPUS] = {
@@ -1675,12 +1596,21 @@ static struct amd64_family_type amd64_family_types[] = {
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
.ops = {
- .early_channel_count = f10_early_channel_count,
- .get_error_address = f10_get_error_address,
- .read_dram_base_limit = f10_read_dram_base_limit,
- .read_dram_ctl_register = f10_read_dram_ctl_register,
- .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f10_dbam_to_chip_select,
+ .read_dct_pci_cfg = f10_read_dct_pci_cfg,
+ }
+ },
+ [F15_CPUS] = {
+ .ctl_name = "F15h",
+ .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f15_dbam_to_chip_select,
+ .read_dct_pci_cfg = f15_read_dct_pci_cfg,
}
},
};
@@ -1770,15 +1700,15 @@ static u16 x8_vectors[] = {
0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
-static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
- int v_dim)
+static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+ unsigned v_dim)
{
unsigned int i, err_sym;
for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
u16 s = syndrome;
- int v_idx = err_sym * v_dim;
- int v_end = (err_sym + 1) * v_dim;
+ unsigned v_idx = err_sym * v_dim;
+ unsigned v_end = (err_sym + 1) * v_dim;
/* walk over all 16 bits of the syndrome */
for (i = 1; i < (1U << 16); i <<= 1) {
@@ -1850,51 +1780,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
struct amd64_pvt *pvt = mci->pvt_info;
int err_sym = -1;
- if (pvt->syn_type == 8)
+ if (pvt->ecc_sym_sz == 8)
err_sym = decode_syndrome(syndrome, x8_vectors,
ARRAY_SIZE(x8_vectors),
- pvt->syn_type);
- else if (pvt->syn_type == 4)
+ pvt->ecc_sym_sz);
+ else if (pvt->ecc_sym_sz == 4)
err_sym = decode_syndrome(syndrome, x4_vectors,
ARRAY_SIZE(x4_vectors),
- pvt->syn_type);
+ pvt->ecc_sym_sz);
else {
- amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
+ amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
return err_sym;
}
- return map_err_sym_to_channel(err_sym, pvt->syn_type);
+ return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
/*
* Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
* ADDRESS and process.
*/
-static void amd64_handle_ce(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
{
struct amd64_pvt *pvt = mci->pvt_info;
u64 sys_addr;
+ u16 syndrome;
/* Ensure that the Error Address is VALID */
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
+ syndrome = extract_syndrome(m->status);
amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
- pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
+ pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
}
/* Handle any Un-correctable Errors (UEs) */
-static void amd64_handle_ue(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
{
- struct amd64_pvt *pvt = mci->pvt_info;
struct mem_ctl_info *log_mci, *src_mci = NULL;
int csrow;
u64 sys_addr;
@@ -1902,13 +1831,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
log_mci = mci;
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
/*
* Find out which node the error address belongs to. This may be
@@ -1936,14 +1865,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
- struct err_regs *info)
+ struct mce *m)
{
- u16 ec = EC(info->nbsl);
- u8 xec = XEC(info->nbsl, 0x1f);
- int ecc_type = (info->nbsh >> 13) & 0x3;
+ u16 ec = EC(m->status);
+ u8 xec = XEC(m->status, 0x1f);
+ u8 ecc_type = (m->status >> 45) & 0x3;
/* Bail early out if this was an 'observed' error */
- if (PP(ec) == K8_NBSL_PP_OBS)
+ if (PP(ec) == NBSL_PP_OBS)
return;
/* Do only ECC errors */
@@ -1951,34 +1880,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
return;
if (ecc_type == 2)
- amd64_handle_ce(mci, info);
+ amd64_handle_ce(mci, m);
else if (ecc_type == 1)
- amd64_handle_ue(mci, info);
+ amd64_handle_ue(mci, m);
}
void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
{
struct mem_ctl_info *mci = mcis[node_id];
- struct err_regs regs;
-
- regs.nbsl = (u32) m->status;
- regs.nbsh = (u32)(m->status >> 32);
- regs.nbeal = (u32) m->addr;
- regs.nbeah = (u32)(m->addr >> 32);
- regs.nbcfg = nbcfg;
-
- __amd64_decode_bus_error(mci, &regs);
-
- /*
- * Check the UE bit of the NB status high register, if set generate some
- * logs. If NOT a GART error, then process the event as a NO-INFO event.
- * If it was a GART error, skip that process.
- *
- * FIXME: this should go somewhere else, if at all.
- */
- if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
- edac_mc_handle_ue_no_info(mci, "UE bit is set");
+ __amd64_decode_bus_error(mci, m);
}
/*
@@ -2027,9 +1938,10 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u64 msr_val;
u32 tmp;
- int dram;
+ unsigned range;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -2046,75 +1958,66 @@ static void read_mc_regs(struct amd64_pvt *pvt)
} else
debugf0(" TOP_MEM2 disabled.\n");
- amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
+ amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
- if (pvt->ops->read_dram_ctl_register)
- pvt->ops->read_dram_ctl_register(pvt);
+ read_dram_ctl_register(pvt);
- for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
- /*
- * Call CPU specific READ function to get the DRAM Base and
- * Limit values from the DCT.
- */
- pvt->ops->read_dram_base_limit(pvt, dram);
+ for (range = 0; range < DRAM_RANGES; range++) {
+ u8 rw;
- /*
- * Only print out debug info on rows with both R and W Enabled.
- * Normal processing, compiler should optimize this whole 'if'
- * debug output block away.
- */
- if (pvt->dram_rw_en[dram] != 0) {
- debugf1(" DRAM-BASE[%d]: 0x%016llx "
- "DRAM-LIMIT: 0x%016llx\n",
- dram,
- pvt->dram_base[dram],
- pvt->dram_limit[dram]);
-
- debugf1(" IntlvEn=%s %s %s "
- "IntlvSel=%d DstNode=%d\n",
- pvt->dram_IntlvEn[dram] ?
- "Enabled" : "Disabled",
- (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
- (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
- pvt->dram_IntlvSel[dram],
- pvt->dram_DstNode[dram]);
- }
+ /* read settings for this DRAM range */
+ read_dram_base_limit_regs(pvt, range);
+
+ rw = dram_rw(pvt, range);
+ if (!rw)
+ continue;
+
+ debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
+ range,
+ get_dram_base(pvt, range),
+ get_dram_limit(pvt, range));
+
+ debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
+ dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
+ (rw & 0x1) ? "R" : "-",
+ (rw & 0x2) ? "W" : "-",
+ dram_intlv_sel(pvt, range),
+ dram_dst_node(pvt, range));
}
- amd64_read_dct_base_mask(pvt);
+ read_dct_base_mask(pvt);
- amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
- amd64_read_dbam_reg(pvt);
+ amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
+ amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
+ amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
+ amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
- if (boot_cpu_data.x86 >= 0x10) {
- if (!dct_ganging_enabled(pvt)) {
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
- }
- amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ if (!dct_ganging_enabled(pvt)) {
+ amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
+ amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
}
- if (boot_cpu_data.x86 == 0x10 &&
- boot_cpu_data.x86_model > 7 &&
- /* F3x180[EccSymbolSize]=1 => x8 symbols */
- tmp & BIT(25))
- pvt->syn_type = 8;
- else
- pvt->syn_type = 4;
+ pvt->ecc_sym_sz = 4;
- amd64_dump_misc_regs(pvt);
+ if (c->x86 >= 0x10) {
+ amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
+
+ /* F10h, revD and later can do x8 ECC too */
+ if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
+ pvt->ecc_sym_sz = 8;
+ }
+ dump_misc_regs(pvt);
}
/*
* NOTE: CPU Revision Dependent code
*
* Input:
- * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
+ * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
* k8 private pointer to -->
* DRAM Bank Address mapping register
* node_id
@@ -2144,7 +2047,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* encompasses
*
*/
-static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 cs_mode, nr_pages;
@@ -2157,7 +2060,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
*/
cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
- nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
+ nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
/*
* If dual channel then double the memory size of single channel.
@@ -2180,23 +2083,22 @@ static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr;
+ u64 input_addr_min, input_addr_max, sys_addr, base, mask;
u32 val;
int i, empty = 1;
- amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
pvt->nbcfg = val;
- pvt->ctl_error_info.nbcfg = val;
debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
pvt->mc_node_id, val,
- !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- for (i = 0; i < pvt->cs_count; i++) {
+ for_each_chip_select(i, 0, pvt) {
csrow = &mci->csrows[i];
- if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+ if (!csrow_enabled(i, 0, pvt)) {
debugf1("----CSROW %d EMPTY for node %d\n", i,
pvt->mc_node_id);
continue;
@@ -2206,13 +2108,15 @@ static int init_csrows(struct mem_ctl_info *mci)
i, pvt->mc_node_id);
empty = 0;
- csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+ csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
- csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+
+ get_cs_base_and_mask(pvt, i, 0, &base, &mask);
+ csrow->page_mask = ~mask;
/* 8 bytes of resolution */
csrow->mtype = amd64_determine_memory_type(pvt, i);
@@ -2231,9 +2135,9 @@ static int init_csrows(struct mem_ctl_info *mci)
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
- if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE)
csrow->edac_mode =
- (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+ (pvt->nbcfg & NBCFG_CHIPKILL) ?
EDAC_S4ECD4ED : EDAC_SECDED;
else
csrow->edac_mode = EDAC_NONE;
@@ -2243,7 +2147,7 @@ static int init_csrows(struct mem_ctl_info *mci)
}
/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
{
int cpu;
@@ -2253,7 +2157,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2270,7 +2174,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
for_each_cpu(cpu, mask) {
struct msr *reg = per_cpu_ptr(msrs, cpu);
- nbe = reg->l & K8_MSR_MCGCTL_NBE;
+ nbe = reg->l & MSR_MCGCTL_NBE;
debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
cpu, reg->q,
@@ -2305,16 +2209,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
struct msr *reg = per_cpu_ptr(msrs, cpu);
if (on) {
- if (reg->l & K8_MSR_MCGCTL_NBE)
+ if (reg->l & MSR_MCGCTL_NBE)
s->flags.nb_mce_enable = 1;
- reg->l |= K8_MSR_MCGCTL_NBE;
+ reg->l |= MSR_MCGCTL_NBE;
} else {
/*
* Turn off NB MCE reporting only when it was off before
*/
if (!s->flags.nb_mce_enable)
- reg->l &= ~K8_MSR_MCGCTL_NBE;
+ reg->l &= ~MSR_MCGCTL_NBE;
}
}
wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
@@ -2328,40 +2232,38 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
bool ret = true;
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
if (toggle_ecc_err_reporting(s, nid, ON)) {
amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
return false;
}
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
- /* turn on UECCEn and CECCEn bits */
s->old_nbctl = value & mask;
s->nbctl_valid = true;
value |= mask;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("DRAM ECC disabled on this node, enabling...\n");
s->flags.nb_ecc_prev = 0;
/* Attempt to turn on DRAM ECC Enable */
- value |= K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ value |= NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("Hardware rejected DRAM ECC enable,"
"check memory DIMM configuration.\n");
ret = false;
@@ -2372,9 +2274,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
s->flags.nb_ecc_prev = 1;
}
- debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
return ret;
}
@@ -2382,22 +2283,23 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
+
if (!s->nbctl_valid)
return;
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
value &= ~mask;
value |= s->old_nbctl;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
if (!s->flags.nb_ecc_prev) {
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
- value &= ~K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
+ value &= ~NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
}
/* restore the NB Enable MCGCTL bit */
@@ -2423,9 +2325,9 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
u8 ecc_en = 0;
bool nb_mce_en = false;
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
+ ecc_en = !!(value & NBCFG_ECC_ENABLE);
amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
@@ -2463,23 +2365,24 @@ static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
+ struct amd64_family_type *fam)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- if (pvt->nbcap & K8_NBCAP_SECDED)
+ if (pvt->nbcap & NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+ if (pvt->nbcap & NBCAP_CHIPKILL)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
mci->edac_cap = amd64_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
- mci->ctl_name = pvt->ctl_name;
+ mci->ctl_name = fam->ctl_name;
mci->dev_name = pci_name(pvt->F2);
mci->ctl_page_to_phys = NULL;
@@ -2500,14 +2403,16 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
case 0xf:
fam_type = &amd64_family_types[K8_CPUS];
pvt->ops = &amd64_family_types[K8_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
break;
+
case 0x10:
fam_type = &amd64_family_types[F10_CPUS];
pvt->ops = &amd64_family_types[F10_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
+ break;
+
+ case 0x15:
+ fam_type = &amd64_family_types[F15_CPUS];
+ pvt->ops = &amd64_family_types[F15_CPUS].ops;
break;
default:
@@ -2517,7 +2422,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
pvt->ext_model = boot_cpu_data.x86_model >> 4;
- amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
+ amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
(fam == 0xf ?
(pvt->ext_model >= K8_REV_F ? "revF or later "
: "revE or earlier ")
@@ -2564,14 +2469,14 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
+ mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
if (!mci)
goto err_siblings;
mci->pvt_info = pvt;
mci->dev = &pvt->F2->dev;
- setup_mci_misc_attrs(mci);
+ setup_mci_misc_attrs(mci, fam_type);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
@@ -2714,6 +2619,15 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
.class = 0,
.class_mask = 0,
},
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
+
{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
@@ -2754,7 +2668,7 @@ static int __init amd64_edac_init(void)
{
int err = -ENODEV;
- edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+ printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
opstate_init();
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 613ec72b0f65..11be36a311eb 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -144,7 +144,7 @@
* sections 3.5.4 and 3.5.5 for more information.
*/
-#define EDAC_AMD64_VERSION "v3.3.0"
+#define EDAC_AMD64_VERSION "3.4.0"
#define EDAC_MOD_STR "amd64_edac"
/* Extended Model from CPUID, for CPU Revision numbers */
@@ -153,85 +153,64 @@
#define K8_REV_F 4
/* Hardware limit on ChipSelect rows per MC and processors per system */
-#define MAX_CS_COUNT 8
-#define DRAM_REG_COUNT 8
+#define NUM_CHIPSELECTS 8
+#define DRAM_RANGES 8
#define ON true
#define OFF false
/*
+ * Create a contiguous bitmask starting at bit position @lo and ending at
+ * position @hi. For example
+ *
+ * GENMASK(21, 39) gives us the 64-bit mask 0x000000ffffe00000.
+ */
+#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
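Two hand-worked evaluations of the macro above, for illustration only:

/*
 * GENMASK(0, 7)   = ((1ULL << 8)  - 1) << 0  = 0x00000000000000ff
 * GENMASK(21, 39) = ((1ULL << 19) - 1) << 21 = 0x000000ffffe00000
 */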
+
+/*
* PCI-defined configuration space registers
*/
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
/*
* Function 1 - Address Map
*/
-#define K8_DRAM_BASE_LOW 0x40
-#define K8_DRAM_LIMIT_LOW 0x44
-#define K8_DHAR 0xf0
-
-#define DHAR_VALID BIT(0)
-#define F10_DRAM_MEM_HOIST_VALID BIT(1)
+#define DRAM_BASE_LO 0x40
+#define DRAM_LIMIT_LO 0x44
-#define DHAR_BASE_MASK 0xff000000
-#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
+#define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7))
+#define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3))
+#define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
+#define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
-#define K8_DHAR_OFFSET_MASK 0x0000ff00
-#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
+#define DHAR 0xf0
+#define dhar_valid(pvt) ((pvt)->dhar & BIT(0))
+#define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1))
+#define dhar_base(pvt) ((pvt)->dhar & 0xff000000)
+#define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16)
-#define F10_DHAR_OFFSET_MASK 0x0000ff80
/* NOTE: Extra mask bit vs K8 */
-#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
+#define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16)
+#define DCT_CFG_SEL 0x10C
-/* F10 High BASE/LIMIT registers */
-#define F10_DRAM_BASE_HIGH 0x140
-#define F10_DRAM_LIMIT_HIGH 0x144
+#define DRAM_BASE_HI 0x140
+#define DRAM_LIMIT_HI 0x144
/*
* Function 2 - DRAM controller
*/
-#define K8_DCSB0 0x40
-#define F10_DCSB1 0x140
+#define DCSB0 0x40
+#define DCSB1 0x140
+#define DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_NPT_SPARE BIT(1)
-#define K8_DCSB_NPT_TESTFAIL BIT(2)
+#define DCSM0 0x60
+#define DCSM1 0x160
-/*
- * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
- * the address
- */
-#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
-#define REV_E_DCS_SHIFT 4
-
-#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_F1Xh_DCS_SHIFT 8
-
-/*
- * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
- * to form the address
- */
-#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_DCS_SHIFT 8
-
-/* DRAM CS Mask Registers */
-#define K8_DCSM0 0x60
-#define F10_DCSM1 0x160
-
-/* REV E: select [29:21] and [15:9] from DCSM */
-#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
-
-/* unused bits [24:20] and [12:0] */
-#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
-
-/* REV F and later: select [28:19] and [13:5] from DCSM */
-#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
-
-/* unused bits [26:22] and [12:0] */
-#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
+#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
#define DBAM0 0x80
#define DBAM1 0x180
@@ -241,148 +220,84 @@
#define DBAM_MAX_VALUE 11
-
-#define F10_DCLR_0 0x90
-#define F10_DCLR_1 0x190
+#define DCLR0 0x90
+#define DCLR1 0x190
#define REVE_WIDTH_128 BIT(16)
-#define F10_WIDTH_128 BIT(11)
+#define WIDTH_128 BIT(11)
+#define DCHR0 0x94
+#define DCHR1 0x194
+#define DDR3_MODE BIT(8)
-#define F10_DCHR_0 0x94
-#define F10_DCHR_1 0x194
+#define DCT_SEL_LO 0x110
+#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3)
+#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0))
+#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2))
-#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
-#define DDR3_MODE BIT(8)
-#define F10_DCHR_MblMode BIT(6)
+#define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))
+#define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5))
+#define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10))
-#define F10_DCTL_SEL_LOW 0x110
-#define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3)
-#define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0))
-#define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2))
-#define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4))
-#define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5))
-#define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8))
-#define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10))
+#define SWAP_INTLV_REG 0x10c
-#define F10_DCTL_SEL_HIGH 0x114
+#define DCT_SEL_HI 0x114
/*
* Function 3 - Misc Control
*/
-#define K8_NBCTL 0x40
-
-/* Correctable ECC error reporting enable */
-#define K8_NBCTL_CECCEn BIT(0)
-
-/* UnCorrectable ECC error reporting enable */
-#define K8_NBCTL_UECCEn BIT(1)
+#define NBCTL 0x40
-#define K8_NBCFG 0x44
-#define K8_NBCFG_CHIPKILL BIT(23)
-#define K8_NBCFG_ECC_ENABLE BIT(22)
+#define NBCFG 0x44
+#define NBCFG_CHIPKILL BIT(23)
+#define NBCFG_ECC_ENABLE BIT(22)
-#define K8_NBSL 0x48
-
-
-/* Family F10h: Normalized Extended Error Codes */
-#define F10_NBSL_EXT_ERR_RES 0x0
+/* F3x48: NBSL */
#define F10_NBSL_EXT_ERR_ECC 0x8
+#define NBSL_PP_OBS 0x2
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
-#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
-
-#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
-#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
-#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
-
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_GART_WALK 0xF
-#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
-
-/* 0x10 to 0x1B: Reserved */
-#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
-#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
-#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
-
-/* K8: Normalized Extended Error Codes */
-#define K8_NBSL_EXT_ERR_ECC 0x0
-#define K8_NBSL_EXT_ERR_CRC 0x1
-#define K8_NBSL_EXT_ERR_SYNC 0x2
-#define K8_NBSL_EXT_ERR_MST 0x3
-#define K8_NBSL_EXT_ERR_TGT 0x4
-#define K8_NBSL_EXT_ERR_GART 0x5
-#define K8_NBSL_EXT_ERR_RMW 0x6
-#define K8_NBSL_EXT_ERR_WDT 0x7
-#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
-#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
-
-/*
- * The following are for BUS type errors AFTER values have been normalized by
- * shifting right
- */
-#define K8_NBSL_PP_SRC 0x0
-#define K8_NBSL_PP_RES 0x1
-#define K8_NBSL_PP_OBS 0x2
-#define K8_NBSL_PP_GENERIC 0x3
-
-#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
-
-#define K8_NBEAL 0x50
-#define K8_NBEAH 0x54
-#define K8_SCRCTRL 0x58
-
-#define F10_NB_CFG_LOW 0x88
+#define SCRCTRL 0x58
#define F10_ONLINE_SPARE 0xB0
-#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
-#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
-#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
-#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
+#define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
+#define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)
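For reference, expanding the two macros above by hand (this mirrors the fields the removed F10_ONLINE_SPARE_* helpers selected; nothing new is defined here):

/*
 * online_spare_swap_done(pvt, 0)  -> (online_spare >> 1) & 0x1   (BIT(1))
 * online_spare_swap_done(pvt, 1)  -> (online_spare >> 3) & 0x1   (BIT(3))
 * online_spare_bad_dramcs(pvt, 0) -> (online_spare >> 4) & 0x7
 * online_spare_bad_dramcs(pvt, 1) -> (online_spare >> 8) & 0x7
 */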
#define F10_NB_ARRAY_ADDR 0xB8
-
-#define F10_NB_ARRAY_DRAM_ECC 0x80000000
+#define F10_NB_ARRAY_DRAM_ECC BIT(31)
/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
#define F10_NB_ARRAY_DATA 0xBC
-
#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(17) | bits)
-
#define SET_NB_DRAM_INJECTION_READ(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(16) | bits)
-#define K8_NBCAP 0xE8
-#define K8_NBCAP_CORES (BIT(12)|BIT(13))
-#define K8_NBCAP_CHIPKILL BIT(4)
-#define K8_NBCAP_SECDED BIT(3)
-#define K8_NBCAP_DCT_DUAL BIT(0)
+#define NBCAP 0xE8
+#define NBCAP_CHIPKILL BIT(4)
+#define NBCAP_SECDED BIT(3)
+#define NBCAP_DCT_DUAL BIT(0)
#define EXT_NB_MCA_CFG 0x180
/* MSRs */
-#define K8_MSR_MCGCTL_NBE BIT(4)
-
-#define K8_MSR_MC4CTL 0x0410
-#define K8_MSR_MC4STAT 0x0411
-#define K8_MSR_MC4ADDR 0x0412
+#define MSR_MCGCTL_NBE BIT(4)
/* AMD sets the first MC device at device ID 0x18. */
-static inline int get_node_id(struct pci_dev *pdev)
+static inline u8 get_node_id(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) - 0x18;
}
-enum amd64_chipset_families {
+enum amd_families {
K8_CPUS = 0,
F10_CPUS,
+ F15_CPUS,
+ NUM_FAMILIES,
};
/* Error injection control structure */
@@ -392,13 +307,35 @@ struct error_injection {
u32 bit_map;
};
+/* low and high part of PCI config space regs */
+struct reg_pair {
+ u32 lo, hi;
+};
+
+/*
+ * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
+ */
+struct dram_range {
+ struct reg_pair base;
+ struct reg_pair lim;
+};
+
+/* A DCT's collection of chip selects */
+struct chip_select {
+ u32 csbases[NUM_CHIPSELECTS];
+ u8 b_cnt;
+
+ u32 csmasks[NUM_CHIPSELECTS];
+ u8 m_cnt;
+};
+
struct amd64_pvt {
struct low_ops *ops;
/* pci_device handles which we utilize */
struct pci_dev *F1, *F2, *F3;
- int mc_node_id; /* MC index of this MC node */
+ unsigned mc_node_id; /* MC index of this MC node */
int ext_model; /* extended model value of this node */
int channel_count;
@@ -414,60 +351,50 @@ struct amd64_pvt {
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
- /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
- u32 dcsb0[MAX_CS_COUNT];
- u32 dcsb1[MAX_CS_COUNT];
-
- /* DRAM CS Mask Registers F2x[1,0][6C:60] */
- u32 dcsm0[MAX_CS_COUNT];
- u32 dcsm1[MAX_CS_COUNT];
-
- /*
- * Decoded parts of DRAM BASE and LIMIT Registers
- * F1x[78,70,68,60,58,50,48,40]
- */
- u64 dram_base[DRAM_REG_COUNT];
- u64 dram_limit[DRAM_REG_COUNT];
- u8 dram_IntlvSel[DRAM_REG_COUNT];
- u8 dram_IntlvEn[DRAM_REG_COUNT];
- u8 dram_DstNode[DRAM_REG_COUNT];
- u8 dram_rw_en[DRAM_REG_COUNT];
-
- /*
- * The following fields are set at (load) run time, after CPU revision
- * has been determined, since the dct_base and dct_mask registers vary
- * based on revision
- */
- u32 dcsb_base; /* DCSB base bits */
- u32 dcsm_mask; /* DCSM mask bits */
- u32 cs_count; /* num chip selects (== num DCSB registers) */
- u32 num_dcsm; /* Number of DCSM registers */
- u32 dcs_mask_notused; /* DCSM notused mask bits */
- u32 dcs_shift; /* DCSB and DCSM shift value */
+ /* one for each DCT */
+ struct chip_select csels[2];
+
+ /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
+ struct dram_range ranges[DRAM_RANGES];
u64 top_mem; /* top of memory below 4GB */
u64 top_mem2; /* top of memory above 4GB */
- u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
- u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
- u32 online_spare; /* On-Line spare Reg */
+ u32 dct_sel_lo; /* DRAM Controller Select Low */
+ u32 dct_sel_hi; /* DRAM Controller Select High */
+ u32 online_spare; /* On-Line spare Reg */
/* x4 or x8 syndromes in use */
- u8 syn_type;
-
- /* temp storage for when input is received from sysfs */
- struct err_regs ctl_error_info;
+ u8 ecc_sym_sz;
/* place to store error injection parameters prior to issue */
struct error_injection injection;
+};
- /* DCT per-family scrubrate setting */
- u32 min_scrubrate;
+static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
- /* family name this instance is running on */
- const char *ctl_name;
+ if (boot_cpu_data.x86 == 0xf)
+ return addr;
-};
+ return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
+}
+
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
+
+ if (boot_cpu_data.x86 == 0xf)
+ return lim;
+
+ return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
+}
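A worked example of the two range helpers above, assuming a family 0x10 or later part and made-up register values:

/*
 * base.lo = 0x00200003, base.hi = 0x00000001:
 *   get_dram_base()  = (0x00200000 << 8) | (0x01ULL << 40)
 *                    = 0x0000010020000000
 * lim.lo  = 0x003f0003, lim.hi  = 0x00000001:
 *   get_dram_limit() = (0x003f0000 << 8) | 0x00ffffff | (0x01ULL << 40)
 *                    = 0x000001003fffffff
 */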
+
+static inline u16 extract_syndrome(u64 status)
+{
+ return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
+}
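The return expression above just repacks two bit ranges of MCi_STATUS; spelled out (derived from the shifts themselves, nothing beyond what the code computes):

/*
 * (status >> 47) & 0x00ff  -> status bits [54:47] become syndrome[7:0]
 * (status >> 16) & 0xff00  -> status bits [31:24] become syndrome[15:8]
 */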
/*
* per-node ECC settings descriptor
@@ -482,14 +409,6 @@ struct ecc_settings {
} flags;
};
-extern const char *tt_msgs[4];
-extern const char *ll_msgs[4];
-extern const char *rrrr_msgs[16];
-extern const char *to_msgs[2];
-extern const char *pp_msgs[4];
-extern const char *ii_msgs[4];
-extern const char *htlink_msgs[8];
-
#ifdef CONFIG_EDAC_DEBUG
#define NUM_DBG_ATTRS 5
#else
@@ -511,14 +430,11 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
*/
struct low_ops {
int (*early_channel_count) (struct amd64_pvt *pvt);
-
- u64 (*get_error_address) (struct mem_ctl_info *mci,
- struct err_regs *info);
- void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram);
- void (*read_dram_ctl_register) (struct amd64_pvt *pvt);
- void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci,
- struct err_regs *info, u64 SystemAddr);
- int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode);
+ void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome);
+ int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
+ int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset,
+ u32 *val, const char *func);
};
struct amd64_family_type {
@@ -527,28 +443,17 @@ struct amd64_family_type {
struct low_ops ops;
};
-static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
- u32 *val, const char *func)
-{
- int err = 0;
-
- err = pci_read_config_dword(pdev, offset, val);
- if (err)
- amd64_warn("%s: error reading F%dx%x.\n",
- func, PCI_FUNC(pdev->devfn), offset);
-
- return err;
-}
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func);
#define amd64_read_pci_cfg(pdev, offset, val) \
- amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+ __amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
-/*
- * For future CPU versions, verify the following as new 'slow' rates appear and
- * modify the necessary skip values for the supported CPU.
- */
-#define K8_MIN_SCRUB_RATE_BITS 0x0
-#define F10_MIN_SCRUB_RATE_BITS 0x5
+#define amd64_write_pci_cfg(pdev, offset, val) \
+ __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
+
+#define amd64_read_dct_pci_cfg(pvt, offset, val) \
+ pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 688478de1cbd..303f10e03dda 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -117,13 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the READ request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
@@ -150,13 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
pvt->injection.bit_map);
 	/* Issue 'word' and 'bit' along with the WRITE request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 39d97cfdf58c..73196f7b7229 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -785,10 +785,10 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
{
int err;
- debugf1("%s()\n", __func__);
+ debugf4("%s()\n", __func__);
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
struct mcidev_sysfs_group_kobj *grp_kobj;
@@ -818,7 +818,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
if (err < 0)
return err;
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
err = sysfs_create_file(kobj, &sysfs_attrib->attr);
@@ -853,26 +853,26 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
 	 * Remove all the attributes first
*/
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
- debugf1("%s() seeking for group %s\n",
+ debugf4("%s() seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
list_for_each_entry(grp_kobj,
&mci->grp_kobj_list, list) {
- debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
+ debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
if (grp_kobj->grp == sysfs_attrib->grp) {
edac_remove_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj, count + 1);
- debugf0("%s() group %s\n", __func__,
+ debugf4("%s() group %s\n", __func__,
sysfs_attrib->grp->name);
kobject_put(&grp_kobj->kobj);
}
}
- debugf1("%s() end of seeking for group %s\n",
+ debugf4("%s() end of seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
sysfs_remove_file(kobj, &sysfs_attrib->attr);
} else
@@ -979,7 +979,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
- debugf0("%s() unregister this mci kobj\n", __func__);
+ debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
if (mci->csrows[i].nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
@@ -989,18 +989,18 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
 	/* remove this mci instance's attributes */
if (mci->mc_driver_sysfs_attributes) {
- debugf0("%s() unregister mci private attributes\n", __func__);
+ debugf4("%s() unregister mci private attributes\n", __func__);
edac_remove_mci_instance_attributes(mci,
mci->mc_driver_sysfs_attributes,
&mci->edac_mci_kobj, 0);
}
/* remove the symlink */
- debugf0("%s() remove_link\n", __func__);
+ debugf4("%s() remove_link\n", __func__);
sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
/* unregister this instance's kobject */
- debugf0("%s() remove_mci_instance\n", __func__);
+ debugf4("%s() remove_mci_instance\n", __func__);
kobject_put(&mci->edac_mci_kobj);
}
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 05523b504271..76d1f576cdc8 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -162,7 +162,7 @@ static struct edac_pci_ctl_info *i7300_pci;
#define AMBPRESENT_0 0x64
#define AMBPRESENT_1 0x66
-const static u16 mtr_regs[MAX_SLOTS] = {
+static const u16 mtr_regs[MAX_SLOTS] = {
0x80, 0x84, 0x88, 0x8c,
0x82, 0x86, 0x8a, 0x8e
};
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 3218819b7286..92e65e7038e9 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -160,8 +160,8 @@ NOTE: Only ONE of the three must be enabled
* 3:2 Rank 1 architecture
* 1:0 Rank 0 architecture
*
- * 00 => x16 devices; i.e 4 banks
- * 01 => x8 devices; i.e 8 banks
+ * 00 => 4 banks
+ * 01 => 8 banks
*/
#define I82975X_C0BNKARC 0x10e
#define I82975X_C1BNKARC 0x18e
@@ -278,6 +278,7 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
struct i82975x_error_info *info, int handle_errors)
{
int row, multi_chan, chan;
+ unsigned long offst, page;
multi_chan = mci->csrows[0].nr_channels - 1;
@@ -292,17 +293,19 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
info->errsts = info->errsts2;
}
- chan = info->eap & 1;
- info->eap >>= 1;
- if (info->xeap )
- info->eap |= 0x80000000;
- info->eap >>= PAGE_SHIFT;
- row = edac_mc_find_csrow_by_page(mci, info->eap);
+ page = (unsigned long) info->eap;
+ if (info->xeap & 1)
+ page |= 0x100000000ul;
+ chan = page & 1;
+ page >>= 1;
+ offst = page & ((1 << PAGE_SHIFT) - 1);
+ page >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, page);
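A worked example of the page/offset split above, assuming 4 KiB pages (PAGE_SHIFT == 12) and an arbitrary error address:

	/*
	 * eap = 0x12345678, xeap bit clear:
	 *   page  = 0x12345678
	 *   chan  = page & 1        = 0
	 *   page >>= 1              -> 0x091a2b3c
	 *   offst = page & 0xfff    = 0xb3c
	 *   page >>= PAGE_SHIFT     -> 0x091a2
	 */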
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE");
+ edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+ edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
multi_chan ? chan : 0,
"i82975x CE");
@@ -344,11 +347,7 @@ static int dual_channel_active(void __iomem *mch_window)
static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
{
/*
- * ASUS P5W DH either does not program this register or programs
- * it wrong!
- * ECC is possible on i92975x ONLY with DEV_X8 which should mean 'val'
- * for each rank should be 01b - the LSB of the word should be 0x55;
- * but it reads 0!
+	 * ECC is possible on i82975x ONLY with DEV_X8
*/
return DEV_X8;
}
@@ -356,11 +355,15 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev, void __iomem *mch_window)
{
+ static const char *labels[4] = {
+ "DIMM A1", "DIMM A2",
+ "DIMM B1", "DIMM B2"
+ };
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
u32 cumul_size;
- int index;
+ int index, chan;
last_cumul_size = 0;
@@ -369,11 +372,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
* The dram row boundary (DRB) reg values are boundary address
* for each DRAM row with a granularity of 32 or 64MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
- * contain the total memory contained in all eight rows.
- *
- * FIXME:
- * EDAC currently works for Dual-channel Interleaved configuration.
- * Other configurations, which the chip supports, need fixing/testing.
+ * contain the total memory contained in all rows.
*
*/
@@ -384,8 +383,26 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
((index >= 4) ? 0x80 : 0));
cumul_size = value;
cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
+ /*
+		 * Adjust cumul_size w.r.t. the number of channels
+ *
+ */
+ if (csrow->nr_channels > 1)
+ cumul_size <<= 1;
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
+
+ /*
+ * Initialise dram labels
+ * index values:
+ * [0-7] for single-channel; i.e. csrow->nr_channels = 1
+ * [0-3] for dual-channel; i.e. csrow->nr_channels = 2
+ */
+ for (chan = 0; chan < csrow->nr_channels; chan++)
+ strncpy(csrow->channels[chan].label,
+ labels[(index >> 1) + (chan * 2)],
+ EDAC_MC_LABEL_LEN);
+
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -393,8 +410,8 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 7; /* I82975X_EAP has 128B resolution */
- csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */
+ csrow->grain = 1 << 6; /* I82975X_EAP has 64B resolution */
+ csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
csrow->dtype = i82975x_dram_type(mch_window, index);
csrow->edac_mode = EDAC_SECDED; /* only supported */
}
@@ -515,18 +532,20 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
debugf3("%s(): init mci\n", __func__);
mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR;
+ mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82975X_REVISION;
mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
mci->edac_check = i82975x_check;
mci->ctl_page_to_phys = NULL;
debugf3("%s(): init pvt\n", __func__);
pvt = (struct i82975x_pvt *) mci->pvt_info;
pvt->mch_window = mch_window;
i82975x_init_csrows(mci, pdev, mch_window);
+ mci->scrub_mode = SCRUB_HW_SRC;
i82975x_get_error_info(mci, &discard); /* clear counters */
/* finalize this instance of memory controller with edac core */
@@ -664,7 +683,7 @@ module_init(i82975x_init);
module_exit(i82975x_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
+MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>");
MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
module_param(edac_op_state, int, 0444);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index f6cf73d93359..795cfbc0bf50 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -594,6 +594,7 @@ static bool nb_noop_mce(u16 ec, u8 xec)
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u32 nbsh = (u32)(m->status >> 32);
@@ -602,9 +603,8 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
pr_emerg(HW_ERR "Northbridge Error (node %d", node_id);
/* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */
- if ((boot_cpu_data.x86 == 0x10) &&
- (boot_cpu_data.x86_model > 7)) {
- if (nbsh & K8_NBSH_ERR_CPU_VAL)
+ if (c->x86 == 0x10 && c->x86_model > 7) {
+ if (nbsh & NBSH_ERR_CPU_VAL)
core = nbsh & nb_err_cpumask;
} else {
u8 assoc_cpus = nbsh & nb_err_cpumask;
@@ -646,7 +646,7 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
if (!fam_ops->nb_mce(ec, xec))
goto wrong_nb_mce;
- if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
+ if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
nb_bus_decoder(node_id, m, nbcfg);
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 45dda47173f2..795a3206acf5 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -31,19 +31,10 @@
#define R4(x) (((x) >> 4) & 0xf)
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
-#define K8_NBSH 0x4C
-
-#define K8_NBSH_VALID_BIT BIT(31)
-#define K8_NBSH_OVERFLOW BIT(30)
-#define K8_NBSH_UC_ERR BIT(29)
-#define K8_NBSH_ERR_EN BIT(28)
-#define K8_NBSH_MISCV BIT(27)
-#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
-#define K8_NBSH_PCC BIT(25)
-#define K8_NBSH_ERR_CPU_VAL BIT(24)
-#define K8_NBSH_CECC BIT(14)
-#define K8_NBSH_UECC BIT(13)
-#define K8_NBSH_ERR_SCRUBER BIT(8)
+/*
+ * F3x4C bits (MCi_STATUS' high half)
+ */
+#define NBSH_ERR_CPU_VAL BIT(24)
enum tt_ids {
TT_INSTR = 0,
@@ -86,17 +77,6 @@ extern const char *to_msgs[];
extern const char *ii_msgs[];
/*
- * relevant NB regs
- */
-struct err_regs {
- u32 nbcfg;
- u32 nbsh;
- u32 nbsl;
- u32 nbeah;
- u32 nbeal;
-};
-
-/*
* per-family decoder ops
*/
struct amd_decoder_ops {
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
new file mode 100644
index 000000000000..1d5cf06f6c6b
--- /dev/null
+++ b/drivers/edac/tile_edac.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ * Tilera-specific EDAC driver.
+ *
+ * This source code is derived from the following driver:
+ *
+ * Cell MIC driver for ECC counting
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/edac.h>
+#include <hv/hypervisor.h>
+#include <hv/drv_mshim_intf.h>
+
+#include "edac_core.h"
+
+#define DRV_NAME "tile-edac"
+
+/* Number of cs_rows needed per memory controller on TILEPro. */
+#define TILE_EDAC_NR_CSROWS 1
+
+/* Number of channels per memory controller on TILEPro. */
+#define TILE_EDAC_NR_CHANS 1
+
+/* Granularity of reported error in bytes on TILEPro. */
+#define TILE_EDAC_ERROR_GRAIN 8
+
+/* TILE processor has multiple independent memory controllers. */
+struct platform_device *mshim_pdev[TILE_MAX_MSHIMS];
+
+struct tile_edac_priv {
+ int hv_devhdl; /* Hypervisor device handle. */
+ int node; /* Memory controller instance #. */
+ unsigned int ce_count; /*
+ * Correctable-error counter
+ * kept by the driver.
+ */
+};
+
+static void tile_edac_check(struct mem_ctl_info *mci)
+{
+ struct tile_edac_priv *priv = mci->pvt_info;
+ struct mshim_mem_error mem_error;
+
+ if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_error,
+ sizeof(struct mshim_mem_error), MSHIM_MEM_ERROR_OFF) !=
+ sizeof(struct mshim_mem_error)) {
+ pr_err(DRV_NAME ": MSHIM_MEM_ERROR_OFF pread failure.\n");
+ return;
+ }
+
+ /* Check if the current error count is different from the saved one. */
+ if (mem_error.sbe_count != priv->ce_count) {
+ dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
+ priv->ce_count = mem_error.sbe_count;
+ edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
+ }
+}
+
+/*
+ * Initialize the 'csrows' table within the mci control structure with the
+ * addressing of memory.
+ */
+static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
+{
+ struct csrow_info *csrow = &mci->csrows[0];
+ struct tile_edac_priv *priv = mci->pvt_info;
+ struct mshim_mem_info mem_info;
+
+ if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
+ sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
+ sizeof(struct mshim_mem_info)) {
+ pr_err(DRV_NAME ": MSHIM_MEM_INFO_OFF pread failure.\n");
+ return -1;
+ }
+
+ if (mem_info.mem_ecc)
+ csrow->edac_mode = EDAC_SECDED;
+ else
+ csrow->edac_mode = EDAC_NONE;
+ switch (mem_info.mem_type) {
+ case DDR2:
+ csrow->mtype = MEM_DDR2;
+ break;
+
+ case DDR3:
+ csrow->mtype = MEM_DDR3;
+ break;
+
+ default:
+ return -1;
+ }
+
+ csrow->first_page = 0;
+ csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->grain = TILE_EDAC_ERROR_GRAIN;
+ csrow->dtype = DEV_UNKNOWN;
+
+ return 0;
+}
+
+static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
+{
+ char hv_file[32];
+ int hv_devhdl;
+ struct mem_ctl_info *mci;
+ struct tile_edac_priv *priv;
+ int rc;
+
+ sprintf(hv_file, "mshim/%d", pdev->id);
+ hv_devhdl = hv_dev_open((HV_VirtAddr)hv_file, 0);
+ if (hv_devhdl < 0)
+ return -EINVAL;
+
+ /* A TILE MC has a single channel and one chip-select row. */
+ mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
+ TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
+ if (mci == NULL)
+ return -ENOMEM;
+ priv = mci->pvt_info;
+ priv->node = pdev->id;
+ priv->hv_devhdl = hv_devhdl;
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = DRV_NAME;
+ mci->ctl_name = "TILEPro_Memory_Controller";
+ mci->dev_name = dev_name(&pdev->dev);
+ mci->edac_check = tile_edac_check;
+
+ /*
+ * Initialize the MC control structure 'csrows' table
+ * with the mapping and control information.
+ */
+ if (tile_edac_init_csrows(mci)) {
+ /* No csrows found. */
+ mci->edac_cap = EDAC_FLAG_NONE;
+ } else {
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ }
+
+ platform_set_drvdata(pdev, mci);
+
+ /* Register with EDAC core */
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register with EDAC core\n");
+ edac_mc_free(mci);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int __devexit tile_edac_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ if (mci)
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver tile_edac_mc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tile_edac_mc_probe,
+ .remove = __devexit_p(tile_edac_mc_remove),
+};
+
+/*
+ * Driver init routine.
+ */
+static int __init tile_edac_init(void)
+{
+ char hv_file[32];
+ struct platform_device *pdev;
+ int i, err, num = 0;
+
+ /* Only support POLL mode. */
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ err = platform_driver_register(&tile_edac_mc_driver);
+ if (err)
+ return err;
+
+ for (i = 0; i < TILE_MAX_MSHIMS; i++) {
+ /*
+		 * Not all memory controllers are configured, e.g. when
+		 * running on a simulator, so we register only those mshims
+		 * that the hypervisor has configured.
+ */
+ sprintf(hv_file, "mshim/%d", i);
+ if (hv_dev_open((HV_VirtAddr)hv_file, 0) < 0)
+ continue;
+
+ pdev = platform_device_register_simple(DRV_NAME, i, NULL, 0);
+ if (IS_ERR(pdev))
+ continue;
+ mshim_pdev[i] = pdev;
+ num++;
+ }
+
+ if (num == 0) {
+ platform_driver_unregister(&tile_edac_mc_driver);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/*
+ * Driver cleanup routine.
+ */
+static void __exit tile_edac_exit(void)
+{
+ int i;
+
+ for (i = 0; i < TILE_MAX_MSHIMS; i++) {
+ struct platform_device *pdev = mshim_pdev[i];
+ if (!pdev)
+ continue;
+
+ platform_set_drvdata(pdev, NULL);
+ platform_device_unregister(pdev);
+ }
+ platform_driver_unregister(&tile_edac_mc_driver);
+}
+
+module_init(tile_edac_init);
+module_exit(tile_edac_exit);
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 0c56989cd907..2be6f4520772 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -75,7 +75,8 @@ config FIREWIRE_NOSY
The following cards are known to be based on PCILynx or PCILynx-2:
IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
(PCI card), Newer Technology FireWire 2 Go (CardBus card),
- Apple Power Mac G3 blue & white (onboard controller).
+ Apple Power Mac G3 blue & white and G4 with PCI graphics
+ (onboard controller).
To compile this driver as a module, say M here: The module will be
called nosy. Source code of a userspace interface to nosy, called
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 24ff35511e2b..3c44fbc81acb 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -75,6 +75,13 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_IRMC ((1) << 31)
#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
+/*
+ * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
+ * but we have to make it longer because there are many devices whose firmware
+ * is just too slow for that.
+ */
+#define DEFAULT_SPLIT_TIMEOUT (2 * 8000)
+
#define CANON_OUI 0x000085
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
@@ -233,7 +240,7 @@ static void br_work(struct work_struct *work)
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
- time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
+ time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
if (!schedule_delayed_work(&card->br_work, 2 * HZ))
fw_card_put(card);
return;
@@ -316,7 +323,8 @@ static void bm_work(struct work_struct *work)
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;
- grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+ grace = time_after64(get_jiffies_64(),
+ card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
if ((is_next_generation(generation, card->bm_generation) &&
!card->bm_abdicate) ||
@@ -511,10 +519,11 @@ void fw_card_initialize(struct fw_card *card,
card->device = device;
card->current_tlabel = 0;
card->tlabel_mask = 0;
- card->split_timeout_hi = 0;
- card->split_timeout_lo = 800 << 19;
- card->split_timeout_cycles = 800;
- card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10);
+ card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+ card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+ card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
+ card->split_timeout_jiffies =
+ DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
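Plugging the new default into the expressions above is a useful sanity check; this is plain arithmetic over the values in the patch (8000 isochronous cycles per second), not additional code:

        /* DEFAULT_SPLIT_TIMEOUT = 2 * 8000 = 16000 cycles, i.e. 2 seconds */
        split_timeout_hi      = 16000 / 8000;                   /* 2 (seconds field)     */
        split_timeout_lo      = (16000 % 8000) << 19;           /* 0 (1/8000 s fraction) */
        split_timeout_cycles  = 16000;
        split_timeout_jiffies = DIV_ROUND_UP(16000 * HZ, 8000); /* 2 * HZ */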
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 48ae712e2101..62ac111af243 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -64,6 +64,7 @@ struct client {
struct idr resource_idr;
struct list_head event_list;
wait_queue_head_t wait;
+ wait_queue_head_t tx_flush_wait;
u64 bus_reset_closure;
struct fw_iso_context *iso_context;
@@ -251,6 +252,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
idr_init(&client->resource_idr);
INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
+ init_waitqueue_head(&client->tx_flush_wait);
INIT_LIST_HEAD(&client->phy_receiver_link);
kref_init(&client->kref);
@@ -520,10 +522,6 @@ static int release_client_resource(struct client *client, u32 handle,
static void release_transaction(struct client *client,
struct client_resource *resource)
{
- struct outbound_transaction_resource *r = container_of(resource,
- struct outbound_transaction_resource, resource);
-
- fw_cancel_transaction(client->device->card, &r->transaction);
}
static void complete_transaction(struct fw_card *card, int rcode,
@@ -540,22 +538,9 @@ static void complete_transaction(struct fw_card *card, int rcode,
memcpy(rsp->data, payload, rsp->length);
spin_lock_irqsave(&client->lock, flags);
- /*
- * 1. If called while in shutdown, the idr tree must be left untouched.
- * The idr handle will be removed and the client reference will be
- * dropped later.
- * 2. If the call chain was release_client_resource ->
- * release_transaction -> complete_transaction (instead of a normal
- * conclusion of the transaction), i.e. if this resource was already
- * unregistered from the idr, the client reference will be dropped
- * by release_client_resource and we must not drop it here.
- */
- if (!client->in_shutdown &&
- idr_find(&client->resource_idr, e->r.resource.handle)) {
- idr_remove(&client->resource_idr, e->r.resource.handle);
- /* Drop the idr's reference */
- client_put(client);
- }
+ idr_remove(&client->resource_idr, e->r.resource.handle);
+ if (client->in_shutdown)
+ wake_up(&client->tx_flush_wait);
spin_unlock_irqrestore(&client->lock, flags);
rsp->type = FW_CDEV_EVENT_RESPONSE;
@@ -575,7 +560,7 @@ static void complete_transaction(struct fw_card *card, int rcode,
queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
NULL, 0);
- /* Drop the transaction callback's reference */
+ /* Drop the idr's reference */
client_put(client);
}
@@ -614,9 +599,6 @@ static int init_request(struct client *client,
if (ret < 0)
goto failed;
- /* Get a reference for the transaction callback */
- client_get(client);
-
fw_send_request(client->device->card, &e->r.transaction,
request->tcode, destination_id, request->generation,
speed, request->offset, e->response.data,
@@ -1223,7 +1205,8 @@ static void iso_resource_work(struct work_struct *work)
todo = r->todo;
/* Allow 1000ms grace period for other reallocations. */
if (todo == ISO_RES_ALLOC &&
- time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+ time_before64(get_jiffies_64(),
+ client->device->card->reset_jiffies + HZ)) {
schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
skip = true;
} else {
@@ -1678,6 +1661,25 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
+static int is_outbound_transaction_resource(int id, void *p, void *data)
+{
+ struct client_resource *resource = p;
+
+ return resource->release == release_transaction;
+}
+
+static int has_outbound_transactions(struct client *client)
+{
+ int ret;
+
+ spin_lock_irq(&client->lock);
+ ret = idr_for_each(&client->resource_idr,
+ is_outbound_transaction_resource, NULL);
+ spin_unlock_irq(&client->lock);
+
+ return ret;
+}
+
static int shutdown_resource(int id, void *p, void *data)
{
struct client_resource *resource = p;
@@ -1713,6 +1715,8 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
client->in_shutdown = true;
spin_unlock_irq(&client->lock);
+ wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
+
idr_for_each(&client->resource_idr, shutdown_resource, client);
idr_remove_all(&client->resource_idr);
idr_destroy(&client->resource_idr);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 6113b896e790..9a262439e3a7 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -747,7 +747,8 @@ static void fw_device_shutdown(struct work_struct *work)
container_of(work, struct fw_device, work.work);
int minor = MINOR(device->device.devt);
- if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
+ if (time_before64(get_jiffies_64(),
+ device->card->reset_jiffies + SHUTDOWN_DELAY)
&& !list_empty(&device->card->link)) {
schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
return;
@@ -954,8 +955,9 @@ static void fw_device_init(struct work_struct *work)
device->config_rom_retries++;
schedule_delayed_work(&device->work, RETRY_DELAY);
} else {
- fw_notify("giving up on config rom for node id %x\n",
- device->node_id);
+ if (device->node->link_on)
+ fw_notify("giving up on config rom for node id %x\n",
+ device->node_id);
if (device->node == device->card->root_node)
fw_schedule_bm_work(device->card, 0);
fw_device_release(&device->device);
@@ -1168,9 +1170,12 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
switch (event) {
case FW_NODE_CREATED:
- case FW_NODE_LINK_ON:
- if (!node->link_on)
- break;
+ /*
+ * Attempt to scan the node, regardless of whether its self ID has
+ * the L (link active) flag set or not. Some broken devices
+ * send L=0 but have an up-and-running link; others send L=1
+ * without actually having a link.
+ */
create:
device = kzalloc(sizeof(*device), GFP_ATOMIC);
if (device == NULL)
@@ -1213,6 +1218,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
break;
case FW_NODE_INITIATED_RESET:
+ case FW_NODE_LINK_ON:
device = node->data;
if (device == NULL)
goto create;
@@ -1230,10 +1236,10 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
break;
case FW_NODE_UPDATED:
- if (!node->link_on || node->data == NULL)
+ device = node->data;
+ if (device == NULL)
break;
- device = node->data;
device->node_id = node->node_id;
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index c003fa4e2db1..481056df9268 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -235,45 +235,45 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
static int manage_channel(struct fw_card *card, int irm_id, int generation,
u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
{
- __be32 c, all, old;
- int i, ret = -EIO, retry = 5;
+ __be32 bit, all, old;
+ int channel, ret = -EIO, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
- for (i = 0; i < 32; i++) {
- if (!(channels_mask & 1 << i))
+ for (channel = 0; channel < 32; channel++) {
+ if (!(channels_mask & 1 << channel))
continue;
ret = -EBUSY;
- c = cpu_to_be32(1 << (31 - i));
- if ((old & c) != (all & c))
+ bit = cpu_to_be32(1 << (31 - channel));
+ if ((old & bit) != (all & bit))
continue;
data[0] = old;
- data[1] = old ^ c;
+ data[1] = old ^ bit;
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
offset, data, 8)) {
case RCODE_GENERATION:
/* A generation change frees all channels. */
- return allocate ? -EAGAIN : i;
+ return allocate ? -EAGAIN : channel;
case RCODE_COMPLETE:
if (data[0] == old)
- return i;
+ return channel;
old = data[0];
/* Is the IRM 1394a-2000 compliant? */
- if ((data[0] & c) == (data[1] & c))
+ if ((data[0] & bit) == (data[1] & bit))
continue;
/* 1394-1995 IRM, fall through to retry. */
default:
if (retry) {
retry--;
- i--;
+ channel--;
} else {
ret = -EIO;
}
@@ -362,3 +362,4 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
*channel = ret;
}
}
+EXPORT_SYMBOL(fw_iso_resource_manage);
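With fw_iso_resource_manage() now exported, a modular driver can ask the IRM for an isochronous channel plus bandwidth roughly as sketched below; card, generation and the requested bandwidth figure are assumptions taken from the caller's context, and real callers retry after the next bus reset if the call fails:

        __be32 buffer[2];
        int channel = -1;
        int bandwidth = 2048;   /* requested amount, in bandwidth allocation units */

        fw_iso_resource_manage(card, generation,
                               1ULL << 13,      /* only channel 13 is acceptable */
                               &channel, &bandwidth, true /* allocate */, buffer);
        if (channel < 0 || bandwidth <= 0)
                return -EBUSY;  /* nothing was (or stayed) allocated */

Deallocation uses the same call with allocate == false and the previously returned channel and bandwidth values.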
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 09be1a635505..193ed9233144 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -545,7 +545,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
*/
smp_wmb();
card->generation = generation;
- card->reset_jiffies = jiffies;
+ card->reset_jiffies = get_jiffies_64();
card->bm_node_id = 0xffff;
card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index f8dfcf1c6cbe..25e729cde2f7 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -147,9 +147,6 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
/* -iso */
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void fw_iso_resource_manage(struct fw_card *card, int generation,
- u64 channels_mask, int *channel, int *bandwidth,
- bool allocate, __be32 buffer[2]);
/* -topology */
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index bd3c61b6dd8d..f903d7b6f34a 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -208,9 +208,11 @@ struct fw_ohci {
struct context at_request_ctx;
struct context at_response_ctx;
+ u32 it_context_support;
u32 it_context_mask; /* unoccupied IT contexts */
struct iso_context *it_context_list;
u64 ir_context_channels; /* unoccupied channels */
+ u32 ir_context_support;
u32 ir_context_mask; /* unoccupied IR contexts */
struct iso_context *ir_context_list;
u64 mc_channels; /* channels in use by the multichannel IR context */
@@ -338,7 +340,7 @@ static void log_irqs(u32 evt)
!(evt & OHCI1394_busReset))
return;
- fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+ fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -351,6 +353,7 @@ static void log_irqs(u32 evt)
evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
+ evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
evt & OHCI1394_busReset ? " busReset" : "",
evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
@@ -1326,21 +1329,8 @@ static int at_context_queue_packet(struct context *ctx,
DESCRIPTOR_IRQ_ALWAYS |
DESCRIPTOR_BRANCH_ALWAYS);
- /*
- * If the controller and packet generations don't match, we need to
- * bail out and try again. If IntEvent.busReset is set, the AT context
- * is halted, so appending to the context and trying to run it is
- * futile. Most controllers do the right thing and just flush the AT
- * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
- * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
- * up stalling out. So we just bail out in software and try again
- * later, and everyone is happy.
- * FIXME: Test of IntEvent.busReset may no longer be necessary since we
- * flush AT queues in bus_reset_tasklet.
- * FIXME: Document how the locking works.
- */
- if (ohci->generation != packet->generation ||
- reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
+ /* FIXME: Document how the locking works. */
+ if (ohci->generation != packet->generation) {
if (packet->payload_mapped)
dma_unmap_single(ohci->card.device, payload_bus,
packet->payload_length, DMA_TO_DEVICE);
@@ -1590,6 +1580,47 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
}
+static void detect_dead_context(struct fw_ohci *ohci,
+ const char *name, unsigned int regs)
+{
+ u32 ctl;
+
+ ctl = reg_read(ohci, CONTROL_SET(regs));
+ if (ctl & CONTEXT_DEAD) {
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+ fw_error("DMA context %s has stopped, error code: %s\n",
+ name, evts[ctl & 0x1f]);
+#else
+ fw_error("DMA context %s has stopped, error code: %#x\n",
+ name, ctl & 0x1f);
+#endif
+ }
+}
+
+static void handle_dead_contexts(struct fw_ohci *ohci)
+{
+ unsigned int i;
+ char name[8];
+
+ detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
+ detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
+ detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
+ detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
+ for (i = 0; i < 32; ++i) {
+ if (!(ohci->it_context_support & (1 << i)))
+ continue;
+ sprintf(name, "IT%u", i);
+ detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
+ }
+ for (i = 0; i < 32; ++i) {
+ if (!(ohci->ir_context_support & (1 << i)))
+ continue;
+ sprintf(name, "IR%u", i);
+ detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
+ }
+ /* TODO: maybe try to flush and restart the dead contexts */
+}
+
static u32 cycle_timer_ticks(u32 cycle_timer)
{
u32 ticks;
@@ -1904,6 +1935,9 @@ static irqreturn_t irq_handler(int irq, void *data)
fw_notify("isochronous cycle inconsistent\n");
}
+ if (unlikely(event & OHCI1394_unrecoverableError))
+ handle_dead_contexts(ohci);
+
if (event & OHCI1394_cycle64Seconds) {
spin_lock(&ohci->lock);
update_bus_time(ohci);
@@ -2141,7 +2175,9 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_selfIDComplete |
OHCI1394_regAccessFail |
OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
+ OHCI1394_cycleInconsistent |
+ OHCI1394_unrecoverableError |
+ OHCI1394_cycleTooLong |
OHCI1394_masterIntEnable;
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
irqs |= OHCI1394_busReset;
@@ -2657,6 +2693,10 @@ static int ohci_start_iso(struct fw_iso_context *base,
u32 control = IR_CONTEXT_ISOCH_HEADER, match;
int index;
+ /* the controller cannot start without any queued packets */
+ if (ctx->context.last->branch_address == 0)
+ return -ENODATA;
+
switch (ctx->base.type) {
case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
@@ -2715,6 +2755,7 @@ static int ohci_stop_iso(struct fw_iso_context *base)
}
flush_writes(ohci);
context_stop(&ctx->context);
+ tasklet_kill(&ctx->context.tasklet);
return 0;
}
@@ -3207,15 +3248,17 @@ static int __devinit pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
- ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+ ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
+ ohci->ir_context_mask = ohci->ir_context_support;
ohci->n_ir = hweight32(ohci->ir_context_mask);
size = sizeof(struct iso_context) * ohci->n_ir;
ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
- ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+ ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+ ohci->it_context_mask = ohci->it_context_support;
ohci->n_it = hweight32(ohci->it_context_mask);
size = sizeof(struct iso_context) * ohci->n_it;
ohci->it_context_list = kzalloc(size, GFP_KERNEL);
@@ -3266,7 +3309,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
fail_disable:
pci_disable_device(dev);
fail_free:
- kfree(&ohci->card);
+ kfree(ohci);
pmac_ohci_off(dev);
fail:
if (err == -ENOMEM)
@@ -3310,7 +3353,7 @@ static void pci_remove(struct pci_dev *dev)
pci_iounmap(dev, ohci->registers);
pci_release_region(dev, 0);
pci_disable_device(dev);
- kfree(&ohci->card);
+ kfree(ohci);
pmac_ohci_off(dev);
fw_notify("Removed fw-ohci device.\n");
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index afa576a75a8e..77ed589b360d 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -472,18 +472,12 @@ static void complete_transaction(struct fw_card *card, int rcode,
* So this callback only sets the rcode if it hasn't already
* been set and only does the cleanup if the transaction
* failed and we didn't already get a status write.
- *
- * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
- * OXUF936QSE firmwares occasionally respond after Split_Timeout and
- * complete the ORB just fine. Note, we also get RCODE_CANCELLED
- * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
*/
spin_lock_irqsave(&card->lock, flags);
if (orb->rcode == -1)
orb->rcode = rcode;
-
- if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
+ if (orb->rcode != RCODE_COMPLETE) {
list_del(&orb->link);
spin_unlock_irqrestore(&card->lock, flags);
@@ -532,7 +526,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
list_for_each_entry_safe(orb, next, &list, link) {
retval = 0;
- fw_cancel_transaction(device->card, &orb->t);
+ if (fw_cancel_transaction(device->card, &orb->t) == 0)
+ continue;
orb->rcode = RCODE_CANCELLED;
orb->callback(orb, NULL);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 3c56afc5eb1b..b3a25a55ba23 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -145,4 +145,16 @@ config ISCSI_IBFT
detect iSCSI boot parameters dynamically during system boot, say Y.
Otherwise, say N.
+config SIGMA
+ tristate "SigmaStudio firmware loader"
+ depends on I2C
+ select CRC32
+ default n
+ help
+ Enable helper functions for working with Analog Devices SigmaDSP
+ parts and binary firmware files produced by Analog Devices SigmaStudio.
+
+ If unsure, say N here. Drivers that need these helpers will select
+ this option automatically.
+
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 20c17fca1232..00bb0b80a79f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
+obj-$(CONFIG_SIGMA) += sigma.o
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 69ad529d92fb..ea5ac2dc1233 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
}
/* generate SMI */
+ /* inb to force posted write through and make SMI happen now */
asm volatile (
- "outb %b0,%w1"
+ "outb %b0,%w1\n"
+ "inb %w1"
: /* no output args */
: "a" (smi_cmd->command_code),
"d" (smi_cmd->command_address),
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
new file mode 100644
index 000000000000..c19cd2c39fa6
--- /dev/null
+++ b/drivers/firmware/sigma.c
@@ -0,0 +1,115 @@
+/*
+ * Load Analog Devices SigmaStudio firmware files
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/sigma.h>
+
+/* Return: 0 == OK, < 0 == error, 1 == done (no more actions) */
+static int
+process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+{
+ struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
+ size_t len = sigma_action_len(sa);
+ int ret = 0;
+
+ pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
+ sa->instr, sa->addr, len);
+
+ switch (sa->instr) {
+ case SIGMA_ACTION_WRITEXBYTES:
+ case SIGMA_ACTION_WRITESINGLE:
+ case SIGMA_ACTION_WRITESAFELOAD:
+ if (ssfw->fw->size < ssfw->pos + len)
+ return -EINVAL;
+ ret = i2c_master_send(client, (void *)&sa->addr, len);
+ if (ret < 0)
+ return -EINVAL;
+ break;
+
+ case SIGMA_ACTION_DELAY:
+ ret = 0;
+ udelay(len);
+ len = 0;
+ break;
+
+ case SIGMA_ACTION_END:
+ return 1;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* if we get here, ret is 0 or the number of bytes sent */
+ ssfw->pos += sigma_action_size(sa, len);
+ return ssfw->pos == ssfw->fw->size;
+}
+
+static int
+process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
+{
+ pr_debug("%s: processing %p\n", __func__, ssfw);
+
+ while (1) {
+ int ret = process_sigma_action(client, ssfw);
+ pr_debug("%s: action returned %i\n", __func__, ret);
+ if (ret == 1)
+ return 0;
+ else if (ret)
+ return ret;
+ }
+}
+
+int process_sigma_firmware(struct i2c_client *client, const char *name)
+{
+ int ret;
+ struct sigma_firmware_header *ssfw_head;
+ struct sigma_firmware ssfw;
+ const struct firmware *fw;
+ u32 crc;
+
+ pr_debug("%s: loading firmware %s\n", __func__, name);
+
+ /* first load the blob */
+ ret = request_firmware(&fw, name, &client->dev);
+ if (ret) {
+ pr_debug("%s: request_firmware() failed with %i\n", __func__, ret);
+ return ret;
+ }
+ ssfw.fw = fw;
+
+ /* then verify the header */
+ ret = -EINVAL;
+ if (fw->size < sizeof(*ssfw_head))
+ goto done;
+
+ ssfw_head = (void *)fw->data;
+ if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
+ goto done;
+
+ crc = crc32(0, fw->data, fw->size);
+ pr_debug("%s: crc=%x\n", __func__, crc);
+ if (crc != ssfw_head->crc)
+ goto done;
+
+ ssfw.pos = sizeof(*ssfw_head);
+
+ /* finally process all of the actions */
+ ret = process_sigma_actions(client, &ssfw);
+
+ done:
+ release_firmware(fw);
+
+ pr_debug("%s: loaded %s\n", __func__, name);
+
+ return ret;
+}
+EXPORT_SYMBOL(process_sigma_firmware);
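The only exported entry point is process_sigma_firmware(), so a SigmaDSP codec driver would call it roughly like this (the call site and the firmware file name are hypothetical; request_firmware() resolves the name through the usual firmware loader paths):

        /* e.g. from an I2C codec driver's probe(), with <linux/sigma.h> included */
        ret = process_sigma_firmware(client, "SigmaDSP_program.bin");
        if (ret)
                dev_err(&client->dev,
                        "failed to load SigmaStudio firmware: %d\n", ret);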
diff --git a/drivers/gpio/74x164.c b/drivers/gpio/74x164.c
index d91ff4c282e9..84e070219839 100644
--- a/drivers/gpio/74x164.c
+++ b/drivers/gpio/74x164.c
@@ -133,7 +133,7 @@ exit_destroy:
return ret;
}
-static int gen_74x164_remove(struct spi_device *spi)
+static int __devexit gen_74x164_remove(struct spi_device *spi)
{
struct gen_74x164_chip *chip;
int ret;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 664660e56335..d8d0cda2641d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -100,18 +100,21 @@ config GPIO_VR41XX
Say yes here to support the NEC VR4100 series General-purpose I/O Uint
config GPIO_SCH
- tristate "Intel SCH GPIO"
- depends on GPIOLIB && PCI
+ tristate "Intel SCH/TunnelCreek GPIO"
+ depends on GPIOLIB && PCI && X86
select MFD_CORE
select LPC_SCH
help
- Say yes here to support GPIO interface on Intel Poulsbo SCH.
+ Say yes here to support GPIO interface on Intel Poulsbo SCH
+ or Intel Tunnel Creek processor.
The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
powered by the core power rail and are turned off during sleep
modes (S3 and higher). The remaining four GPIOs are powered by
the Intel SCH suspend power supply. These GPIOs remain
active during S3. The suspend powered GPIOs can be used to wake the
system from the Suspend-to-RAM state.
+ The Intel Tunnel Creek processor has 5 GPIOs powered by the
+ core power rail and 9 from the suspend power supply.
This driver can also be built as a module. If so, the module
will be called sch-gpio.
@@ -321,13 +324,13 @@ config GPIO_BT8XX
config GPIO_LANGWELL
bool "Intel Langwell/Penwell GPIO support"
- depends on PCI
+ depends on PCI && X86
help
Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_PCH
tristate "PCH GPIO of Intel Topcliff"
- depends on PCI
+ depends on PCI && X86
help
This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
which is an IOH(Input/Output Hub) for x86 embedded processor.
@@ -368,11 +371,11 @@ config GPIO_MAX7301
GPIO driver for Maxim MAX7301 SPI-based GPIO expander.
config GPIO_MCP23S08
- tristate "Microchip MCP23S08 I/O expander"
+ tristate "Microchip MCP23Sxx I/O expander"
depends on SPI_MASTER
help
- SPI driver for Microchip MCP23S08 I/O expander. This provides
- a GPIO interface supporting inputs and outputs.
+ SPI driver for Microchip MCP23S08/MCP23S17 I/O expanders.
+ This provides a GPIO interface supporting inputs and outputs.
config GPIO_MC33880
tristate "Freescale MC33880 high-side/low-side switch"
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0d05ea7d499b..6e16cba56ad2 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -373,7 +373,7 @@ static int __devexit cs5535_gpio_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver cs5535_gpio_drv = {
+static struct platform_driver cs5535_gpio_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -384,12 +384,12 @@ static struct platform_driver cs5535_gpio_drv = {
static int __init cs5535_gpio_init(void)
{
- return platform_driver_register(&cs5535_gpio_drv);
+ return platform_driver_register(&cs5535_gpio_driver);
}
static void __exit cs5535_gpio_exit(void)
{
- platform_driver_unregister(&cs5535_gpio_drv);
+ platform_driver_unregister(&cs5535_gpio_driver);
}
module_init(cs5535_gpio_init);
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c
index 813ac077e5d7..2514fb075f4a 100644
--- a/drivers/gpio/janz-ttl.c
+++ b/drivers/gpio/janz-ttl.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
@@ -149,7 +150,7 @@ static int __devinit ttl_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(dev, "no platform data\n");
ret = -ENXIO;
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 54d70a47afc1..560ab648cf18 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -187,31 +187,28 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct lnw_gpio *lnw = get_irq_data(irq);
- u32 base, gpio;
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct lnw_gpio *lnw = irq_data_get_irq_handler_data(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ u32 base, gpio, mask;
+ unsigned long pending;
void __iomem *gedr;
- u32 gedr_v;
/* check GPIO controller to check which pin triggered the interrupt */
for (base = 0; base < lnw->chip.ngpio; base += 32) {
gedr = gpio_reg(&lnw->chip, base, GEDR);
- gedr_v = readl(gedr);
- if (!gedr_v)
- continue;
- for (gpio = base; gpio < base + 32; gpio++)
- if (gedr_v & BIT(gpio % 32)) {
- pr_debug("pin %d triggered\n", gpio);
- generic_handle_irq(lnw->irq_base + gpio);
- }
- /* clear the edge detect status bit */
- writel(gedr_v, gedr);
+ pending = readl(gedr);
+ while (pending) {
+ gpio = __ffs(pending);
+ mask = BIT(gpio);
+ pending &= ~mask;
+ /* Clear before handling so we can't lose an edge */
+ writel(mask, gedr);
+ generic_handle_irq(lnw->irq_base + base + gpio);
+ }
}
- if (desc->chip->irq_eoi)
- desc->chip->irq_eoi(irq_get_irq_data(irq));
- else
- dev_warn(lnw->chip.dev, "missing EOI handler for irq %d\n", irq);
-
+ chip->irq_eoi(data);
}
static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
@@ -279,12 +276,12 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "langwell gpiochip_add error %d\n", retval);
goto err5;
}
- set_irq_data(pdev->irq, lnw);
- set_irq_chained_handler(pdev->irq, lnw_irq_handler);
+ irq_set_handler_data(pdev->irq, lnw);
+ irq_set_chained_handler(pdev->irq, lnw_irq_handler);
for (i = 0; i < lnw->chip.ngpio; i++) {
- set_irq_chip_and_handler_name(i + lnw->irq_base, &lnw_irqchip,
- handle_simple_irq, "demux");
- set_irq_chip_data(i + lnw->irq_base, lnw);
+ irq_set_chip_and_handler_name(i + lnw->irq_base, &lnw_irqchip,
+ handle_simple_irq, "demux");
+ irq_set_chip_data(i + lnw->irq_base, lnw);
}
spin_lock_init(&lnw->lock);
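The rewritten handler is the standard "drain the pending status bits" loop. A minimal generic sketch follows; status_reg and irq_base stand in for the driver's real register and IRQ base, and the important detail is that __ffs() returns the 0-based index of the lowest set bit:

        unsigned long pending = readl(status_reg);

        while (pending) {
                unsigned int bit = __ffs(pending);      /* 0-based */
                pending &= ~BIT(bit);
                writel(BIT(bit), status_reg);   /* ack first so an edge is not lost */
                generic_handle_irq(irq_base + bit);
        }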
diff --git a/drivers/gpio/mc33880.c b/drivers/gpio/mc33880.c
index 935479da6705..00f6d24c669d 100644
--- a/drivers/gpio/mc33880.c
+++ b/drivers/gpio/mc33880.c
@@ -146,7 +146,7 @@ exit_destroy:
return ret;
}
-static int mc33880_remove(struct spi_device *spi)
+static int __devexit mc33880_remove(struct spi_device *spi)
{
struct mc33880 *mc;
int ret;
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c
index 69f6f1955a31..40e076083ec0 100644
--- a/drivers/gpio/mcp23s08.c
+++ b/drivers/gpio/mcp23s08.c
@@ -10,7 +10,13 @@
#include <linux/spi/spi.h>
#include <linux/spi/mcp23s08.h>
#include <linux/slab.h>
+#include <asm/byteorder.h>
+/*
+ * MCP types supported by the driver
+ */
+#define MCP_TYPE_S08 0
+#define MCP_TYPE_S17 1
/* Registers are all 8 bits wide.
*
@@ -35,27 +41,38 @@
#define MCP_GPIO 0x09
#define MCP_OLAT 0x0a
+struct mcp23s08;
+
+struct mcp23s08_ops {
+ int (*read)(struct mcp23s08 *mcp, unsigned reg);
+ int (*write)(struct mcp23s08 *mcp, unsigned reg, unsigned val);
+ int (*read_regs)(struct mcp23s08 *mcp, unsigned reg,
+ u16 *vals, unsigned n);
+};
+
struct mcp23s08 {
struct spi_device *spi;
u8 addr;
- u8 cache[11];
+ u16 cache[11];
/* lock protects the cached values */
struct mutex lock;
struct gpio_chip chip;
struct work_struct work;
+
+ const struct mcp23s08_ops *ops;
};
-/* A given spi_device can represent up to four mcp23s08 chips
+/* A given spi_device can represent up to eight mcp23sxx chips
* sharing the same chipselect but using different addresses
* (e.g. chips #0 and #3 might be populated, but not #1 or $2).
* Driver data holds all the per-chip data.
*/
struct mcp23s08_driver_data {
unsigned ngpio;
- struct mcp23s08 *mcp[4];
+ struct mcp23s08 *mcp[8];
struct mcp23s08 chip[];
};
@@ -70,7 +87,7 @@ static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
return (status < 0) ? status : rx[0];
}
-static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, u8 val)
+static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
{
u8 tx[3];
@@ -81,17 +98,81 @@ static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, u8 val)
}
static int
-mcp23s08_read_regs(struct mcp23s08 *mcp, unsigned reg, u8 *vals, unsigned n)
+mcp23s08_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
{
- u8 tx[2];
+ u8 tx[2], *tmp;
+ int status;
if ((n + reg) > sizeof mcp->cache)
return -EINVAL;
tx[0] = mcp->addr | 0x01;
tx[1] = reg;
- return spi_write_then_read(mcp->spi, tx, sizeof tx, vals, n);
+
+ tmp = (u8 *)vals;
+ status = spi_write_then_read(mcp->spi, tx, sizeof tx, tmp, n);
+ if (status >= 0) {
+ while (n--)
+ vals[n] = tmp[n]; /* expand to 16bit */
+ }
+ return status;
+}
+
+static int mcp23s17_read(struct mcp23s08 *mcp, unsigned reg)
+{
+ u8 tx[2], rx[2];
+ int status;
+
+ tx[0] = mcp->addr | 0x01;
+ tx[1] = reg << 1;
+ status = spi_write_then_read(mcp->spi, tx, sizeof tx, rx, sizeof rx);
+ return (status < 0) ? status : (rx[0] | (rx[1] << 8));
+}
+
+static int mcp23s17_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
+{
+ u8 tx[4];
+
+ tx[0] = mcp->addr;
+ tx[1] = reg << 1;
+ tx[2] = val;
+ tx[3] = val >> 8;
+ return spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+}
+
+static int
+mcp23s17_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
+{
+ u8 tx[2];
+ int status;
+
+ if ((n + reg) > sizeof mcp->cache)
+ return -EINVAL;
+ tx[0] = mcp->addr | 0x01;
+ tx[1] = reg << 1;
+
+ status = spi_write_then_read(mcp->spi, tx, sizeof tx,
+ (u8 *)vals, n * 2);
+ if (status >= 0) {
+ while (n--)
+ vals[n] = __le16_to_cpu((__le16)vals[n]);
+ }
+
+ return status;
}
+static const struct mcp23s08_ops mcp23s08_ops = {
+ .read = mcp23s08_read,
+ .write = mcp23s08_write,
+ .read_regs = mcp23s08_read_regs,
+};
+
+static const struct mcp23s08_ops mcp23s17_ops = {
+ .read = mcp23s17_read,
+ .write = mcp23s17_write,
+ .read_regs = mcp23s17_read_regs,
+};
+
+
/*----------------------------------------------------------------------*/
static int mcp23s08_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -101,7 +182,7 @@ static int mcp23s08_direction_input(struct gpio_chip *chip, unsigned offset)
mutex_lock(&mcp->lock);
mcp->cache[MCP_IODIR] |= (1 << offset);
- status = mcp23s08_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
+ status = mcp->ops->write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
mutex_unlock(&mcp->lock);
return status;
}
@@ -114,7 +195,7 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
mutex_lock(&mcp->lock);
/* REVISIT reading this clears any IRQ ... */
- status = mcp23s08_read(mcp, MCP_GPIO);
+ status = mcp->ops->read(mcp, MCP_GPIO);
if (status < 0)
status = 0;
else {
@@ -127,20 +208,20 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
static int __mcp23s08_set(struct mcp23s08 *mcp, unsigned mask, int value)
{
- u8 olat = mcp->cache[MCP_OLAT];
+ unsigned olat = mcp->cache[MCP_OLAT];
if (value)
olat |= mask;
else
olat &= ~mask;
mcp->cache[MCP_OLAT] = olat;
- return mcp23s08_write(mcp, MCP_OLAT, olat);
+ return mcp->ops->write(mcp, MCP_OLAT, olat);
}
static void mcp23s08_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
- u8 mask = 1 << offset;
+ unsigned mask = 1 << offset;
mutex_lock(&mcp->lock);
__mcp23s08_set(mcp, mask, value);
@@ -151,14 +232,14 @@ static int
mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
{
struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
- u8 mask = 1 << offset;
+ unsigned mask = 1 << offset;
int status;
mutex_lock(&mcp->lock);
status = __mcp23s08_set(mcp, mask, value);
if (status == 0) {
mcp->cache[MCP_IODIR] &= ~mask;
- status = mcp23s08_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
+ status = mcp->ops->write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
}
mutex_unlock(&mcp->lock);
return status;
@@ -184,16 +265,16 @@ static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
mcp = container_of(chip, struct mcp23s08, chip);
/* NOTE: we only handle one bank for now ... */
- bank = '0' + ((mcp->addr >> 1) & 0x3);
+ bank = '0' + ((mcp->addr >> 1) & 0x7);
mutex_lock(&mcp->lock);
- t = mcp23s08_read_regs(mcp, 0, mcp->cache, sizeof mcp->cache);
+ t = mcp->ops->read_regs(mcp, 0, mcp->cache, ARRAY_SIZE(mcp->cache));
if (t < 0) {
seq_printf(s, " I/O ERROR %d\n", t);
goto done;
}
- for (t = 0, mask = 1; t < 8; t++, mask <<= 1) {
+ for (t = 0, mask = 1; t < chip->ngpio; t++, mask <<= 1) {
const char *label;
label = gpiochip_is_requested(chip, t);
@@ -219,28 +300,33 @@ done:
/*----------------------------------------------------------------------*/
static int mcp23s08_probe_one(struct spi_device *spi, unsigned addr,
- unsigned base, unsigned pullups)
+ unsigned type, unsigned base, unsigned pullups)
{
struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
struct mcp23s08 *mcp = data->mcp[addr];
int status;
- int do_update = 0;
mutex_init(&mcp->lock);
mcp->spi = spi;
mcp->addr = 0x40 | (addr << 1);
- mcp->chip.label = "mcp23s08",
-
mcp->chip.direction_input = mcp23s08_direction_input;
mcp->chip.get = mcp23s08_get;
mcp->chip.direction_output = mcp23s08_direction_output;
mcp->chip.set = mcp23s08_set;
mcp->chip.dbg_show = mcp23s08_dbg_show;
+ if (type == MCP_TYPE_S17) {
+ mcp->ops = &mcp23s17_ops;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23s17";
+ } else {
+ mcp->ops = &mcp23s08_ops;
+ mcp->chip.ngpio = 8;
+ mcp->chip.label = "mcp23s08";
+ }
mcp->chip.base = base;
- mcp->chip.ngpio = 8;
mcp->chip.can_sleep = 1;
mcp->chip.dev = &spi->dev;
mcp->chip.owner = THIS_MODULE;
@@ -248,45 +334,39 @@ static int mcp23s08_probe_one(struct spi_device *spi, unsigned addr,
/* verify MCP_IOCON.SEQOP = 0, so sequential reads work,
* and MCP_IOCON.HAEN = 1, so we work with all chips.
*/
- status = mcp23s08_read(mcp, MCP_IOCON);
+ status = mcp->ops->read(mcp, MCP_IOCON);
if (status < 0)
goto fail;
if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN)) {
- status &= ~IOCON_SEQOP;
- status |= IOCON_HAEN;
- status = mcp23s08_write(mcp, MCP_IOCON, (u8) status);
+ /* mcp23s17 has IOCON twice, make sure they are in sync */
+ status &= ~(IOCON_SEQOP | (IOCON_SEQOP << 8));
+ status |= IOCON_HAEN | (IOCON_HAEN << 8);
+ status = mcp->ops->write(mcp, MCP_IOCON, status);
if (status < 0)
goto fail;
}
/* configure ~100K pullups */
- status = mcp23s08_write(mcp, MCP_GPPU, pullups);
+ status = mcp->ops->write(mcp, MCP_GPPU, pullups);
if (status < 0)
goto fail;
- status = mcp23s08_read_regs(mcp, 0, mcp->cache, sizeof mcp->cache);
+ status = mcp->ops->read_regs(mcp, 0, mcp->cache, ARRAY_SIZE(mcp->cache));
if (status < 0)
goto fail;
/* disable inverter on input */
if (mcp->cache[MCP_IPOL] != 0) {
mcp->cache[MCP_IPOL] = 0;
- do_update = 1;
+ status = mcp->ops->write(mcp, MCP_IPOL, 0);
+ if (status < 0)
+ goto fail;
}
/* disable irqs */
if (mcp->cache[MCP_GPINTEN] != 0) {
mcp->cache[MCP_GPINTEN] = 0;
- do_update = 1;
- }
-
- if (do_update) {
- u8 tx[4];
-
- tx[0] = mcp->addr;
- tx[1] = MCP_IPOL;
- memcpy(&tx[2], &mcp->cache[MCP_IPOL], sizeof(tx) - 2);
- status = spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+ status = mcp->ops->write(mcp, MCP_GPINTEN, 0);
if (status < 0)
goto fail;
}
@@ -305,19 +385,26 @@ static int mcp23s08_probe(struct spi_device *spi)
unsigned addr;
unsigned chips = 0;
struct mcp23s08_driver_data *data;
- int status;
+ int status, type;
unsigned base;
+ type = spi_get_device_id(spi)->driver_data;
+
pdata = spi->dev.platform_data;
if (!pdata || !gpio_is_valid(pdata->base)) {
dev_dbg(&spi->dev, "invalid or missing platform data\n");
return -EINVAL;
}
- for (addr = 0; addr < 4; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
if (!pdata->chip[addr].is_present)
continue;
chips++;
+ if ((type == MCP_TYPE_S08) && (addr > 3)) {
+ dev_err(&spi->dev,
+ "mcp23s08 only supports address 0..3\n");
+ return -EINVAL;
+ }
}
if (!chips)
return -ENODEV;
@@ -329,16 +416,17 @@ static int mcp23s08_probe(struct spi_device *spi)
spi_set_drvdata(spi, data);
base = pdata->base;
- for (addr = 0; addr < 4; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
if (!pdata->chip[addr].is_present)
continue;
chips--;
data->mcp[addr] = &data->chip[chips];
- status = mcp23s08_probe_one(spi, addr, base,
- pdata->chip[addr].pullups);
+ status = mcp23s08_probe_one(spi, addr, type, base,
+ pdata->chip[addr].pullups);
if (status < 0)
goto fail;
- base += 8;
+
+ base += (type == MCP_TYPE_S17) ? 16 : 8;
}
data->ngpio = base - pdata->base;
@@ -358,7 +446,7 @@ static int mcp23s08_probe(struct spi_device *spi)
return 0;
fail:
- for (addr = 0; addr < 4; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) {
int tmp;
if (!data->mcp[addr])
@@ -388,7 +476,7 @@ static int mcp23s08_remove(struct spi_device *spi)
}
}
- for (addr = 0; addr < 4; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) {
int tmp;
if (!data->mcp[addr])
@@ -405,9 +493,17 @@ static int mcp23s08_remove(struct spi_device *spi)
return status;
}
+static const struct spi_device_id mcp23s08_ids[] = {
+ { "mcp23s08", MCP_TYPE_S08 },
+ { "mcp23s17", MCP_TYPE_S17 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
+
static struct spi_driver mcp23s08_driver = {
.probe = mcp23s08_probe,
.remove = mcp23s08_remove,
+ .id_table = mcp23s08_ids,
.driver = {
.name = "mcp23s08",
.owner = THIS_MODULE,
@@ -432,4 +528,3 @@ static void __exit mcp23s08_exit(void)
module_exit(mcp23s08_exit);
MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:mcp23s08");
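For reference, a hypothetical board file would describe one or more MCP23S17s on a shared chipselect through the existing platform data plus the new SPI id table entry, along these lines (all numbers are made up; <linux/spi/spi.h> and <linux/spi/mcp23s08.h> are assumed to be included):

static struct mcp23s08_platform_data mcp_pdata = {
        .chip[0] = { .is_present = true, .pullups = 0 },
        .chip[1] = { .is_present = true, .pullups = 0 },
        .base    = 200,                 /* 2 chips * 16 GPIOs -> gpios 200..231 */
};

static struct spi_board_info board_spi_devices[] __initdata = {
        {
                .modalias       = "mcp23s17",   /* matched via the new id table */
                .platform_data  = &mcp_pdata,
                .max_speed_hz   = 1000000,
                .bus_num        = 1,
                .chip_select    = 0,
        },
};

The board code would then pass board_spi_devices to spi_register_board_info() during machine init.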
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index b473429eee75..2fc25dec7cf5 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -462,7 +462,8 @@ pca953x_get_alt_pdata(struct i2c_client *client)
{
struct pca953x_platform_data *pdata;
struct device_node *node;
- const uint16_t *val;
+ const __be32 *val;
+ int size;
node = client->dev.of_node;
if (node == NULL)
@@ -475,13 +476,13 @@ pca953x_get_alt_pdata(struct i2c_client *client)
}
pdata->gpio_base = -1;
- val = of_get_property(node, "linux,gpio-base", NULL);
+ val = of_get_property(node, "linux,gpio-base", &size);
if (val) {
- if (*val < 0)
- dev_warn(&client->dev,
- "invalid gpio-base in device tree\n");
+ if (size != sizeof(*val))
+ dev_warn(&client->dev, "%s: wrong linux,gpio-base\n",
+ node->full_name);
else
- pdata->gpio_base = *val;
+ pdata->gpio_base = be32_to_cpup(val);
}
val = of_get_property(node, "polarity", NULL);
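The pca953x change is the general recipe for reading a single-cell device-tree property: ask of_get_property() for the length, check it against one __be32 cell, and convert with be32_to_cpup(). In condensed form, reusing the names from the code above:

        int len;
        const __be32 *prop = of_get_property(node, "linux,gpio-base", &len);

        if (prop && len == sizeof(*prop))
                pdata->gpio_base = be32_to_cpup(prop);  /* DT cells are big-endian */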
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 2975d22daffe..838ddbdf90cc 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -232,7 +232,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
-static int pl061_probe(struct amba_device *dev, struct amba_id *id)
+static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl061_platform_data *pdata;
struct pl061_gpio *chip;
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
index 897e0577e65e..a9bda881935a 100644
--- a/drivers/gpio/rdc321x-gpio.c
+++ b/drivers/gpio/rdc321x-gpio.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/gpio.h>
#include <linux/mfd/rdc321x.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
struct rdc321x_gpio {
@@ -135,7 +136,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
struct rdc321x_gpio *rdc321x_gpio_dev;
struct rdc321x_gpio_pdata *pdata;
- pdata = platform_get_drvdata(pdev);
+ pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
diff --git a/drivers/gpio/sch_gpio.c b/drivers/gpio/sch_gpio.c
index 583521352c16..56060421cdff 100644
--- a/drivers/gpio/sch_gpio.c
+++ b/drivers/gpio/sch_gpio.c
@@ -25,6 +25,7 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
+#include <linux/pci_ids.h>
#include <linux/gpio.h>
@@ -187,7 +188,11 @@ static struct gpio_chip sch_gpio_resume = {
static int __devinit sch_gpio_probe(struct platform_device *pdev)
{
struct resource *res;
- int err;
+ int err, id;
+
+ id = pdev->id;
+ if (!id)
+ return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
@@ -198,12 +203,40 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
gpio_ba = res->start;
- sch_gpio_core.base = 0;
- sch_gpio_core.ngpio = 10;
- sch_gpio_core.dev = &pdev->dev;
+ switch (id) {
+ case PCI_DEVICE_ID_INTEL_SCH_LPC:
+ sch_gpio_core.base = 0;
+ sch_gpio_core.ngpio = 10;
+
+ sch_gpio_resume.base = 10;
+ sch_gpio_resume.ngpio = 4;
+
+ /*
+ * GPIO[6:0] enabled by default
+ * GPIO7 is configured by the CMC as SLPIOVR
+ * Enable GPIO[9:8] core powered gpios explicitly
+ */
+ outb(0x3, gpio_ba + CGEN + 1);
+ /*
+ * SUS_GPIO[2:0] enabled by default
+ * Enable SUS_GPIO3 resume powered gpio explicitly
+ */
+ outb(0x8, gpio_ba + RGEN);
+ break;
+
+ case PCI_DEVICE_ID_INTEL_ITC_LPC:
+ sch_gpio_core.base = 0;
+ sch_gpio_core.ngpio = 5;
+
+ sch_gpio_resume.base = 5;
+ sch_gpio_resume.ngpio = 9;
+ break;
+
+ default:
+ return -ENODEV;
+ }
- sch_gpio_resume.base = 10;
- sch_gpio_resume.ngpio = 4;
+ sch_gpio_core.dev = &pdev->dev;
sch_gpio_resume.dev = &pdev->dev;
err = gpiochip_add(&sch_gpio_core);
@@ -214,18 +247,6 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
if (err < 0)
goto err_sch_gpio_resume;
- /*
- * GPIO[6:0] enabled by default
- * GPIO7 is configured by the CMC as SLPIOVR
- * Enable GPIO[9:8] core powered gpios explicitly
- */
- outb(0x3, gpio_ba + CGEN + 1);
- /*
- * SUS_GPIO[2:0] enabled by default
- * Enable SUS_GPIO3 resume powered gpio explicitly
- */
- outb(0x8, gpio_ba + RGEN);
-
return 0;
err_sch_gpio_resume:
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index e60be0015c9b..d2f874c3d3d5 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -25,6 +25,8 @@
#include <linux/workqueue.h>
#include <linux/i2c/sx150x.h>
+#define NO_UPDATE_PENDING -1
+
struct sx150x_device_data {
u8 reg_pullup;
u8 reg_pulldn;
@@ -47,8 +49,11 @@ struct sx150x_chip {
const struct sx150x_device_data *dev_cfg;
int irq_summary;
int irq_base;
+ int irq_update;
u32 irq_sense;
- unsigned long irq_set_type_pending;
+ u32 irq_masked;
+ u32 dev_sense;
+ u32 dev_masked;
struct irq_chip irq_chip;
struct mutex lock;
};
@@ -312,9 +317,8 @@ static void sx150x_irq_mask(struct irq_data *d)
chip = container_of(ic, struct sx150x_chip, irq_chip);
n = d->irq - chip->irq_base;
-
- sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
- sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
+ chip->irq_masked |= (1 << n);
+ chip->irq_update = n;
}
static void sx150x_irq_unmask(struct irq_data *d)
@@ -326,9 +330,8 @@ static void sx150x_irq_unmask(struct irq_data *d)
chip = container_of(ic, struct sx150x_chip, irq_chip);
n = d->irq - chip->irq_base;
- sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
- sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
- chip->irq_sense >> (n * 2));
+ chip->irq_masked &= ~(1 << n);
+ chip->irq_update = n;
}
static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -350,7 +353,7 @@ static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
chip->irq_sense &= ~(3UL << (n * 2));
chip->irq_sense |= val << (n * 2);
- chip->irq_set_type_pending |= BIT(n);
+ chip->irq_update = n;
return 0;
}
@@ -404,15 +407,29 @@ static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
chip = container_of(ic, struct sx150x_chip, irq_chip);
- while (chip->irq_set_type_pending) {
- n = __ffs(chip->irq_set_type_pending);
- chip->irq_set_type_pending &= ~BIT(n);
- if (!(irq_to_desc(n + chip->irq_base)->status & IRQ_MASKED))
- sx150x_write_cfg(chip, n, 2,
- chip->dev_cfg->reg_sense,
- chip->irq_sense >> (n * 2));
- }
+ if (chip->irq_update == NO_UPDATE_PENDING)
+ goto out;
+
+ n = chip->irq_update;
+ chip->irq_update = NO_UPDATE_PENDING;
+ /* Avoid updates if nothing changed */
+ if (chip->dev_sense == chip->irq_sense &&
+ chip->dev_masked == chip->irq_masked)
+ goto out;
+
+ chip->dev_sense = chip->irq_sense;
+ chip->dev_masked = chip->irq_masked;
+
+ if (chip->irq_masked & (1 << n)) {
+ sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
+ sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
+ } else {
+ sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
+ sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
+ chip->irq_sense >> (n * 2));
+ }
+out:
mutex_unlock(&chip->lock);
}
@@ -445,8 +462,11 @@ static void sx150x_init_chip(struct sx150x_chip *chip,
chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
chip->irq_summary = -1;
chip->irq_base = -1;
+ chip->irq_masked = ~0;
chip->irq_sense = 0;
- chip->irq_set_type_pending = 0;
+ chip->dev_masked = ~0;
+ chip->dev_sense = 0;
+ chip->irq_update = NO_UPDATE_PENDING;
}
static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
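The reworked sx150x interrupt code follows the usual pattern for irq_chips that live behind a sleeping bus: irq_mask/irq_unmask/irq_set_type run in atomic context and therefore only update cached state, and the cache is flushed to the chip from irq_bus_sync_unlock(), which is allowed to sleep. A condensed sketch of that split, with hypothetical foo_* names and fields:

static void foo_irq_mask(struct irq_data *d)            /* atomic: cache only */
{
        struct foo_chip *chip = irq_data_get_irq_chip_data(d);
        unsigned int n = d->irq - chip->irq_base;

        chip->irq_masked |= BIT(n);
        chip->irq_update = n;
}

static void foo_irq_bus_sync_unlock(struct irq_data *d) /* may sleep */
{
        struct foo_chip *chip = irq_data_get_irq_chip_data(d);

        if (chip->irq_update != NO_UPDATE_PENDING)
                foo_flush_irq_config(chip, chip->irq_update);   /* I2C writes */
        chip->irq_update = NO_UPDATE_PENDING;
        mutex_unlock(&chip->lock);      /* taken in the matching irq_bus_lock() */
}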
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index 58c8f30352dd..ffcd815b8b8b 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/timb_gpio.h>
@@ -228,7 +229,7 @@ static int __devinit timbgpio_probe(struct platform_device *pdev)
struct gpio_chip *gc;
struct timbgpio *tgpio;
struct resource *iomem;
- struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timbgpio_platform_data *pdata = mfd_get_data(pdev);
int irq = platform_get_irq(pdev, 0);
if (!pdata || pdata->nr_pins > 32) {
@@ -319,14 +320,13 @@ err_mem:
static int __devexit timbgpio_remove(struct platform_device *pdev)
{
int err;
- struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
struct timbgpio *tgpio = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
if (irq >= 0 && tgpio->irq_base > 0) {
int i;
- for (i = 0; i < pdata->nr_pins; i++) {
+ for (i = 0; i < tgpio->gpio.ngpio; i++) {
set_irq_chip(tgpio->irq_base + i, NULL);
set_irq_chip_data(tgpio->irq_base + i, NULL);
}
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
index 463aed9403db..34664587a74e 100644
--- a/drivers/gpu/drm/drm_sman.c
+++ b/drivers/gpu/drm/drm_sman.c
@@ -59,9 +59,7 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
{
int ret = 0;
- sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
- sizeof(*sman->mm),
- GFP_KERNEL);
+ sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
if (!sman->mm) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 449650545bb4..5004724ea57e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -383,6 +383,7 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
+ u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 7e992a8e9098..da474153a0a2 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -796,6 +796,9 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+ /* Hardware status page */
+ dev_priv->saveHWS = I915_READ(HWS_PGA);
+
i915_save_display(dev);
/* Interrupt state */
@@ -842,6 +845,9 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+ /* Hardware status page */
+ I915_WRITE(HWS_PGA, dev_priv->saveHWS);
+
i915_restore_display(dev);
/* Interrupt state */
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index d3a9c6e02477..00a55dfdba82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -88,18 +88,20 @@ static const struct backlight_ops nv50_bl_ops = {
.update_status = nv50_set_intensity,
};
-static int nouveau_nv40_backlight_init(struct drm_device *dev)
+static int nouveau_nv40_backlight_init(struct drm_connector *connector)
{
- struct backlight_properties props;
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
struct backlight_device *bd;
if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
return 0;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 31;
- bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+ bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
&nv40_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
@@ -111,18 +113,20 @@ static int nouveau_nv40_backlight_init(struct drm_device *dev)
return 0;
}
-static int nouveau_nv50_backlight_init(struct drm_device *dev)
+static int nouveau_nv50_backlight_init(struct drm_connector *connector)
{
- struct backlight_properties props;
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
struct backlight_device *bd;
if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
return 0;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 1025;
- bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+ bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
&nv50_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
@@ -133,8 +137,9 @@ static int nouveau_nv50_backlight_init(struct drm_device *dev)
return 0;
}
-int nouveau_backlight_init(struct drm_device *dev)
+int nouveau_backlight_init(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
#ifdef CONFIG_ACPI
@@ -147,9 +152,9 @@ int nouveau_backlight_init(struct drm_device *dev)
switch (dev_priv->card_type) {
case NV_40:
- return nouveau_nv40_backlight_init(dev);
+ return nouveau_nv40_backlight_init(connector);
case NV_50:
- return nouveau_nv50_backlight_init(dev);
+ return nouveau_nv50_backlight_init(connector);
default:
break;
}
@@ -157,8 +162,9 @@ int nouveau_backlight_init(struct drm_device *dev)
return 0;
}
-void nouveau_backlight_exit(struct drm_device *dev)
+void nouveau_backlight_exit(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (dev_priv->backlight) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 390d82c3c4b0..7ae151109a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -116,6 +116,10 @@ nouveau_connector_destroy(struct drm_connector *connector)
nouveau_connector_hotplug, connector);
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ nouveau_backlight_exit(connector);
+
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -894,6 +898,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
drm_sysfs_connector_add(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ nouveau_backlight_init(connector);
+
dcb->drm = connector;
return dcb->drm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 06111887b789..fff180a99867 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -999,15 +999,15 @@ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector
/* nouveau_backlight.c */
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
-extern int nouveau_backlight_init(struct drm_device *);
-extern void nouveau_backlight_exit(struct drm_device *);
+extern int nouveau_backlight_init(struct drm_connector *);
+extern void nouveau_backlight_exit(struct drm_connector *);
#else
-static inline int nouveau_backlight_init(struct drm_device *dev)
+static inline int nouveau_backlight_init(struct drm_connector *dev)
{
return 0;
}
-static inline void nouveau_backlight_exit(struct drm_device *dev) { }
+static inline void nouveau_backlight_exit(struct drm_connector *dev) { }
#endif
/* nouveau_bios.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 05294910e135..4fcbd091a117 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -704,10 +704,6 @@ nouveau_card_init(struct drm_device *dev)
goto out_fence;
}
- ret = nouveau_backlight_init(dev);
- if (ret)
- NV_ERROR(dev, "Error %d registering backlight\n", ret);
-
nouveau_fbcon_init(dev);
drm_kms_helper_poll_init(dev);
return 0;
@@ -759,8 +755,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
- nouveau_backlight_exit(dev);
-
if (!engine->graph.accel_blocked) {
nouveau_fence_fini(dev);
nouveau_channel_put_unlocked(&dev_priv->channel);
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 1c02d23f6fcc..9746fee59f56 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,6 +1,7 @@
config DRM_RADEON_KMS
bool "Enable modesetting on radeon by default - NEW DRIVER"
depends on DRM_RADEON
+ select BACKLIGHT_CLASS_DEVICE
help
Choose this option if you want kernel modesetting enabled by default.
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b9427e689cf3..941080a77940 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2987,7 +2987,7 @@ int evergreen_resume(struct radeon_device *rdev)
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index fcc23e4e0b3c..f2204cb1ccdf 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3617,7 +3617,7 @@ int r100_ib_test(struct radeon_device *rdev)
if (i < rdev->usec_timeout) {
DRM_INFO("ib test succeeded in %u usecs\n", i);
} else {
- DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
@@ -3637,13 +3637,13 @@ int r100_ib_init(struct radeon_device *rdev)
r = radeon_ib_pool_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failled testing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
@@ -3799,12 +3799,12 @@ static int r100_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 069efa8c8ecf..8713731fa014 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1401,12 +1401,12 @@ static int r300_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 0b59ed7c7d2c..417fab81812f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -260,13 +260,13 @@ static int r420_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r420_cp_errata_init(rdev);
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 2ce80d976568..3081d07f8de5 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -193,12 +193,12 @@ static int r520_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 12fdebf9aed8..be271c42de4d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2464,7 +2464,7 @@ int r600_resume(struct radeon_device *rdev)
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3f3c9aac46cc..28c7961cd19b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,10 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
+extern void
+radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector);
+
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -1526,6 +1530,17 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
+ if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ struct drm_encoder *drm_encoder;
+
+ list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder;
+
+ radeon_encoder = to_radeon_encoder(drm_encoder);
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
+ radeon_legacy_backlight_init(radeon_encoder, connector);
+ }
+ }
return;
failed:
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 66c9af1b3d96..41a5d48e657b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -889,7 +889,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
if (rdev->flags & RADEON_IS_MOBILITY) {
- /* A temporal workaround for the occational blanking on certain laptop panels.
+ /* A temporary workaround for the occasional blanking on certain laptop panels.
This appears to related to the PLL divider registers (fail to lock?).
It occurs even when all dividers are the same with their old settings.
In this case we really don't need to fiddle with PLL registers.
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 59f834ba283d..5b54268ed6b2 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -28,6 +28,10 @@
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
+#include <linux/backlight.h>
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
{
@@ -39,7 +43,7 @@ static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
radeon_encoder->active_device = 0;
}
-static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -47,15 +51,23 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
int panel_pwr_delay = 2000;
bool is_mac = false;
+ uint8_t backlight_level;
DRM_DEBUG_KMS("\n");
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
panel_pwr_delay = lvds->panel_pwr_delay;
+ if (lvds->bl_dev)
+ backlight_level = lvds->backlight_level;
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
panel_pwr_delay = lvds->panel_pwr_delay;
+ if (lvds->bl_dev)
+ backlight_level = lvds->backlight_level;
}
}
@@ -82,11 +94,13 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
- lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
- lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
+ lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS |
+ RADEON_LVDS_BL_MOD_LEVEL_MASK);
+ lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN |
+ RADEON_LVDS_DIGON | RADEON_LVDS_BLON |
+ (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
if (is_mac)
lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
- lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
break;
@@ -95,7 +109,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_OFF:
pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
- lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
if (is_mac) {
lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
@@ -119,6 +132,25 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
}
+static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DRM_DEBUG("\n");
+
+ if (radeon_encoder->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ lvds->dpms_mode = mode;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ lvds->dpms_mode = mode;
+ }
+ }
+
+ radeon_legacy_lvds_update(encoder, mode);
+}
+
static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
@@ -237,9 +269,222 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.disable = radeon_legacy_encoder_disable,
};
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MAX_RADEON_LEVEL 0xFF
+
+struct radeon_backlight_privdata {
+ struct radeon_encoder *encoder;
+ uint8_t negative;
+};
+
+static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ uint8_t level;
+
+ /* Convert brightness to hardware level */
+ if (bd->props.brightness < 0)
+ level = 0;
+ else if (bd->props.brightness > MAX_RADEON_LEVEL)
+ level = MAX_RADEON_LEVEL;
+ else
+ level = bd->props.brightness;
+
+ if (pdata->negative)
+ level = MAX_RADEON_LEVEL - level;
+
+ return level;
+}
+
+static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int dpms_mode = DRM_MODE_DPMS_ON;
+
+ if (radeon_encoder->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ dpms_mode = lvds->dpms_mode;
+ lvds->backlight_level = radeon_legacy_lvds_level(bd);
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ dpms_mode = lvds->dpms_mode;
+ lvds->backlight_level = radeon_legacy_lvds_level(bd);
+ }
+ }
+
+ if (bd->props.brightness > 0)
+ radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
+ else
+ radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF);
+
+ return 0;
+}
+
+static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint8_t backlight_level;
+
+ backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+ RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+ return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level;
+}
+
+static const struct backlight_ops radeon_backlight_ops = {
+ .get_brightness = radeon_legacy_backlight_get_brightness,
+ .update_status = radeon_legacy_backlight_update_status,
+};
+
+void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ struct radeon_backlight_privdata *pdata;
+ uint8_t backlight_level;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+ if (!pmac_has_backlight_type("ati") &&
+ !pmac_has_backlight_type("mnca"))
+ return;
+#endif
+
+ pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+ if (!pdata) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto error;
+ }
+
+ props.max_brightness = MAX_RADEON_LEVEL;
+ props.type = BACKLIGHT_RAW;
+ bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+ pdata, &radeon_backlight_ops, &props);
+ if (IS_ERR(bd)) {
+ DRM_ERROR("Backlight registration failed\n");
+ goto error;
+ }
+
+ pdata->encoder = radeon_encoder;
+
+ backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+ RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+ /* First, try to detect backlight level sense based on the assumption
+ * that the firmware set it up at full brightness
+ */
+ if (backlight_level == 0)
+ pdata->negative = true;
+ else if (backlight_level == 0xff)
+ pdata->negative = false;
+ else {
+ /* XXX hack... maybe some day we can figure out in what direction
+ * backlight should work on a given panel?
+ */
+ pdata->negative = (rdev->family != CHIP_RV200 &&
+ rdev->family != CHIP_RV250 &&
+ rdev->family != CHIP_RV280 &&
+ rdev->family != CHIP_RV350);
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+ pdata->negative = (pdata->negative ||
+ of_machine_is_compatible("PowerBook4,3") ||
+ of_machine_is_compatible("PowerBook6,3") ||
+ of_machine_is_compatible("PowerBook6,5"));
+#endif
+ }
+
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ lvds->bl_dev = bd;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ lvds->bl_dev = bd;
+ }
+
+ bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ DRM_INFO("radeon legacy LVDS backlight initialized\n");
+
+ return;
+
+error:
+ kfree(pdata);
+ return;
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd = NULL;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ bd = lvds->bl_dev;
+ lvds->bl_dev = NULL;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ bd = lvds->bl_dev;
+ lvds->bl_dev = NULL;
+ }
+
+ if (bd) {
+ struct radeon_backlight_privdata *pdata;
+
+ pdata = bl_get_data(bd);
+ backlight_device_unregister(bd);
+ kfree(pdata);
+
+ DRM_INFO("radeon legacy LVDS backlight unloaded\n");
+ }
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+
+static void radeon_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->enc_priv) {
+ radeon_legacy_backlight_exit(radeon_encoder);
+ kfree(radeon_encoder->enc_priv);
+ }
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
- .destroy = radeon_enc_destroy,
+ .destroy = radeon_lvds_enc_destroy,
};
static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
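Once backlight_device_register() above succeeds, the backlight core exposes the device under /sys/class/backlight/radeon_bl/ with the usual brightness and max_brightness attributes. The following userspace sketch is illustrative only and not part of the patch: it assumes the standard backlight-class sysfs layout and picks an arbitrary 50% target level.

/* Illustrative only: set the radeon legacy LVDS backlight to half of
 * max_brightness via the standard backlight class sysfs files. */
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/class/backlight/radeon_bl";
	char path[128];
	FILE *f;
	long max;

	snprintf(path, sizeof(path), "%s/max_brightness", base);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%ld", &max) != 1) {
		fprintf(stderr, "could not parse %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);

	snprintf(path, sizeof(path), "%s/brightness", base);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%ld\n", max / 2);	/* arbitrary example level */
	fclose(f);
	return 0;
}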
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5067d18d0009..e4582814bb78 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -302,6 +302,9 @@ struct radeon_encoder_lvds {
uint32_t lvds_gen_cntl;
/* panel mode */
struct drm_display_mode native_mode;
+ struct backlight_device *bl_dev;
+ int dpms_mode;
+ uint8_t backlight_level;
};
struct radeon_encoder_tv_dac {
@@ -355,6 +358,9 @@ struct radeon_encoder_atom_dig {
uint32_t lcd_ss_id;
/* panel mode */
struct drm_display_mode native_mode;
+ struct backlight_device *bl_dev;
+ int dpms_mode;
+ uint8_t backlight_level;
};
struct radeon_encoder_atom_dac {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 992d99d13fc5..bbc9cd823334 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -151,7 +151,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, 64);
if (r) {
- DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
+ DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
radeon_ring_ib_execute(rdev, ib);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c76283d9eb3d..aa6a66eeb4ec 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -412,12 +412,12 @@ static int rs400_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 8af4679db23e..19763f5df5e1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -865,12 +865,12 @@ static int rs600_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 66c949b7c18c..a9049ed1a519 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -627,12 +627,12 @@ static int rs690_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 64b57af93714..6613ee9ecca3 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -398,12 +398,12 @@ static int rv515_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4cc7b717fedd..b974ac7df8df 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1209,7 +1209,7 @@ int rv770_resume(struct radeon_device *rdev)
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 2560f01c1a63..b7ec4057841d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -68,9 +68,15 @@ config HID_A4TECH
---help---
Support for A4 tech X5 and WOP-35 / Trust 450L mice.
-config HID_ACRUX_FF
- tristate "ACRUX force feedback"
+config HID_ACRUX
+ tristate "ACRUX game controller support"
depends on USB_HID
+ ---help---
+ Say Y here if you want to enable support for ACRUX game controllers.
+
+config HID_ACRUX_FF
+ tristate "ACRUX force feedback support"
+ depends on HID_ACRUX
select INPUT_FF_MEMLESS
---help---
Say Y here if you want to enable force feedback support for ACRUX
@@ -140,7 +146,12 @@ config HID_DRAGONRISE
tristate "DragonRise Inc. game controller"
depends on USB_HID
---help---
- Say Y here if you have DragonRise Inc.game controllers.
+ Say Y here if you have DragonRise Inc. game controllers.
+ These might be branded as:
+ - Tesun USB-703
+ - Media-tech MT1504 "Rogue"
+ - DVTech JS19 "Gear"
+ - Defender Game Master
config DRAGONRISE_FF
bool "DragonRise Inc. force feedback"
@@ -160,13 +171,6 @@ config HID_EMS_FF
Currently the following devices are known to be supported:
- Trio Linker Plus II
-config HID_EGALAX
- tristate "eGalax multi-touch panel"
- depends on USB_HID
- ---help---
- Support for the eGalax dual-touch panels, including the
- Joojoo and Wetab tablets.
-
config HID_ELECOM
tristate "ELECOM BM084 bluetooth mouse"
depends on BT_HIDP
@@ -180,6 +184,14 @@ config HID_EZKEY
---help---
Support for Ezkey BTC 8193 keyboard.
+config HID_KEYTOUCH
+ tristate "Keyoutch HID devices"
+ depends on USB_HID
+ ---help---
+ Support for Keytouch HID devices not fully compliant with
+ the specification. Currently supported:
+ - Keytouch IEC 60945
+
config HID_KYE
tristate "Kye/Genius Ergo Mouse" if EXPERT
depends on USB_HID
@@ -218,6 +230,12 @@ config HID_KENSINGTON
---help---
Support for Kensington Slimblade Trackball.
+config HID_LCPOWER
+ tristate "LC-Power"
+ depends on USB_HID
+ ---help---
+ Support for LC-Power RC1000MCE RF remote control.
+
config HID_LOGITECH
tristate "Logitech devices" if EXPERT
depends on USB_HID
@@ -304,8 +322,11 @@ config HID_MULTITOUCH
Say Y here if you have one of the following devices:
- Cypress TrueTouch panels
- Hanvon dual touch panels
+ - IrTouch Infrared USB panels
- Pixcir dual touch panels
- 'Sensing Win7-TwoFinger' panel by GeneralTouch
+ - eGalax dual-touch panels, including the
+ Joojoo and Wetab tablets
If unsure, say N.
@@ -319,10 +340,10 @@ config HID_NTRIG
Support for N-Trig touch screen.
config HID_ORTEK
- tristate "Ortek WKB-2000 wireless keyboard and mouse trackpad"
+ tristate "Ortek PKB-1700/WKB-2000 wireless keyboard and mouse trackpad"
depends on USB_HID
---help---
- Support for Ortek WKB-2000 wireless keyboard + mouse trackpad.
+ Support for Ortek PKB-1700/WKB-2000 wireless keyboard + mouse trackpad.
config HID_PANTHERLORD
tristate "Pantherlord/GreenAsia game controller"
@@ -417,10 +438,22 @@ config HID_ROCCAT
Say Y here if you have a Roccat mouse or keyboard and want OSD or
macro execution support.
+config HID_ROCCAT_COMMON
+ tristate
+
+config HID_ROCCAT_ARVO
+ tristate "Roccat Arvo keyboard support"
+ depends on USB_HID
+ select HID_ROCCAT
+ select HID_ROCCAT_COMMON
+ ---help---
+ Support for Roccat Arvo keyboard.
+
config HID_ROCCAT_KONE
tristate "Roccat Kone Mouse support"
depends on USB_HID
select HID_ROCCAT
+ select HID_ROCCAT_COMMON
---help---
Support for Roccat Kone mouse.
@@ -428,13 +461,23 @@ config HID_ROCCAT_KONEPLUS
tristate "Roccat Kone[+] mouse support"
depends on USB_HID
select HID_ROCCAT
+ select HID_ROCCAT_COMMON
---help---
Support for Roccat Kone[+] mouse.
+config HID_ROCCAT_KOVAPLUS
+ tristate "Roccat Kova[+] mouse support"
+ depends on USB_HID
+ select HID_ROCCAT
+ select HID_ROCCAT_COMMON
+ ---help---
+ Support for Roccat Kova[+] mouse.
+
config HID_ROCCAT_PYRA
tristate "Roccat Pyra mouse support"
depends on USB_HID
select HID_ROCCAT
+ select HID_ROCCAT_COMMON
---help---
Support for Roccat Pyra mouse.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 6efc2a0370ad..06c68ae3abee 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -27,21 +27,22 @@ endif
obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
-obj-$(CONFIG_HID_ACRUX_FF) += hid-axff.o
+obj-$(CONFIG_HID_ACRUX) += hid-axff.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_CANDO) += hid-cando.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
-obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o
+obj-$(CONFIG_HID_DRAGONRISE) += hid-dr.o
obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
-obj-$(CONFIG_HID_EGALAX) += hid-egalax.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
+obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
+obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
@@ -56,8 +57,11 @@ obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
+obj-$(CONFIG_HID_ROCCAT_COMMON) += hid-roccat-common.o
+obj-$(CONFIG_HID_ROCCAT_ARVO) += hid-roccat-arvo.o
obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_ROCCAT_KONEPLUS) += hid-roccat-koneplus.o
+obj-$(CONFIG_HID_ROCCAT_KOVAPLUS) += hid-roccat-kovaplus.o
obj-$(CONFIG_HID_ROCCAT_PYRA) += hid-roccat-pyra.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 61aa71233392..b85744fe8464 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -481,6 +481,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index e5b961d6ff22..b4554288de00 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -33,6 +33,8 @@
#include <linux/hid.h>
#include "hid-ids.h"
+
+#ifdef CONFIG_HID_ACRUX_FF
#include "usbhid/usbhid.h"
struct axff_device {
@@ -109,6 +111,12 @@ err_free_mem:
kfree(axff);
return error;
}
+#else
+static inline int axff_init(struct hid_device *hid)
+{
+ return 0;
+}
+#endif
static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
@@ -139,9 +147,25 @@ static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id)
error);
}
+ /*
+ * We need to start polling the device right away, otherwise
+ * it will go into a coma.
+ */
+ error = hid_hw_open(hdev);
+ if (error) {
+ dev_err(&hdev->dev, "hw open failed\n");
+ return error;
+ }
+
return 0;
}
+static void ax_remove(struct hid_device *hdev)
+{
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
static const struct hid_device_id ax_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802), },
{ }
@@ -149,9 +173,10 @@ static const struct hid_device_id ax_devices[] = {
MODULE_DEVICE_TABLE(hid, ax_devices);
static struct hid_driver ax_driver = {
- .name = "acrux",
- .id_table = ax_devices,
- .probe = ax_probe,
+ .name = "acrux",
+ .id_table = ax_devices,
+ .probe = ax_probe,
+ .remove = ax_remove,
};
static int __init ax_init(void)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index d678cf3d33d5..e9687768a335 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1159,6 +1159,32 @@ static bool hid_hiddev(struct hid_device *hdev)
return !!hid_match_id(hdev, hid_hiddev_list);
}
+
+static ssize_t
+read_report_descriptor(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+
+ if (off >= hdev->rsize)
+ return 0;
+
+ if (off + count > hdev->rsize)
+ count = hdev->rsize - off;
+
+ memcpy(buf, hdev->rdesc + off, count);
+
+ return count;
+}
+
+static struct bin_attribute dev_bin_attr_report_desc = {
+ .attr = { .name = "report_descriptor", .mode = 0444 },
+ .read = read_report_descriptor,
+ .size = HID_MAX_DESCRIPTOR_SIZE,
+};
+
int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
@@ -1169,6 +1195,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
char buf[64];
unsigned int i;
int len;
+ int ret;
if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
@@ -1230,6 +1257,11 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
bus = "<UNKNOWN>";
}
+ ret = device_create_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
+ if (ret)
+ hid_warn(hdev,
+ "can't create sysfs report descriptor attribute err: %d\n", ret);
+
hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
buf, bus, hdev->version >> 8, hdev->version & 0xff,
type, hdev->name, hdev->phys);
@@ -1240,6 +1272,7 @@ EXPORT_SYMBOL_GPL(hid_connect);
void hid_disconnect(struct hid_device *hdev)
{
+ device_remove_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
if (hdev->claimed & HID_CLAIMED_INPUT)
hidinput_disconnect(hdev);
if (hdev->claimed & HID_CLAIMED_HIDDEV)
@@ -1256,9 +1289,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
-#if defined(CONFIG_HID_ACRUX_FF) || defined(CONFIG_HID_ACRUX_FF_MODULE)
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
-#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
@@ -1302,6 +1333,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1328,6 +1362,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
@@ -1345,9 +1380,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1368,6 +1406,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
@@ -1400,12 +1439,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
@@ -1801,6 +1843,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
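The report_descriptor attribute registered in hid_connect() above shows up as a read-only binary file on the hid bus, e.g. /sys/bus/hid/devices/<bus:vendor:product.instance>/report_descriptor. A small read-side sketch follows; the hard-coded device name is only an example, and the 4096-byte buffer mirrors HID_MAX_DESCRIPTOR_SIZE.

/* Illustrative only: hex-dump a report descriptor exposed through the
 * new report_descriptor sysfs attribute. The default path is an example. */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] :
		"/sys/bus/hid/devices/0003:046D:C52B.0001/report_descriptor";
	unsigned char buf[4096];	/* HID_MAX_DESCRIPTOR_SIZE */
	size_t n, i;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	for (i = 0; i < n; i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	if (n % 16)
		putchar('\n');
	return 0;
}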
diff --git a/drivers/hid/hid-drff.c b/drivers/hid/hid-dr.c
index afcf3d67eb02..61eece47204d 100644
--- a/drivers/hid/hid-drff.c
+++ b/drivers/hid/hid-dr.c
@@ -145,6 +145,110 @@ static inline int drff_init(struct hid_device *hid)
}
#endif
+/*
+ * The original descriptor of joystick with PID 0x0011, represented by DVTech PC
+ * JS19. It seems both copied from another device and a result of confusion
+ * either about the specification or about the program used to create the
+ * descriptor. In any case, it's a wonder it works on Windows.
+ *
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (Joystik), ; Joystik (04h, application collection)
+ * Collection (Application),
+ * Collection (Logical),
+ * Report Size (8),
+ * Report Count (5),
+ * Logical Minimum (0),
+ * Logical Maximum (255),
+ * Physical Minimum (0),
+ * Physical Maximum (255),
+ * Usage (X), ; X (30h, dynamic value)
+ * Usage (X), ; X (30h, dynamic value)
+ * Usage (X), ; X (30h, dynamic value)
+ * Usage (X), ; X (30h, dynamic value)
+ * Usage (Y), ; Y (31h, dynamic value)
+ * Input (Variable),
+ * Report Size (4),
+ * Report Count (1),
+ * Logical Maximum (7),
+ * Physical Maximum (315),
+ * Unit (Degrees),
+ * Usage (00h),
+ * Input (Variable, Null State),
+ * Unit,
+ * Report Size (1),
+ * Report Count (10),
+ * Logical Maximum (1),
+ * Physical Maximum (1),
+ * Usage Page (Button), ; Button (09h)
+ * Usage Minimum (01h),
+ * Usage Maximum (0Ah),
+ * Input (Variable),
+ * Usage Page (FF00h), ; FF00h, vendor-defined
+ * Report Size (1),
+ * Report Count (10),
+ * Logical Maximum (1),
+ * Physical Maximum (1),
+ * Usage (01h),
+ * Input (Variable),
+ * End Collection,
+ * Collection (Logical),
+ * Report Size (8),
+ * Report Count (4),
+ * Physical Maximum (255),
+ * Logical Maximum (255),
+ * Usage (02h),
+ * Output (Variable),
+ * End Collection,
+ * End Collection
+ */
+
+/* Size of the original descriptor of the PID 0x0011 joystick */
+#define PID0011_RDESC_ORIG_SIZE 101
+
+/* Fixed report descriptor for the PID 0x0011 joystick */
+static __u8 pid0011_rdesc_fixed[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x04, /* Usage (Joystik), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x14, /* Logical Minimum (0), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x95, 0x0A, /* Report Count (10), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x0A, /* Usage Maximum (0Ah), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x0A, /* Report Count (10), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+};
+
+static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ switch (hdev->product) {
+ case 0x0011:
+ if (*rsize == PID0011_RDESC_ORIG_SIZE) {
+ rdesc = pid0011_rdesc_fixed;
+ *rsize = sizeof(pid0011_rdesc_fixed);
+ }
+ break;
+ }
+ return rdesc;
+}
+
static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
@@ -163,7 +267,16 @@ static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err;
}
- drff_init(hdev);
+ switch (hdev->product) {
+ case 0x0006:
+ ret = drff_init(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "force feedback init failed\n");
+ hid_hw_stop(hdev);
+ goto err;
+ }
+ break;
+ }
return 0;
err:
@@ -172,6 +285,7 @@ err:
static const struct hid_device_id dr_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006), },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011), },
{ }
};
MODULE_DEVICE_TABLE(hid, dr_devices);
@@ -179,6 +293,7 @@ MODULE_DEVICE_TABLE(hid, dr_devices);
static struct hid_driver dr_driver = {
.name = "dragonrise",
.id_table = dr_devices,
+ .report_fixup = dr_report_fixup,
.probe = dr_probe,
};
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
deleted file mode 100644
index 03bee1970d70..000000000000
--- a/drivers/hid/hid-egalax.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * HID driver for eGalax dual-touch panels
- *
- * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
- * Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se>
- * Copyright (c) 2010 Canonical, Ltd.
- *
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/device.h>
-#include <linux/hid.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/input/mt.h>
-#include <linux/slab.h>
-#include "usbhid/usbhid.h"
-
-MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
-MODULE_DESCRIPTION("eGalax dual-touch panel");
-MODULE_LICENSE("GPL");
-
-#include "hid-ids.h"
-
-#define MAX_SLOTS 2
-
-/* estimated signal-to-noise ratios */
-#define SN_MOVE 4096
-#define SN_PRESSURE 32
-
-struct egalax_data {
- int valid;
- int slot;
- int touch;
- int x, y, z;
-};
-
-static void set_abs(struct input_dev *input, unsigned int code,
- struct hid_field *field, int snratio)
-{
- int fmin = field->logical_minimum;
- int fmax = field->logical_maximum;
- int fuzz = snratio ? (fmax - fmin) / snratio : 0;
- input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
-}
-
-static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- struct input_dev *input = hi->input;
-
- switch (usage->hid & HID_USAGE_PAGE) {
-
- case HID_UP_GENDESK:
- switch (usage->hid) {
- case HID_GD_X:
- field->logical_maximum = 32760;
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_X);
- set_abs(input, ABS_MT_POSITION_X, field, SN_MOVE);
- /* touchscreen emulation */
- set_abs(input, ABS_X, field, SN_MOVE);
- return 1;
- case HID_GD_Y:
- field->logical_maximum = 32760;
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_Y);
- set_abs(input, ABS_MT_POSITION_Y, field, SN_MOVE);
- /* touchscreen emulation */
- set_abs(input, ABS_Y, field, SN_MOVE);
- return 1;
- }
- return 0;
-
- case HID_UP_DIGITIZER:
- switch (usage->hid) {
- case HID_DG_TIPSWITCH:
- /* touchscreen emulation */
- hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
- input_set_capability(input, EV_KEY, BTN_TOUCH);
- return 1;
- case HID_DG_INRANGE:
- case HID_DG_CONFIDENCE:
- case HID_DG_CONTACTCOUNT:
- case HID_DG_CONTACTMAX:
- return -1;
- case HID_DG_CONTACTID:
- input_mt_init_slots(input, MAX_SLOTS);
- return 1;
- case HID_DG_TIPPRESSURE:
- field->logical_minimum = 0;
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_PRESSURE);
- set_abs(input, ABS_MT_PRESSURE, field, SN_PRESSURE);
- /* touchscreen emulation */
- set_abs(input, ABS_PRESSURE, field, SN_PRESSURE);
- return 1;
- }
- return 0;
- }
-
- /* ignore others (from other reports we won't get anyway) */
- return -1;
-}
-
-static int egalax_input_mapped(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- /* tell hid-input to skip setup of these event types */
- if (usage->type == EV_KEY || usage->type == EV_ABS)
- set_bit(usage->type, hi->input->evbit);
- return -1;
-}
-
-/*
- * this function is called when a whole finger has been parsed,
- * so that it can decide what to send to the input layer.
- */
-static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
-{
- input_mt_slot(input, td->slot);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, td->touch);
- if (td->touch) {
- input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
- input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
- }
- input_mt_report_pointer_emulation(input, true);
-}
-
-static int egalax_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
-{
- struct egalax_data *td = hid_get_drvdata(hid);
-
- /* Note, eGalax has two product lines: the first is resistive and
- * uses a standard parallel multitouch protocol (product ID ==
- * 48xx). The second is capacitive and uses an unusual "serial"
- * protocol with a different message for each multitouch finger
- * (product ID == 72xx).
- */
- if (hid->claimed & HID_CLAIMED_INPUT) {
- struct input_dev *input = field->hidinput->input;
-
- switch (usage->hid) {
- case HID_DG_INRANGE:
- td->valid = value;
- break;
- case HID_DG_CONFIDENCE:
- /* avoid interference from generic hidinput handling */
- break;
- case HID_DG_TIPSWITCH:
- td->touch = value;
- break;
- case HID_DG_TIPPRESSURE:
- td->z = value;
- break;
- case HID_DG_CONTACTID:
- td->slot = clamp_val(value, 0, MAX_SLOTS - 1);
- break;
- case HID_GD_X:
- td->x = value;
- break;
- case HID_GD_Y:
- td->y = value;
- /* this is the last field in a finger */
- if (td->valid)
- egalax_filter_event(td, input);
- break;
- case HID_DG_CONTACTCOUNT:
- /* touch emulation: this is the last field in a frame */
- break;
-
- default:
- /* fallback to the generic hidinput handling */
- return 0;
- }
- }
-
- /* we have handled the hidinput part, now remains hiddev */
- if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
- hid->hiddev_hid_event(hid, field, usage, value);
-
- return 1;
-}
-
-static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id)
-{
- int ret;
- struct egalax_data *td;
- struct hid_report *report;
-
- td = kzalloc(sizeof(struct egalax_data), GFP_KERNEL);
- if (!td) {
- hid_err(hdev, "cannot allocate eGalax data\n");
- return -ENOMEM;
- }
- hid_set_drvdata(hdev, td);
-
- ret = hid_parse(hdev);
- if (ret)
- goto end;
-
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
- if (ret)
- goto end;
-
- report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[5];
- if (report) {
- report->field[0]->value[0] = 2;
- usbhid_submit_report(hdev, report, USB_DIR_OUT);
- }
-
-end:
- if (ret)
- kfree(td);
-
- return ret;
-}
-
-static void egalax_remove(struct hid_device *hdev)
-{
- hid_hw_stop(hdev);
- kfree(hid_get_drvdata(hdev));
- hid_set_drvdata(hdev, NULL);
-}
-
-static const struct hid_device_id egalax_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
- { }
-};
-MODULE_DEVICE_TABLE(hid, egalax_devices);
-
-static const struct hid_usage_id egalax_grabbed_usages[] = {
- { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
- { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
-};
-
-static struct hid_driver egalax_driver = {
- .name = "egalax-touch",
- .id_table = egalax_devices,
- .probe = egalax_probe,
- .remove = egalax_remove,
- .input_mapping = egalax_input_mapping,
- .input_mapped = egalax_input_mapped,
- .usage_table = egalax_grabbed_usages,
- .event = egalax_event,
-};
-
-static int __init egalax_init(void)
-{
- return hid_register_driver(&egalax_driver);
-}
-
-static void __exit egalax_exit(void)
-{
- hid_unregister_driver(&egalax_driver);
-}
-
-module_init(egalax_init);
-module_exit(egalax_exit);
-
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 3975e039c3dd..e88b951cd10d 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -43,6 +43,11 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case 0x048: gy_map_key_clear(KEY_MEDIA); break;
case 0x049: gy_map_key_clear(KEY_CAMERA); break;
case 0x04a: gy_map_key_clear(KEY_VIDEO); break;
+ case 0x05a: gy_map_key_clear(KEY_TEXT); break;
+ case 0x05b: gy_map_key_clear(KEY_RED); break;
+ case 0x05c: gy_map_key_clear(KEY_GREEN); break;
+ case 0x05d: gy_map_key_clear(KEY_YELLOW); break;
+ case 0x05e: gy_map_key_clear(KEY_BLUE); break;
default:
return 0;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 92a0d61a7379..65ac53d7aecc 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -103,6 +103,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
+#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
+#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
+#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -333,6 +336,9 @@
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
+#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
+#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
+
#define USB_VENDOR_ID_JESS 0x0c45
#define USB_DEVICE_ID_JESS_YUREX 0x1010
@@ -345,6 +351,9 @@
#define USB_VENDOR_ID_KWORLD 0x1b80
#define USB_DEVICE_ID_KWORLD_RADIO_FM700 0xd700
+#define USB_VENDOR_ID_KEYTOUCH 0x0926
+#define USB_DEVICE_ID_KEYTOUCH_IEC 0x3333
+
#define USB_VENDOR_ID_KYE 0x0458
#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
@@ -352,6 +361,9 @@
#define USB_VENDOR_ID_LABTEC 0x1020
#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
+#define USB_VENDOR_ID_LCPOWER 0x1241
+#define USB_DEVICE_ID_LCPOWER_LC1000 0xf767
+
#define USB_VENDOR_ID_LD 0x0f11
#define USB_DEVICE_ID_LD_CASSY 0x1000
#define USB_DEVICE_ID_LD_POCKETCASSY 0x1010
@@ -383,6 +395,7 @@
#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
#define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
+#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298
#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
#define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
@@ -466,6 +479,7 @@
#define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
#define USB_VENDOR_ID_ORTEK 0x05a4
+#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
#define USB_VENDOR_ID_PANJIT 0x134c
@@ -496,8 +510,10 @@
#define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001
#define USB_VENDOR_ID_ROCCAT 0x1e7d
+#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
+#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 7f552bfad32c..33dde8724e02 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -290,14 +290,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
goto ignore;
}
- if (field->report_type == HID_FEATURE_REPORT) {
- if (device->driver->feature_mapping) {
- device->driver->feature_mapping(device, hidinput, field,
- usage);
- }
- goto ignore;
- }
-
if (device->driver->input_mapping) {
int ret = device->driver->input_mapping(device, hidinput, field,
usage, &bit, &max);
@@ -835,6 +827,24 @@ static void hidinput_close(struct input_dev *dev)
hid_hw_close(hid);
}
+static void report_features(struct hid_device *hid)
+{
+ struct hid_driver *drv = hid->driver;
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ int i, j;
+
+ if (!drv->feature_mapping)
+ return;
+
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list)
+ for (i = 0; i < rep->maxfield; i++)
+ for (j = 0; j < rep->field[i]->maxusage; j++)
+ drv->feature_mapping(hid, rep->field[i],
+ rep->field[i]->usage + j);
+}
+
/*
* Register the input device; print a message.
* Configure the input layer interface
@@ -863,7 +873,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
return -1;
}
- for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) {
+ report_features(hid);
+
+ for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) {
if (k == HID_OUTPUT_REPORT &&
hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
continue;
@@ -888,8 +900,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
hid->ll_driver->hidinput_input_event;
input_dev->open = hidinput_open;
input_dev->close = hidinput_close;
- input_dev->setkeycode_new = hidinput_setkeycode;
- input_dev->getkeycode_new = hidinput_getkeycode;
+ input_dev->setkeycode = hidinput_setkeycode;
+ input_dev->getkeycode = hidinput_getkeycode;
input_dev->name = hid->name;
input_dev->phys = hid->phys;
@@ -928,6 +940,7 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
return 0;
out_cleanup:
+ list_del(&hidinput->list);
input_free_device(hidinput->input);
kfree(hidinput);
out_unwind:
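With the hid-input.c changes above, feature usages are no longer routed through hidinput_configure_usage(); report_features() now walks every field of every feature report and hands each usage to the driver's ->feature_mapping() callback before the input devices are registered. A minimal sketch of such a callback in a hypothetical driver follows; the HID_DG_CONTACTMAX handling and the example_data structure are assumptions for illustration, not taken from the patch.

/* Sketch of a driver-side ->feature_mapping() callback. It matches the
 * (hid_device, hid_field, hid_usage) signature used by report_features();
 * the contact-count handling is purely illustrative. */
#include <linux/hid.h>

struct example_data {
	int maxcontacts;
};

static void example_feature_mapping(struct hid_device *hdev,
				    struct hid_field *field,
				    struct hid_usage *usage)
{
	struct example_data *ed = hid_get_drvdata(hdev);

	if (!ed)
		return;

	switch (usage->hid) {
	case HID_DG_CONTACTMAX:
		/* remember how many contacts the device claims to support */
		ed->maxcontacts = field->logical_maximum;
		break;
	}
}

A driver would wire this up with .feature_mapping = example_feature_mapping in its struct hid_driver.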
diff --git a/drivers/hid/hid-keytouch.c b/drivers/hid/hid-keytouch.c
new file mode 100644
index 000000000000..07cd825f6f01
--- /dev/null
+++ b/drivers/hid/hid-keytouch.c
@@ -0,0 +1,66 @@
+/*
+ * HID driver for Keytouch devices not fully compliant with HID standard
+ *
+ * Copyright (c) 2011 Jiri Kosina
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+/* Replace the broken report descriptor of this device with a
+ * sensible default one */
+static __u8 keytouch_fixed_rdesc[] = {
+0x05, 0x01, 0x09, 0x06, 0xa1, 0x01, 0x05, 0x07, 0x19, 0xe0, 0x29, 0xe7, 0x15,
+0x00, 0x25, 0x01, 0x75, 0x01, 0x95, 0x08, 0x81, 0x02, 0x95, 0x01, 0x75, 0x08,
+0x81, 0x01, 0x95, 0x03, 0x75, 0x01, 0x05, 0x08, 0x19, 0x01, 0x29, 0x03, 0x91,
+0x02, 0x95, 0x05, 0x75, 0x01, 0x91, 0x01, 0x95, 0x06, 0x75, 0x08, 0x15, 0x00,
+0x26, 0xff, 0x00, 0x05, 0x07, 0x19, 0x00, 0x2a, 0xff, 0x00, 0x81, 0x00, 0xc0
+};
+
+static __u8 *keytouch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ hid_info(hdev, "fixing up Keytouch IEC report descriptor\n");
+
+ rdesc = keytouch_fixed_rdesc;
+ *rsize = sizeof(keytouch_fixed_rdesc);
+
+ return rdesc;
+}
+
+static const struct hid_device_id keytouch_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, keytouch_devices);
+
+static struct hid_driver keytouch_driver = {
+ .name = "keytouch",
+ .id_table = keytouch_devices,
+ .report_fixup = keytouch_report_fixup,
+};
+
+static int __init keytouch_init(void)
+{
+ return hid_register_driver(&keytouch_driver);
+}
+
+static void __exit keytouch_exit(void)
+{
+ hid_unregister_driver(&keytouch_driver);
+}
+
+module_init(keytouch_init);
+module_exit(keytouch_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jiri Kosina");
diff --git a/drivers/hid/hid-lcpower.c b/drivers/hid/hid-lcpower.c
new file mode 100644
index 000000000000..c4fe9bd095b7
--- /dev/null
+++ b/drivers/hid/hid-lcpower.c
@@ -0,0 +1,70 @@
+/*
+ * HID driver for LC Power Model RC1000MCE
+ *
+ * Copyright (c) 2011 Chris Schlund
+ * based on hid-topseed module
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+#define ts_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
+ EV_KEY, (c))
+static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) != 0x0ffbc0000)
+ return 0;
+
+ switch (usage->hid & HID_USAGE) {
+ case 0x046: ts_map_key_clear(KEY_YELLOW); break;
+ case 0x047: ts_map_key_clear(KEY_GREEN); break;
+ case 0x049: ts_map_key_clear(KEY_BLUE); break;
+ case 0x04a: ts_map_key_clear(KEY_RED); break;
+ case 0x00d: ts_map_key_clear(KEY_HOME); break;
+ case 0x025: ts_map_key_clear(KEY_TV); break;
+ case 0x048: ts_map_key_clear(KEY_VCR); break;
+ case 0x024: ts_map_key_clear(KEY_MENU); break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static const struct hid_device_id ts_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, ts_devices);
+
+static struct hid_driver ts_driver = {
+ .name = "LC RC1000MCE",
+ .id_table = ts_devices,
+ .input_mapping = ts_input_mapping,
+};
+
+static int __init ts_init(void)
+{
+ return hid_register_driver(&ts_driver);
+}
+
+static void __exit ts_exit(void)
+{
+ hid_unregister_driver(&ts_driver);
+}
+
+module_init(ts_init);
+module_exit(ts_exit);
+MODULE_LICENSE("GPL");
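The return value of an input_mapping callback drives hid-input's behaviour: 1 means the usage was mapped here, 0 falls back to the generic mapping, and a negative value drops the usage. A stripped-down sketch of that convention (the helper and the single usage it handles are made up; hid_map_usage_clear() and the constants are the real ones used above):

/* Illustrative mapping callback handling just one vendor usage. */
static int example_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE) != 0x00d)
		return 0;		/* not ours: let generic mapping run */

	hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_HOME);
	return 1;			/* mapped here, skip generic mapping */
}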
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index aef4104da141..3da90402ee81 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -377,6 +377,8 @@ static const struct hid_device_id lg_devices[] = {
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
.driver_data = LG_FF },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL),
+ .driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ),
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 698e6459fd0b..318cc40df92d 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -258,7 +258,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input_report_abs(input, ABS_MT_TRACKING_ID, id);
input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2);
input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2);
- input_report_abs(input, ABS_MT_ORIENTATION, orientation);
+ input_report_abs(input, ABS_MT_ORIENTATION, -orientation);
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
@@ -397,7 +397,7 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
/* Note: Touch Y position from the device is inverted relative
* to how pointer motion is reported (and relative to how USB
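The two magicmouse changes are paired: the reported orientation is negated, and the advertised axis bounds flip from [-32, 31] to [-31, 32]. A tiny standalone check (plain C; it only assumes the raw value really spans the old [-32, 31] range) of why the bounds flip that way:

#include <stdio.h>

int main(void)
{
	int raw, min = 0, max = 0;

	/* Negate every value of the old range and record the extremes. */
	for (raw = -32; raw <= 31; raw++) {
		if (-raw < min)
			min = -raw;
		if (-raw > max)
			max = -raw;
	}
	printf("negated orientation spans [%d, %d]\n", min, max); /* [-31, 32] */
	return 0;
}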
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 07d3183fdde5..ee01e65e22d6 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -5,6 +5,12 @@
* Copyright (c) 2010-2011 Benjamin Tissoires <benjamin.tissoires@gmail.com>
* Copyright (c) 2010-2011 Ecole Nationale de l'Aviation Civile, France
*
+ * This code is partly based on hid-egalax.c:
+ *
+ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ * Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se>
+ * Copyright (c) 2010 Canonical, Ltd.
+ *
*/
/*
@@ -24,6 +30,7 @@
MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
MODULE_DESCRIPTION("HID multitouch panels");
MODULE_LICENSE("GPL");
@@ -36,6 +43,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_SLOT_IS_CONTACTNUMBER (1 << 3)
#define MT_QUIRK_VALID_IS_INRANGE (1 << 4)
#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 5)
+#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 6)
struct mt_slot {
__s32 x, y, p, w, h;
@@ -65,10 +73,11 @@ struct mt_class {
};
/* classes of device behavior */
-#define MT_CLS_DEFAULT 1
-#define MT_CLS_DUAL1 2
-#define MT_CLS_DUAL2 3
-#define MT_CLS_CYPRESS 4
+#define MT_CLS_DEFAULT 1
+#define MT_CLS_DUAL_INRANGE_CONTACTID 2
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 3
+#define MT_CLS_CYPRESS 4
+#define MT_CLS_EGALAX 5
/*
* these device-dependent functions determine what slot corresponds
@@ -104,13 +113,13 @@ static int find_slot_from_contactid(struct mt_device *td)
struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
- .quirks = MT_QUIRK_VALID_IS_INRANGE,
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP,
.maxcontacts = 10 },
- { .name = MT_CLS_DUAL1,
+ { .name = MT_CLS_DUAL_INRANGE_CONTACTID,
.quirks = MT_QUIRK_VALID_IS_INRANGE |
MT_QUIRK_SLOT_IS_CONTACTID,
.maxcontacts = 2 },
- { .name = MT_CLS_DUAL2,
+ { .name = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
.quirks = MT_QUIRK_VALID_IS_INRANGE |
MT_QUIRK_SLOT_IS_CONTACTNUMBER,
.maxcontacts = 2 },
@@ -119,10 +128,18 @@ struct mt_class mt_classes[] = {
MT_QUIRK_CYPRESS,
.maxcontacts = 10 },
+ { .name = MT_CLS_EGALAX,
+ .quirks = MT_QUIRK_SLOT_IS_CONTACTID |
+ MT_QUIRK_VALID_IS_INRANGE |
+ MT_QUIRK_EGALAX_XYZ_FIXUP,
+ .maxcontacts = 2,
+ .sn_move = 4096,
+ .sn_pressure = 32,
+ },
{ }
};
-static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi,
+static void mt_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
if (usage->hid == HID_DG_INPUTMODE) {
@@ -146,11 +163,15 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = td->mtclass;
+ __s32 quirks = cls->quirks;
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
switch (usage->hid) {
case HID_GD_X:
+ if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
+ field->logical_maximum = 32760;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_X);
set_abs(hi->input, ABS_MT_POSITION_X, field,
@@ -160,6 +181,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->last_slot_field = usage->hid;
return 1;
case HID_GD_Y:
+ if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
+ field->logical_maximum = 32760;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_Y);
set_abs(hi->input, ABS_MT_POSITION_Y, field,
@@ -203,6 +226,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->last_slot_field = usage->hid;
return 1;
case HID_DG_TIPPRESSURE:
+ if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
+ field->logical_minimum = 0;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_PRESSURE);
set_abs(hi->input, ABS_MT_PRESSURE, field,
@@ -363,8 +388,11 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
return 0;
}
- if (usage->hid == td->last_slot_field)
+ if (usage->hid == td->last_slot_field) {
mt_complete_slot(td);
+ if (!td->last_field_index)
+ mt_emit_event(td, field->hidinput->input);
+ }
if (field->index == td->last_field_index
&& td->num_received >= td->num_expected)
@@ -466,18 +494,42 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
/* GeneralTouch panel */
- { .driver_data = MT_CLS_DUAL2,
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
+ /* IRTOUCH panels */
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
+ HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
+ USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
+
/* PixCir-based panels */
- { .driver_data = MT_CLS_DUAL1,
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
USB_DEVICE_ID_HANVON_MULTITOUCH) },
- { .driver_data = MT_CLS_DUAL1,
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
+ /* Resistive eGalax devices */
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+
+ /* Capacitive eGalax devices */
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+
{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);
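Each new entry in mt_devices selects one of the MT_CLS_* ids through .driver_data, and the driver resolves that id against mt_classes[] at probe time. A sketch of such a lookup over the table above (the helper name and exact form are assumptions; the real probe code may do this differently):

/* Hypothetical lookup: map a MT_CLS_* id to its mt_classes[] entry. */
static struct mt_class *example_find_class(__s32 id)
{
	struct mt_class *cls;

	for (cls = mt_classes; cls->name; cls++)	/* table ends with { } */
		if (cls->name == id)
			return cls;
	return NULL;					/* unknown class id */
}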
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index beb403421e72..9fae2ebdd758 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -110,6 +110,36 @@ static int ntrig_version_string(unsigned char *raw, char *buf)
return sprintf(buf, "%u.%u.%u.%u.%u", a, b, c, d, e);
}
+static inline int ntrig_get_mode(struct hid_device *hdev)
+{
+ struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
+ report_id_hash[0x0d];
+
+ if (!report)
+ return -EINVAL;
+
+ usbhid_submit_report(hdev, report, USB_DIR_IN);
+ usbhid_wait_io(hdev);
+ return (int)report->field[0]->value[0];
+}
+
+static inline void ntrig_set_mode(struct hid_device *hdev, const int mode)
+{
+ struct hid_report *report;
+ __u8 mode_commands[4] = { 0xe, 0xf, 0x1b, 0x10 };
+
+ if (mode < 0 || mode > 3)
+ return;
+
+ report = hdev->report_enum[HID_FEATURE_REPORT].
+ report_id_hash[mode_commands[mode]];
+
+ if (!report)
+ return;
+
+ usbhid_submit_report(hdev, report, USB_DIR_IN);
+}
+
static void ntrig_report_version(struct hid_device *hdev)
{
int ret;
@@ -539,277 +569,288 @@ static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static int ntrig_event (struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
- struct input_dev *input = field->hidinput->input;
struct ntrig_data *nd = hid_get_drvdata(hid);
+ struct input_dev *input;
+
+ /* Skip processing if not a claimed input */
+ if (!(hid->claimed & HID_CLAIMED_INPUT))
+ goto not_claimed_input;
+
+ /* This function may be called before the structures are fully
+ * initialized */
+ if (!(field->hidinput && field->hidinput->input))
+ return -EINVAL;
+
+ input = field->hidinput->input;
/* No special handling needed for the pen */
if (field->application == HID_DG_PEN)
return 0;
- if (hid->claimed & HID_CLAIMED_INPUT) {
- switch (usage->hid) {
- case 0xff000001:
- /* Tag indicating the start of a multitouch group */
- nd->reading_mt = 1;
- nd->first_contact_touch = 0;
- break;
- case HID_DG_TIPSWITCH:
- nd->tipswitch = value;
- /* Prevent emission of touch until validated */
- return 1;
- case HID_DG_CONFIDENCE:
- nd->confidence = value;
- break;
- case HID_GD_X:
- nd->x = value;
- /* Clear the contact footer */
- nd->mt_foot_count = 0;
- break;
- case HID_GD_Y:
- nd->y = value;
- break;
- case HID_DG_CONTACTID:
- nd->id = value;
- break;
- case HID_DG_WIDTH:
- nd->w = value;
- break;
- case HID_DG_HEIGHT:
- nd->h = value;
+ switch (usage->hid) {
+ case 0xff000001:
+ /* Tag indicating the start of a multitouch group */
+ nd->reading_mt = 1;
+ nd->first_contact_touch = 0;
+ break;
+ case HID_DG_TIPSWITCH:
+ nd->tipswitch = value;
+ /* Prevent emission of touch until validated */
+ return 1;
+ case HID_DG_CONFIDENCE:
+ nd->confidence = value;
+ break;
+ case HID_GD_X:
+ nd->x = value;
+ /* Clear the contact footer */
+ nd->mt_foot_count = 0;
+ break;
+ case HID_GD_Y:
+ nd->y = value;
+ break;
+ case HID_DG_CONTACTID:
+ nd->id = value;
+ break;
+ case HID_DG_WIDTH:
+ nd->w = value;
+ break;
+ case HID_DG_HEIGHT:
+ nd->h = value;
+ /*
+ * when in single touch mode, this is the last
+ * report received in a finger event. We want
+ * to emit a normal (X, Y) position
+ */
+ if (!nd->reading_mt) {
/*
- * when in single touch mode, this is the last
- * report received in a finger event. We want
- * to emit a normal (X, Y) position
+ * TipSwitch indicates the presence of a
+ * finger in single touch mode.
*/
- if (!nd->reading_mt) {
- /*
- * TipSwitch indicates the presence of a
- * finger in single touch mode.
- */
- input_report_key(input, BTN_TOUCH,
- nd->tipswitch);
- input_report_key(input, BTN_TOOL_DOUBLETAP,
- nd->tipswitch);
- input_event(input, EV_ABS, ABS_X, nd->x);
- input_event(input, EV_ABS, ABS_Y, nd->y);
- }
+ input_report_key(input, BTN_TOUCH,
+ nd->tipswitch);
+ input_report_key(input, BTN_TOOL_DOUBLETAP,
+ nd->tipswitch);
+ input_event(input, EV_ABS, ABS_X, nd->x);
+ input_event(input, EV_ABS, ABS_Y, nd->y);
+ }
+ break;
+ case 0xff000002:
+ /*
+ * we receive this when the device is in multitouch
+ * mode. The first of the three values tagged with
+ * this usage tells if the contact point is real
+ * or a placeholder
+ */
+
+ /* Shouldn't get more than 4 footer packets, so skip */
+ if (nd->mt_foot_count >= 4)
break;
- case 0xff000002:
- /*
- * we receive this when the device is in multitouch
- * mode. The first of the three values tagged with
- * this usage tells if the contact point is real
- * or a placeholder
- */
- /* Shouldn't get more than 4 footer packets, so skip */
- if (nd->mt_foot_count >= 4)
- break;
+ nd->mt_footer[nd->mt_foot_count++] = value;
- nd->mt_footer[nd->mt_foot_count++] = value;
+ /* if the footer isn't complete break */
+ if (nd->mt_foot_count != 4)
+ break;
- /* if the footer isn't complete break */
- if (nd->mt_foot_count != 4)
- break;
+ /* Pen activity signal. */
+ if (nd->mt_footer[2]) {
+ /*
+ * When the pen deactivates touch, we see a
+ * bogus frame with ContactCount > 0. We can
+ * save a bit of work by ensuring act_state < 0
+ * even if deactivation slack is turned off.
+ */
+ nd->act_state = deactivate_slack - 1;
+ nd->confidence = 0;
+ break;
+ }
- /* Pen activity signal. */
- if (nd->mt_footer[2]) {
- /*
- * When the pen deactivates touch, we see a
- * bogus frame with ContactCount > 0.
- * We can
- * save a bit of work by ensuring act_state < 0
- * even if deactivation slack is turned off.
- */
- nd->act_state = deactivate_slack - 1;
+ /*
+ * The first footer value indicates the presence of a
+ * finger.
+ */
+ if (nd->mt_footer[0]) {
+ /*
+ * We do not want to process contacts under
+ * the size threshold, but do not want to
+ * ignore them for activation state
+ */
+ if (nd->w < nd->min_width ||
+ nd->h < nd->min_height)
nd->confidence = 0;
- break;
- }
+ } else
+ break;
+ if (nd->act_state > 0) {
/*
- * The first footer value indicates the presence of a
- * finger.
+ * Contact meets the activation size threshold
*/
- if (nd->mt_footer[0]) {
- /*
- * We do not want to process contacts under
- * the size threshold, but do not want to
- * ignore them for activation state
- */
- if (nd->w < nd->min_width ||
- nd->h < nd->min_height)
- nd->confidence = 0;
- } else
- break;
-
- if (nd->act_state > 0) {
- /*
- * Contact meets the activation size threshold
- */
- if (nd->w >= nd->activation_width &&
- nd->h >= nd->activation_height) {
- if (nd->id)
- /*
- * first contact, activate now
- */
- nd->act_state = 0;
- else {
- /*
- * avoid corrupting this frame
- * but ensure next frame will
- * be active
- */
- nd->act_state = 1;
- break;
- }
- } else
+ if (nd->w >= nd->activation_width &&
+ nd->h >= nd->activation_height) {
+ if (nd->id)
/*
- * Defer adjusting the activation state
- * until the end of the frame.
+ * first contact, activate now
*/
+ nd->act_state = 0;
+ else {
+ /*
+ * avoid corrupting this frame
+ * but ensure next frame will
+ * be active
+ */
+ nd->act_state = 1;
break;
- }
-
- /* Discarding this contact */
- if (!nd->confidence)
- break;
-
- /* emit a normal (X, Y) for the first point only */
- if (nd->id == 0) {
+ }
+ } else
/*
- * TipSwitch is superfluous in multitouch
- * mode. The footer events tell us
- * if there is a finger on the screen or
- * not.
+ * Defer adjusting the activation state
+ * until the end of the frame.
*/
- nd->first_contact_touch = nd->confidence;
- input_event(input, EV_ABS, ABS_X, nd->x);
- input_event(input, EV_ABS, ABS_Y, nd->y);
- }
+ break;
+ }
- /* Emit MT events */
- input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
+ /* Discarding this contact */
+ if (!nd->confidence)
+ break;
+ /* emit a normal (X, Y) for the first point only */
+ if (nd->id == 0) {
/*
- * Translate from height and width to size
- * and orientation.
+ * TipSwitch is superfluous in multitouch
+ * mode. The footer events tell us
+ * if there is a finger on the screen or
+ * not.
*/
- if (nd->w > nd->h) {
- input_event(input, EV_ABS,
- ABS_MT_ORIENTATION, 1);
- input_event(input, EV_ABS,
- ABS_MT_TOUCH_MAJOR, nd->w);
- input_event(input, EV_ABS,
- ABS_MT_TOUCH_MINOR, nd->h);
- } else {
- input_event(input, EV_ABS,
- ABS_MT_ORIENTATION, 0);
- input_event(input, EV_ABS,
- ABS_MT_TOUCH_MAJOR, nd->h);
- input_event(input, EV_ABS,
- ABS_MT_TOUCH_MINOR, nd->w);
- }
- input_mt_sync(field->hidinput->input);
- break;
+ nd->first_contact_touch = nd->confidence;
+ input_event(input, EV_ABS, ABS_X, nd->x);
+ input_event(input, EV_ABS, ABS_Y, nd->y);
+ }
- case HID_DG_CONTACTCOUNT: /* End of a multitouch group */
- if (!nd->reading_mt) /* Just to be sure */
- break;
+ /* Emit MT events */
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
+
+ /*
+ * Translate from height and width to size
+ * and orientation.
+ */
+ if (nd->w > nd->h) {
+ input_event(input, EV_ABS,
+ ABS_MT_ORIENTATION, 1);
+ input_event(input, EV_ABS,
+ ABS_MT_TOUCH_MAJOR, nd->w);
+ input_event(input, EV_ABS,
+ ABS_MT_TOUCH_MINOR, nd->h);
+ } else {
+ input_event(input, EV_ABS,
+ ABS_MT_ORIENTATION, 0);
+ input_event(input, EV_ABS,
+ ABS_MT_TOUCH_MAJOR, nd->h);
+ input_event(input, EV_ABS,
+ ABS_MT_TOUCH_MINOR, nd->w);
+ }
+ input_mt_sync(field->hidinput->input);
+ break;
- nd->reading_mt = 0;
+ case HID_DG_CONTACTCOUNT: /* End of a multitouch group */
+ if (!nd->reading_mt) /* Just to be sure */
+ break;
+ nd->reading_mt = 0;
+
+
+ /*
+ * Activation state machine logic:
+ *
+ * Fundamental states:
+ * state > 0: Inactive
+ * state <= 0: Active
+ * state < -deactivate_slack:
+ * Pen termination of touch
+ *
+ * Specific values of interest
+ * state == activate_slack
+ * no valid input since the last reset
+ *
+ * state == 0
+ * general operational state
+ *
+ * state == -deactivate_slack
+ * read sufficient empty frames to accept
+ * the end of input and reset
+ */
+
+ if (nd->act_state > 0) { /* Currently inactive */
+ if (value)
+ /*
+ * Consider each live contact as
+ * evidence of intentional activity.
+ */
+ nd->act_state = (nd->act_state > value)
+ ? nd->act_state - value
+ : 0;
+ else
+ /*
+ * Empty frame before we hit the
+ * activity threshold, reset.
+ */
+ nd->act_state = nd->activate_slack;
/*
- * Activation state machine logic:
- *
- * Fundamental states:
- * state > 0: Inactive
- * state <= 0: Active
- * state < -deactivate_slack:
- * Pen termination of touch
- *
- * Specific values of interest
- * state == activate_slack
- * no valid input since the last reset
- *
- * state == 0
- * general operational state
- *
- * state == -deactivate_slack
- * read sufficient empty frames to accept
- * the end of input and reset
+ * Entered this block inactive and no
+ * coordinates sent this frame, so hold off
+ * on button state.
*/
-
- if (nd->act_state > 0) { /* Currently inactive */
- if (value)
- /*
- * Consider each live contact as
- * evidence of intentional activity.
- */
- nd->act_state = (nd->act_state > value)
- ? nd->act_state - value
- : 0;
- else
- /*
- * Empty frame before we hit the
- * activity threshold, reset.
- */
- nd->act_state = nd->activate_slack;
-
+ break;
+ } else { /* Currently active */
+ if (value && nd->act_state >=
+ nd->deactivate_slack)
/*
- * Entered this block inactive and no
- * coordinates sent this frame, so hold off
- * on button state.
+ * Live point: clear accumulated
+ * deactivation count.
*/
- break;
- } else { /* Currently active */
- if (value && nd->act_state >=
- nd->deactivate_slack)
- /*
- * Live point: clear accumulated
- * deactivation count.
- */
- nd->act_state = 0;
- else if (nd->act_state <= nd->deactivate_slack)
- /*
- * We've consumed the deactivation
- * slack, time to deactivate and reset.
- */
- nd->act_state =
- nd->activate_slack;
- else { /* Move towards deactivation */
- nd->act_state--;
- break;
- }
- }
-
- if (nd->first_contact_touch && nd->act_state <= 0) {
+ nd->act_state = 0;
+ else if (nd->act_state <= nd->deactivate_slack)
/*
- * Check to see if we're ready to start
- * emitting touch events.
- *
- * Note: activation slack will decrease over
- * the course of the frame, and it will be
- * inconsistent from the start to the end of
- * the frame. However if the frame starts
- * with slack, first_contact_touch will still
- * be 0 and we will not get to this point.
+ * We've consumed the deactivation
+ * slack, time to deactivate and reset.
*/
- input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
- input_report_key(input, BTN_TOUCH, 1);
- } else {
- input_report_key(input, BTN_TOOL_DOUBLETAP, 0);
- input_report_key(input, BTN_TOUCH, 0);
+ nd->act_state =
+ nd->activate_slack;
+ else { /* Move towards deactivation */
+ nd->act_state--;
+ break;
}
- break;
+ }
- default:
- /* fall-back to the generic hidinput handling */
- return 0;
+ if (nd->first_contact_touch && nd->act_state <= 0) {
+ /*
+ * Check to see if we're ready to start
+ * emitting touch events.
+ *
+ * Note: activation slack will decrease over
+ * the course of the frame, and it will be
+ * inconsistent from the start to the end of
+ * the frame. However if the frame starts
+ * with slack, first_contact_touch will still
+ * be 0 and we will not get to this point.
+ */
+ input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
+ input_report_key(input, BTN_TOUCH, 1);
+ } else {
+ input_report_key(input, BTN_TOOL_DOUBLETAP, 0);
+ input_report_key(input, BTN_TOUCH, 0);
}
+ break;
+
+ default:
+ /* fall-back to the generic hidinput handling */
+ return 0;
}
+not_claimed_input:
+
/* we have handled the hidinput part, now remains hiddev */
if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_hid_event)
hid->hiddev_hid_event(hid, field, usage, value);
@@ -826,7 +867,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct hid_report *report;
if (id->driver_data)
- hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT
+ | HID_QUIRK_NO_INIT_REPORTS;
nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL);
if (!nd) {
@@ -893,8 +935,19 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
/* This is needed for devices with more recent firmware versions */
report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a];
- if (report)
- usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ if (report) {
+ /* Let the device settle to ensure the wakeup message gets
+ * through */
+ usbhid_wait_io(hdev);
+ usbhid_submit_report(hdev, report, USB_DIR_IN);
+
+ /*
+ * Sanity check: if the current mode is invalid reset it to
+ * something reasonable.
+ */
+ if (ntrig_get_mode(hdev) >= 4)
+ ntrig_set_mode(hdev, 3);
+ }
ntrig_report_version(hdev);
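ntrig_get_mode() and ntrig_set_mode() added above are the pieces the probe path combines into its sanity check: read feature report 0x0d and, if the reported mode falls outside 0..3, force mode 3. The same logic as a small hypothetical helper:

/* Illustrative wrapper around the mode sanity check done in ntrig_probe(). */
static void example_ntrig_sanitize_mode(struct hid_device *hdev)
{
	int mode = ntrig_get_mode(hdev);

	if (mode < 0)
		return;			/* feature report 0x0d not present */
	if (mode >= 4)
		ntrig_set_mode(hdev, 3);
}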
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index e90edfc63051..f9b7dd4f607f 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -1,7 +1,6 @@
/*
- * HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad).
- * Fixes LogicalMaximum error in USB report description, see
- * http://bugzilla.kernel.org/show_bug.cgi?id=14787
+ * HID driver for Ortek PKB-1700/WKB-2000 (wireless keyboard + mouse trackpad).
+ * Fixes LogicalMaximum error in the HID report descriptor.
*
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
*/
@@ -30,6 +29,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
static const struct hid_device_id ortek_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ }
};
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index de9cf21b3494..657da5a3d5c6 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -944,6 +944,7 @@ static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *
}
memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 0xff;
bdev = backlight_device_register(dev_name(dev), dev, data,
&picolcd_blops, &props);
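The one-line picolcd change fills in the backlight type now required by the backlight class. The surrounding registration pattern, reduced to a sketch (only the backlight API calls are real; the wrapper is illustrative and assumes <linux/backlight.h> and <linux/string.h> are already included):

/* Illustrative wrapper: register a raw-controlled backlight device. */
static struct backlight_device *example_register_backlight(struct device *dev,
		void *drvdata, const struct backlight_ops *ops)
{
	struct backlight_properties props;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;	/* driver talks to the hardware itself */
	props.max_brightness = 0xff;

	return backlight_device_register(dev_name(dev), dev, drvdata, ops, &props);
}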
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
new file mode 100644
index 000000000000..2307471d96dc
--- /dev/null
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -0,0 +1,450 @@
+/*
+ * Roccat Arvo driver for Linux
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Arvo is a gaming keyboard with 5 macro keys that can be configured in
+ * 5 profiles.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-arvo.h"
+
+static struct class *arvo_class;
+
+static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_mode_key temp_buf;
+ int retval;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_MODE_KEY,
+ &temp_buf, sizeof(struct arvo_mode_key));
+ mutex_unlock(&arvo->arvo_lock);
+ if (retval)
+ return retval;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.state);
+}
+
+static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_mode_key temp_buf;
+ unsigned long state;
+ int retval;
+
+ retval = strict_strtoul(buf, 10, &state);
+ if (retval)
+ return retval;
+
+ temp_buf.command = ARVO_COMMAND_MODE_KEY;
+ temp_buf.state = state;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_MODE_KEY,
+ &temp_buf, sizeof(struct arvo_mode_key));
+ mutex_unlock(&arvo->arvo_lock);
+ if (retval)
+ return retval;
+
+ return size;
+}
+
+static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_key_mask temp_buf;
+ int retval;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_KEY_MASK,
+ &temp_buf, sizeof(struct arvo_key_mask));
+ mutex_unlock(&arvo->arvo_lock);
+ if (retval)
+ return retval;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.key_mask);
+}
+
+static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_key_mask temp_buf;
+ unsigned long key_mask;
+ int retval;
+
+ retval = strict_strtoul(buf, 10, &key_mask);
+ if (retval)
+ return retval;
+
+ temp_buf.command = ARVO_COMMAND_KEY_MASK;
+ temp_buf.key_mask = key_mask;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_KEY_MASK,
+ &temp_buf, sizeof(struct arvo_key_mask));
+ mutex_unlock(&arvo->arvo_lock);
+ if (retval)
+ return retval;
+
+ return size;
+}
+
+/* retval is 1-5 on success, < 0 on error */
+static int arvo_get_actual_profile(struct usb_device *usb_dev)
+{
+ struct arvo_actual_profile temp_buf;
+ int retval;
+
+ retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE,
+ &temp_buf, sizeof(struct arvo_actual_profile));
+
+ if (retval)
+ return retval;
+
+ return temp_buf.actual_profile;
+}
+
+static ssize_t arvo_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", arvo->actual_profile);
+}
+
+static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct arvo_device *arvo =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ struct usb_device *usb_dev =
+ interface_to_usbdev(to_usb_interface(dev->parent->parent));
+ struct arvo_actual_profile temp_buf;
+ unsigned long profile;
+ int retval;
+
+ retval = strict_strtoul(buf, 10, &profile);
+ if (retval)
+ return retval;
+
+ temp_buf.command = ARVO_COMMAND_ACTUAL_PROFILE;
+ temp_buf.actual_profile = profile;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE,
+ &temp_buf, sizeof(struct arvo_actual_profile));
+ if (!retval) {
+ arvo->actual_profile = profile;
+ retval = size;
+ }
+ mutex_unlock(&arvo->arvo_lock);
+ return retval;
+}
+
+static ssize_t arvo_sysfs_write(struct file *fp,
+ struct kobject *kobj, void const *buf,
+ loff_t off, size_t count, size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_send(usb_dev, command, buf, real_size);
+ mutex_unlock(&arvo->arvo_lock);
+
+ return (retval ? retval : real_size);
+}
+
+static ssize_t arvo_sysfs_read(struct file *fp,
+ struct kobject *kobj, void *buf, loff_t off,
+ size_t count, size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&arvo->arvo_lock);
+ retval = roccat_common_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&arvo->arvo_lock);
+
+ return (retval ? retval : real_size);
+}
+
+static ssize_t arvo_sysfs_write_button(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return arvo_sysfs_write(fp, kobj, buf, off, count,
+ sizeof(struct arvo_button), ARVO_USB_COMMAND_BUTTON);
+}
+
+static ssize_t arvo_sysfs_read_info(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return arvo_sysfs_read(fp, kobj, buf, off, count,
+ sizeof(struct arvo_info), ARVO_USB_COMMAND_INFO);
+}
+
+
+static struct device_attribute arvo_attributes[] = {
+ __ATTR(mode_key, 0660,
+ arvo_sysfs_show_mode_key, arvo_sysfs_set_mode_key),
+ __ATTR(key_mask, 0660,
+ arvo_sysfs_show_key_mask, arvo_sysfs_set_key_mask),
+ __ATTR(actual_profile, 0660,
+ arvo_sysfs_show_actual_profile,
+ arvo_sysfs_set_actual_profile),
+ __ATTR_NULL
+};
+
+static struct bin_attribute arvo_bin_attributes[] = {
+ {
+ .attr = { .name = "button", .mode = 0220 },
+ .size = sizeof(struct arvo_button),
+ .write = arvo_sysfs_write_button
+ },
+ {
+ .attr = { .name = "info", .mode = 0440 },
+ .size = sizeof(struct arvo_info),
+ .read = arvo_sysfs_read_info
+ },
+ __ATTR_NULL
+};
+
+static int arvo_init_arvo_device_struct(struct usb_device *usb_dev,
+ struct arvo_device *arvo)
+{
+ int retval;
+
+ mutex_init(&arvo->arvo_lock);
+
+ retval = arvo_get_actual_profile(usb_dev);
+ if (retval < 0)
+ return retval;
+ arvo->actual_profile = retval;
+
+ return 0;
+}
+
+static int arvo_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct arvo_device *arvo;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_KEYBOARD) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ arvo = kzalloc(sizeof(*arvo), GFP_KERNEL);
+ if (!arvo) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, arvo);
+
+ retval = arvo_init_arvo_device_struct(usb_dev, arvo);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct arvo_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(arvo_class, hdev,
+ sizeof(struct arvo_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ arvo->chrdev_minor = retval;
+ arvo->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(arvo);
+ return retval;
+}
+
+static void arvo_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct arvo_device *arvo;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_KEYBOARD)
+ return;
+
+ arvo = hid_get_drvdata(hdev);
+ if (arvo->roccat_claimed)
+ roccat_disconnect(arvo->chrdev_minor);
+ kfree(arvo);
+}
+
+static int arvo_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = arvo_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install keyboard\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void arvo_remove(struct hid_device *hdev)
+{
+ arvo_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void arvo_report_to_chrdev(struct arvo_device const *arvo,
+ u8 const *data)
+{
+ struct arvo_special_report const *special_report;
+ struct arvo_roccat_report roccat_report;
+
+ special_report = (struct arvo_special_report const *)data;
+
+ roccat_report.profile = arvo->actual_profile;
+ roccat_report.button = special_report->event &
+ ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON;
+ if ((special_report->event & ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION) ==
+ ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS)
+ roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_PRESS;
+ else
+ roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_RELEASE;
+
+ roccat_report_event(arvo->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int arvo_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct arvo_device *arvo = hid_get_drvdata(hdev);
+
+ if (size != 3)
+ return 0;
+
+ if (arvo->roccat_claimed)
+ arvo_report_to_chrdev(arvo, data);
+
+ return 0;
+}
+
+static const struct hid_device_id arvo_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, arvo_devices);
+
+static struct hid_driver arvo_driver = {
+ .name = "arvo",
+ .id_table = arvo_devices,
+ .probe = arvo_probe,
+ .remove = arvo_remove,
+ .raw_event = arvo_raw_event
+};
+
+static int __init arvo_init(void)
+{
+ int retval;
+
+ arvo_class = class_create(THIS_MODULE, "arvo");
+ if (IS_ERR(arvo_class))
+ return PTR_ERR(arvo_class);
+ arvo_class->dev_attrs = arvo_attributes;
+ arvo_class->dev_bin_attrs = arvo_bin_attributes;
+
+ retval = hid_register_driver(&arvo_driver);
+ if (retval)
+ class_destroy(arvo_class);
+ return retval;
+}
+
+static void __exit arvo_exit(void)
+{
+ hid_unregister_driver(&arvo_driver);
+ class_destroy(arvo_class);
+}
+
+module_init(arvo_init);
+module_exit(arvo_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Arvo driver");
+MODULE_LICENSE("GPL v2");
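From userspace the attributes registered above appear as ordinary sysfs files; actual_profile takes the persistent profile number, which the driver documents as 1-5. A small plain-C sketch, with the sysfs path assumed for illustration rather than taken from the driver:

#include <stdio.h>

int main(void)
{
	/* Path is an assumption; the real location depends on where the
	 * roccat device node ends up under /sys. */
	FILE *f = fopen("/sys/class/arvo/arvo0/actual_profile", "w");

	if (!f) {
		perror("actual_profile");
		return 1;
	}
	fprintf(f, "%d\n", 2);		/* switch to profile 2 */
	fclose(f);
	return 0;
}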
diff --git a/drivers/hid/hid-roccat-arvo.h b/drivers/hid/hid-roccat-arvo.h
new file mode 100644
index 000000000000..d284a781c99e
--- /dev/null
+++ b/drivers/hid/hid-roccat-arvo.h
@@ -0,0 +1,98 @@
+#ifndef __HID_ROCCAT_ARVO_H
+#define __HID_ROCCAT_ARVO_H
+
+/*
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+struct arvo_mode_key { /* 2 bytes */
+ uint8_t command; /* ARVO_COMMAND_MODE_KEY */
+ uint8_t state;
+} __packed;
+
+struct arvo_button {
+ uint8_t unknown[24];
+} __packed;
+
+struct arvo_info {
+ uint8_t unknown[8];
+} __packed;
+
+struct arvo_key_mask { /* 2 bytes */
+ uint8_t command; /* ARVO_COMMAND_KEY_MASK */
+ uint8_t key_mask;
+} __packed;
+
+/* selected profile is persistent */
+struct arvo_actual_profile { /* 2 bytes */
+ uint8_t command; /* ARVO_COMMAND_ACTUAL_PROFILE */
+ uint8_t actual_profile;
+} __packed;
+
+enum arvo_commands {
+ ARVO_COMMAND_MODE_KEY = 0x3,
+ ARVO_COMMAND_BUTTON = 0x4,
+ ARVO_COMMAND_INFO = 0x5,
+ ARVO_COMMAND_KEY_MASK = 0x6,
+ ARVO_COMMAND_ACTUAL_PROFILE = 0x7,
+};
+
+enum arvo_usb_commands {
+ ARVO_USB_COMMAND_MODE_KEY = 0x303,
+ /*
+ * read/write
+ * Read uses both index bytes as profile/key indexes
+ * Write has index 0, profile/key is determined by payload
+ */
+ ARVO_USB_COMMAND_BUTTON = 0x304,
+ ARVO_USB_COMMAND_INFO = 0x305,
+ ARVO_USB_COMMAND_KEY_MASK = 0x306,
+ ARVO_USB_COMMAND_ACTUAL_PROFILE = 0x307,
+};
+
+struct arvo_special_report {
+ uint8_t unknown1; /* always 0x01 */
+ uint8_t event;
+ uint8_t unknown2; /* always 0x70 */
+} __packed;
+
+enum arvo_special_report_events {
+ ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS = 0x10,
+ ARVO_SPECIAL_REPORT_EVENT_ACTION_RELEASE = 0x0,
+};
+
+enum arvo_special_report_event_masks {
+ ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION = 0xf0,
+ ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON = 0x0f,
+};
+
+struct arvo_roccat_report {
+ uint8_t profile;
+ uint8_t button;
+ uint8_t action;
+} __packed;
+
+enum arvo_roccat_report_action {
+ ARVO_ROCCAT_REPORT_ACTION_RELEASE = 0,
+ ARVO_ROCCAT_REPORT_ACTION_PRESS = 1,
+};
+
+struct arvo_device {
+ int roccat_claimed;
+ int chrdev_minor;
+
+ struct mutex arvo_lock;
+
+ int actual_profile;
+};
+
+#endif
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
new file mode 100644
index 000000000000..13b1eb0c8c65
--- /dev/null
+++ b/drivers/hid/hid-roccat-common.c
@@ -0,0 +1,62 @@
+/*
+ * Roccat common functions for device specific drivers
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/slab.h>
+#include "hid-roccat-common.h"
+
+int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
+ void *data, uint size)
+{
+ char *buf;
+ int len;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ memcpy(data, buf, size);
+ kfree(buf);
+ return ((len < 0) ? len : ((len != size) ? -EIO : 0));
+}
+EXPORT_SYMBOL_GPL(roccat_common_receive);
+
+int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
+ void const *data, uint size)
+{
+ char *buf;
+ int len;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ memcpy(buf, data, size);
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ kfree(buf);
+ return ((len < 0) ? len : ((len != size) ? -EIO : 0));
+}
+EXPORT_SYMBOL_GPL(roccat_common_send);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat common driver");
+MODULE_LICENSE("GPL v2");
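Device-specific drivers are meant to funnel their small feature-report structs through these two helpers, as the kone and koneplus conversions below do. A usage sketch (the struct and the command value are invented; only roccat_common_receive() and roccat_common_send() are real):

/* Hypothetical caller: read a two-byte feature, flip it, write it back. */
struct example_feature {
	uint8_t command;
	uint8_t state;
} __packed;

static int example_toggle_state(struct usb_device *usb_dev, uint command)
{
	struct example_feature buf;
	int retval;

	retval = roccat_common_receive(usb_dev, command, &buf, sizeof(buf));
	if (retval)
		return retval;

	buf.state = !buf.state;
	return roccat_common_send(usb_dev, command, &buf, sizeof(buf));
}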
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
new file mode 100644
index 000000000000..fe45fae05bb9
--- /dev/null
+++ b/drivers/hid/hid-roccat-common.h
@@ -0,0 +1,23 @@
+#ifndef __HID_ROCCAT_COMMON_H
+#define __HID_ROCCAT_COMMON_H
+
+/*
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/usb.h>
+#include <linux/types.h>
+
+int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
+ void *data, uint size);
+int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
+ void const *data, uint size);
+
+#endif
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index cbd8cc42e75a..a57838d15267 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -28,11 +28,11 @@
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
-#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/hid-roccat.h>
#include "hid-ids.h"
-#include "hid-roccat.h"
+#include "hid-roccat-common.h"
#include "hid-roccat-kone.h"
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
@@ -58,12 +58,8 @@ static void kone_set_settings_checksum(struct kone_settings *settings)
*/
static int kone_check_write(struct usb_device *usb_dev)
{
- int len;
- unsigned char *data;
-
- data = kmalloc(1, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ int retval;
+ uint8_t data;
do {
/*
@@ -72,56 +68,36 @@ static int kone_check_write(struct usb_device *usb_dev)
*/
msleep(80);
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE |
- USB_DIR_IN,
- kone_command_confirm_write, 0, data, 1,
- USB_CTRL_SET_TIMEOUT);
-
- if (len != 1) {
- kfree(data);
- return -EIO;
- }
+ retval = roccat_common_receive(usb_dev,
+ kone_command_confirm_write, &data, 1);
+ if (retval)
+ return retval;
/*
* value of 3 seems to mean something like
* "not finished yet, but it looks good"
* So check again after a moment.
*/
- } while (*data == 3);
+ } while (data == 3);
- if (*data == 1) { /* everything alright */
- kfree(data);
+ if (data == 1) /* everything alright */
return 0;
- } else { /* unknown answer */
- hid_err(usb_dev, "got retval %d when checking write\n", *data);
- kfree(data);
- return -EIO;
- }
+
+ /* unknown answer */
+ hid_err(usb_dev, "got retval %d when checking write\n", data);
+ return -EIO;
}
/*
* Reads settings from mouse and stores it in @buf
- * @buf has to be alloced with GFP_KERNEL
* On success returns 0
* On failure returns errno
*/
static int kone_get_settings(struct usb_device *usb_dev,
struct kone_settings *buf)
{
- int len;
-
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- kone_command_settings, 0, buf,
- sizeof(struct kone_settings), USB_CTRL_SET_TIMEOUT);
-
- if (len != sizeof(struct kone_settings))
- return -EIO;
-
- return 0;
+ return roccat_common_receive(usb_dev, kone_command_settings, buf,
+ sizeof(struct kone_settings));
}
/*
@@ -132,22 +108,12 @@ static int kone_get_settings(struct usb_device *usb_dev,
static int kone_set_settings(struct usb_device *usb_dev,
struct kone_settings const *settings)
{
- int len;
-
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- kone_command_settings, 0, (char *)settings,
- sizeof(struct kone_settings),
- USB_CTRL_SET_TIMEOUT);
-
- if (len != sizeof(struct kone_settings))
- return -EIO;
-
- if (kone_check_write(usb_dev))
- return -EIO;
-
- return 0;
+ int retval;
+ retval = roccat_common_send(usb_dev, kone_command_settings,
+ settings, sizeof(struct kone_settings));
+ if (retval)
+ return retval;
+ return kone_check_write(usb_dev);
}
/*
@@ -193,7 +159,7 @@ static int kone_set_profile(struct usb_device *usb_dev,
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_CONFIGURATION,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- kone_command_profile, number, (char *)profile,
+ kone_command_profile, number, (void *)profile,
sizeof(struct kone_profile),
USB_CTRL_SET_TIMEOUT);
@@ -213,24 +179,15 @@ static int kone_set_profile(struct usb_device *usb_dev,
*/
static int kone_get_weight(struct usb_device *usb_dev, int *result)
{
- int len;
- uint8_t *data;
+ int retval;
+ uint8_t data;
- data = kmalloc(1, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ retval = roccat_common_receive(usb_dev, kone_command_weight, &data, 1);
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- kone_command_weight, 0, data, 1, USB_CTRL_SET_TIMEOUT);
+ if (retval)
+ return retval;
- if (len != 1) {
- kfree(data);
- return -EIO;
- }
- *result = (int)*data;
- kfree(data);
+ *result = (int)data;
return 0;
}
@@ -241,25 +198,15 @@ static int kone_get_weight(struct usb_device *usb_dev, int *result)
*/
static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
{
- int len;
- unsigned char *data;
-
- data = kmalloc(2, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ int retval;
+ uint16_t data;
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- kone_command_firmware_version, 0, data, 2,
- USB_CTRL_SET_TIMEOUT);
+ retval = roccat_common_receive(usb_dev, kone_command_firmware_version,
+ &data, 2);
+ if (retval)
+ return retval;
- if (len != 2) {
- kfree(data);
- return -EIO;
- }
- *result = le16_to_cpu(*data);
- kfree(data);
+ *result = le16_to_cpu(data);
return 0;
}
@@ -435,23 +382,9 @@ static ssize_t kone_sysfs_show_tcu(struct device *dev,
static int kone_tcu_command(struct usb_device *usb_dev, int number)
{
- int len;
- char *value;
-
- value = kmalloc(1, GFP_KERNEL);
- if (!value)
- return -ENOMEM;
-
- *value = number;
-
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- kone_command_calibrate, 0, value, 1,
- USB_CTRL_SET_TIMEOUT);
-
- kfree(value);
- return ((len != 1) ? -EIO : 0);
+ unsigned char value;
+ value = number;
+ return roccat_common_send(usb_dev, kone_command_calibrate, &value, 1);
}
/*
@@ -727,7 +660,8 @@ static int kone_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(kone_class, hdev);
+ retval = roccat_connect(kone_class, hdev,
+ sizeof(struct kone_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
/* be tolerant about not getting chrdev */
@@ -827,8 +761,7 @@ static void kone_report_to_chrdev(struct kone_device const *kone,
roccat_report.value = event->value;
roccat_report.key = 0;
roccat_report_event(kone->chrdev_minor,
- (uint8_t *)&roccat_report,
- sizeof(struct kone_roccat_report));
+ (uint8_t *)&roccat_report);
break;
case kone_mouse_event_call_overlong_macro:
if (event->value == kone_keystroke_action_press) {
@@ -836,8 +769,7 @@ static void kone_report_to_chrdev(struct kone_device const *kone,
roccat_report.value = kone->actual_profile;
roccat_report.key = event->macro_key;
roccat_report_event(kone->chrdev_minor,
- (uint8_t *)&roccat_report,
- sizeof(struct kone_roccat_report));
+ (uint8_t *)&roccat_report);
}
break;
}
@@ -912,8 +844,8 @@ static int __init kone_init(void)
static void __exit kone_exit(void)
{
- class_destroy(kone_class);
hid_unregister_driver(&kone_driver);
+ class_destroy(kone_class);
}
module_init(kone_init);
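The kone_exit() hunk above swaps the teardown order so the driver, and with it every bound device, is unregistered before its device class disappears; the same ordering is used by the arvo driver added earlier in this series. The symmetric init/exit pair, sketched with the kone names:

static int __init example_init(void)
{
	int retval;

	kone_class = class_create(THIS_MODULE, "kone");
	if (IS_ERR(kone_class))
		return PTR_ERR(kone_class);

	retval = hid_register_driver(&kone_driver);
	if (retval)
		class_destroy(kone_class);	/* undo on failure */
	return retval;
}

static void __exit example_exit(void)
{
	hid_unregister_driver(&kone_driver);	/* unbind devices first */
	class_destroy(kone_class);		/* only then drop the class */
}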
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 1608c8d1efd6..33eec74e0615 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -19,11 +19,11 @@
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
-#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/hid-roccat.h>
#include "hid-ids.h"
-#include "hid-roccat.h"
+#include "hid-roccat-common.h"
#include "hid-roccat-koneplus.h"
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
@@ -39,110 +39,63 @@ static void koneplus_profile_activated(struct koneplus_device *koneplus,
static int koneplus_send_control(struct usb_device *usb_dev, uint value,
enum koneplus_control_requests request)
{
- int len;
- struct koneplus_control *control;
+ struct koneplus_control control;
if ((request == KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
value > 4)
return -EINVAL;
- control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL);
- if (!control)
- return -ENOMEM;
+ control.command = KONEPLUS_COMMAND_CONTROL;
+ control.value = value;
+ control.request = request;
- control->command = KONEPLUS_COMMAND_CONTROL;
- control->value = value;
- control->request = request;
-
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- KONEPLUS_USB_COMMAND_CONTROL, 0, control,
- sizeof(struct koneplus_control),
- USB_CTRL_SET_TIMEOUT);
-
- kfree(control);
-
- if (len != sizeof(struct koneplus_control))
- return len;
-
- return 0;
-}
-
-static int koneplus_receive(struct usb_device *usb_dev, uint usb_command,
- void *buf, uint size) {
- int len;
-
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
-
- return (len != size) ? -EIO : 0;
+ return roccat_common_send(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+ &control, sizeof(struct koneplus_control));
}
static int koneplus_receive_control_status(struct usb_device *usb_dev)
{
int retval;
- struct koneplus_control *control;
-
- control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL);
- if (!control)
- return -ENOMEM;
+ struct koneplus_control control;
do {
- retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
- control, sizeof(struct koneplus_control));
+ retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+ &control, sizeof(struct koneplus_control));
/* check if we get a completely wrong answer */
if (retval)
- goto out;
+ return retval;
- if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OK) {
- retval = 0;
- goto out;
- }
+ if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OK)
+ return 0;
/* indicates that hardware needs some more time to complete action */
- if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) {
+ if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) {
msleep(500); /* windows driver uses 1000 */
continue;
}
/* seems to be critical - replug necessary */
- if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD) {
- retval = -EINVAL;
- goto out;
- }
-
- dev_err(&usb_dev->dev, "koneplus_receive_control_status: "
- "unknown response value 0x%x\n", control->value);
- retval = -EINVAL;
- goto out;
+ if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
+ return -EINVAL;
+ hid_err(usb_dev, "koneplus_receive_control_status: "
+ "unknown response value 0x%x\n", control.value);
+ return -EINVAL;
} while (1);
-out:
- kfree(control);
- return retval;
}
static int koneplus_send(struct usb_device *usb_dev, uint command,
- void *buf, uint size) {
- int len;
-
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
-
- if (len != size)
- return -EIO;
+ void const *buf, uint size)
+{
+ int retval;
- if (koneplus_receive_control_status(usb_dev))
- return -EIO;
+ retval = roccat_common_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
- return 0;
+ return koneplus_receive_control_status(usb_dev);
}
static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
@@ -167,7 +120,7 @@ static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
static int koneplus_get_info(struct usb_device *usb_dev,
struct koneplus_info *buf)
{
- return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO,
+ return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO,
buf, sizeof(struct koneplus_info));
}
@@ -181,7 +134,7 @@ static int koneplus_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
- return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct koneplus_profile_settings));
}
@@ -189,7 +142,7 @@ static int koneplus_set_profile_settings(struct usb_device *usb_dev,
struct koneplus_profile_settings const *settings)
{
return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
- (void *)settings, sizeof(struct koneplus_profile_settings));
+ settings, sizeof(struct koneplus_profile_settings));
}
static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
@@ -202,7 +155,7 @@ static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
if (retval)
return retval;
- return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct koneplus_profile_buttons));
}
@@ -210,27 +163,19 @@ static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
struct koneplus_profile_buttons const *buttons)
{
return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
- (void *)buttons, sizeof(struct koneplus_profile_buttons));
+ buttons, sizeof(struct koneplus_profile_buttons));
}
/* retval is 0-4 on success, < 0 on error */
static int koneplus_get_startup_profile(struct usb_device *usb_dev)
{
- struct koneplus_startup_profile *buf;
+ struct koneplus_startup_profile buf;
int retval;
- buf = kmalloc(sizeof(struct koneplus_startup_profile), GFP_KERNEL);
+ retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
+ &buf, sizeof(struct koneplus_startup_profile));
- retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
- buf, sizeof(struct koneplus_startup_profile));
-
- if (retval)
- goto out;
-
- retval = buf->startup_profile;
-out:
- kfree(buf);
- return retval;
+ return retval ? retval : buf.startup_profile;
}
static int koneplus_set_startup_profile(struct usb_device *usb_dev,
@@ -243,7 +188,7 @@ static int koneplus_set_startup_profile(struct usb_device *usb_dev,
buf.startup_profile = startup_profile;
return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
- (char *)&buf, sizeof(struct koneplus_profile_buttons));
+ &buf, sizeof(struct koneplus_profile_buttons));
}
static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
@@ -256,11 +201,14 @@ static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval;
+ if (off >= real_size)
+ return 0;
+
if (off != 0 || count != real_size)
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = koneplus_receive(usb_dev, command, buf, real_size);
+ retval = roccat_common_receive(usb_dev, command, buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
@@ -283,7 +231,7 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = koneplus_send(usb_dev, command, (void *)buf, real_size);
+ retval = koneplus_send(usb_dev, command, buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
@@ -347,7 +295,7 @@ static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
count = sizeof(struct koneplus_profile_settings) - off;
mutex_lock(&koneplus->koneplus_lock);
- memcpy(buf, ((void const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off,
+ memcpy(buf, ((char const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off,
count);
mutex_unlock(&koneplus->koneplus_lock);
@@ -406,7 +354,7 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
count = sizeof(struct koneplus_profile_buttons) - off;
mutex_lock(&koneplus->koneplus_lock);
- memcpy(buf, ((void const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off,
+ memcpy(buf, ((char const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off,
count);
mutex_unlock(&koneplus->koneplus_lock);
@@ -512,7 +460,7 @@ static struct device_attribute koneplus_attributes[] = {
static struct bin_attribute koneplus_bin_attributes[] = {
{
- .attr = { .name = "sensor", .mode = 0220 },
+ .attr = { .name = "sensor", .mode = 0660 },
.size = sizeof(struct koneplus_sensor),
.read = koneplus_sysfs_read_sensor,
.write = koneplus_sysfs_write_sensor
@@ -609,11 +557,13 @@ static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev,
struct koneplus_device *koneplus)
{
int retval, i;
- static uint wait = 70; /* device will freeze with just 60 */
+ static uint wait = 100; /* device will freeze with just 60 */
mutex_init(&koneplus->koneplus_lock);
koneplus->startup_profile = koneplus_get_startup_profile(usb_dev);
+ if (koneplus->startup_profile < 0)
+ return koneplus->startup_profile;
msleep(wait);
retval = koneplus_get_info(usb_dev, &koneplus->info);
@@ -651,21 +601,21 @@ static int koneplus_init_specials(struct hid_device *hdev)
koneplus = kzalloc(sizeof(*koneplus), GFP_KERNEL);
if (!koneplus) {
- dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ hid_err(hdev, "can't alloc device descriptor\n");
return -ENOMEM;
}
hid_set_drvdata(hdev, koneplus);
retval = koneplus_init_koneplus_device_struct(usb_dev, koneplus);
if (retval) {
- dev_err(&hdev->dev,
- "couldn't init struct koneplus_device\n");
+ hid_err(hdev, "couldn't init struct koneplus_device\n");
goto exit_free;
}
- retval = roccat_connect(koneplus_class, hdev);
+ retval = roccat_connect(koneplus_class, hdev,
+ sizeof(struct koneplus_roccat_report));
if (retval < 0) {
- dev_err(&hdev->dev, "couldn't init char dev\n");
+ hid_err(hdev, "couldn't init char dev\n");
} else {
koneplus->chrdev_minor = retval;
koneplus->roccat_claimed = 1;
@@ -701,19 +651,19 @@ static int koneplus_probe(struct hid_device *hdev,
retval = hid_parse(hdev);
if (retval) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto exit;
}
retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (retval) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto exit;
}
retval = koneplus_init_specials(hdev);
if (retval) {
- dev_err(&hdev->dev, "couldn't install mouse\n");
+ hid_err(hdev, "couldn't install mouse\n");
goto exit_stop;
}
@@ -769,8 +719,7 @@ static void koneplus_report_to_chrdev(struct koneplus_device const *koneplus,
roccat_report.data2 = button_report->data2;
roccat_report.profile = koneplus->actual_profile + 1;
roccat_report_event(koneplus->chrdev_minor,
- (uint8_t const *)&roccat_report,
- sizeof(struct koneplus_roccat_report));
+ (uint8_t const *)&roccat_report);
}
static int koneplus_raw_event(struct hid_device *hdev,
@@ -825,8 +774,8 @@ static int __init koneplus_init(void)
static void __exit koneplus_exit(void)
{
- class_destroy(koneplus_class);
hid_unregister_driver(&koneplus_driver);
+ class_destroy(koneplus_class);
}
module_init(koneplus_init);
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
new file mode 100644
index 000000000000..984be2f8967e
--- /dev/null
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -0,0 +1,715 @@
+/*
+ * Roccat Kova[+] driver for Linux
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Kova[+] is a bigger version of the Pyra with two more side buttons.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-kovaplus.h"
+
+static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+
+static struct class *kovaplus_class;
+
+static uint kovaplus_convert_event_cpi(uint value)
+{
+ return (value == 7 ? 4 : (value == 4 ? 3 : value));
+}
+
+static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
+ uint new_profile_index)
+{
+ kovaplus->actual_profile = new_profile_index;
+ kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
+ kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
+ kovaplus->actual_y_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_y;
+}
+
+static int kovaplus_send_control(struct usb_device *usb_dev, uint value,
+ enum kovaplus_control_requests request)
+{
+ int retval;
+ struct kovaplus_control control;
+
+ if ((request == KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
+ request == KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
+ value > 4)
+ return -EINVAL;
+
+ control.command = KOVAPLUS_COMMAND_CONTROL;
+ control.value = value;
+ control.request = request;
+
+ retval = roccat_common_send(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL,
+ &control, sizeof(struct kovaplus_control));
+
+ return retval;
+}
+
+static int kovaplus_receive_control_status(struct usb_device *usb_dev)
+{
+ int retval;
+ struct kovaplus_control control;
+
+ do {
+ retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL,
+ &control, sizeof(struct kovaplus_control));
+
+ /* check if we get a completely wrong answer */
+ if (retval)
+ return retval;
+
+ if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OK)
+ return 0;
+
+ /* indicates that hardware needs some more time to complete action */
+ if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT) {
+ msleep(500); /* windows driver uses 1000 */
+ continue;
+ }
+
+ /* seems to be critical - replug necessary */
+ if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
+ return -EINVAL;
+
+ hid_err(usb_dev, "kovaplus_receive_control_status: "
+ "unknown response value 0x%x\n", control.value);
+ return -EINVAL;
+ } while (1);
+}
+
+static int kovaplus_send(struct usb_device *usb_dev, uint command,
+ void const *buf, uint size)
+{
+ int retval;
+
+ retval = roccat_common_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+
+ msleep(100);
+
+ return kovaplus_receive_control_status(usb_dev);
+}
+
+static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
+ enum kovaplus_control_requests request)
+{
+ return kovaplus_send_control(usb_dev, number, request);
+}
+
+static int kovaplus_get_info(struct usb_device *usb_dev,
+ struct kovaplus_info *buf)
+{
+ return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_INFO,
+ buf, sizeof(struct kovaplus_info));
+}
+
+static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
+ struct kovaplus_profile_settings *buf, uint number)
+{
+ int retval;
+
+ retval = kovaplus_select_profile(usb_dev, number,
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
+ if (retval)
+ return retval;
+
+ return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ buf, sizeof(struct kovaplus_profile_settings));
+}
+
+static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
+ struct kovaplus_profile_settings const *settings)
+{
+ return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ settings, sizeof(struct kovaplus_profile_settings));
+}
+
+static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
+ struct kovaplus_profile_buttons *buf, int number)
+{
+ int retval;
+
+ retval = kovaplus_select_profile(usb_dev, number,
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
+ if (retval)
+ return retval;
+
+ return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ buf, sizeof(struct kovaplus_profile_buttons));
+}
+
+static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
+ struct kovaplus_profile_buttons const *buttons)
+{
+ return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ buttons, sizeof(struct kovaplus_profile_buttons));
+}
+
+/* retval is 0-4 on success, < 0 on error */
+static int kovaplus_get_actual_profile(struct usb_device *usb_dev)
+{
+ struct kovaplus_actual_profile buf;
+ int retval;
+
+ retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ &buf, sizeof(struct kovaplus_actual_profile));
+
+ return retval ? retval : buf.actual_profile;
+}
+
+static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
+ int new_profile)
+{
+ struct kovaplus_actual_profile buf;
+
+ buf.command = KOVAPLUS_COMMAND_ACTUAL_PROFILE;
+ buf.size = sizeof(struct kovaplus_actual_profile);
+ buf.actual_profile = new_profile;
+
+ return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ &buf, sizeof(struct kovaplus_actual_profile));
+}
+
+static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kovaplus_profile_settings))
+ return 0;
+
+ if (off + count > sizeof(struct kovaplus_profile_settings))
+ count = sizeof(struct kovaplus_profile_settings) - off;
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ memcpy(buf, ((char const *)&kovaplus->profile_settings[*(uint *)(attr->private)]) + off,
+ count);
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ return count;
+}
+
+static ssize_t kovaplus_sysfs_write_profile_settings(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0;
+ int difference;
+ int profile_index;
+ struct kovaplus_profile_settings *profile_settings;
+
+ if (off != 0 || count != sizeof(struct kovaplus_profile_settings))
+ return -EINVAL;
+
+ profile_index = ((struct kovaplus_profile_settings const *)buf)->profile_index;
+ profile_settings = &kovaplus->profile_settings[profile_index];
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ difference = memcmp(buf, profile_settings,
+ sizeof(struct kovaplus_profile_settings));
+ if (difference) {
+ retval = kovaplus_set_profile_settings(usb_dev,
+ (struct kovaplus_profile_settings const *)buf);
+ if (!retval)
+ memcpy(profile_settings, buf,
+ sizeof(struct kovaplus_profile_settings));
+ }
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct kovaplus_profile_settings);
+}
+
+static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kovaplus_profile_buttons))
+ return 0;
+
+ if (off + count > sizeof(struct kovaplus_profile_buttons))
+ count = sizeof(struct kovaplus_profile_buttons) - off;
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ memcpy(buf, ((char const *)&kovaplus->profile_buttons[*(uint *)(attr->private)]) + off,
+ count);
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ return count;
+}
+
+static ssize_t kovaplus_sysfs_write_profile_buttons(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0;
+ int difference;
+ uint profile_index;
+ struct kovaplus_profile_buttons *profile_buttons;
+
+ if (off != 0 || count != sizeof(struct kovaplus_profile_buttons))
+ return -EINVAL;
+
+ profile_index = ((struct kovaplus_profile_buttons const *)buf)->profile_index;
+ profile_buttons = &kovaplus->profile_buttons[profile_index];
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ difference = memcmp(buf, profile_buttons,
+ sizeof(struct kovaplus_profile_buttons));
+ if (difference) {
+ retval = kovaplus_set_profile_buttons(usb_dev,
+ (struct kovaplus_profile_buttons const *)buf);
+ if (!retval)
+ memcpy(profile_buttons, buf,
+ sizeof(struct kovaplus_profile_buttons));
+ }
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct kovaplus_profile_buttons);
+}
+
+static ssize_t kovaplus_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_profile);
+}
+
+static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kovaplus_device *kovaplus;
+ struct usb_device *usb_dev;
+ unsigned long profile;
+ int retval;
+
+ dev = dev->parent->parent;
+ kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ retval = strict_strtoul(buf, 10, &profile);
+ if (retval)
+ return retval;
+
+ if (profile >= 5)
+ return -EINVAL;
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ retval = kovaplus_set_actual_profile(usb_dev, profile);
+ kovaplus->actual_profile = profile;
+ mutex_unlock(&kovaplus->kovaplus_lock);
+ if (retval)
+ return retval;
+
+ return size;
+}
+
+static ssize_t kovaplus_sysfs_show_actual_cpi(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_cpi);
+}
+
+static ssize_t kovaplus_sysfs_show_actual_sensitivity_x(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_x_sensitivity);
+}
+
+static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_y_sensitivity);
+}
+
+static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kovaplus_device *kovaplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->info.firmware_version);
+}
+
+static struct device_attribute kovaplus_attributes[] = {
+ __ATTR(actual_cpi, 0440,
+ kovaplus_sysfs_show_actual_cpi, NULL),
+ __ATTR(firmware_version, 0440,
+ kovaplus_sysfs_show_firmware_version, NULL),
+ __ATTR(actual_profile, 0660,
+ kovaplus_sysfs_show_actual_profile,
+ kovaplus_sysfs_set_actual_profile),
+ __ATTR(actual_sensitivity_x, 0440,
+ kovaplus_sysfs_show_actual_sensitivity_x, NULL),
+ __ATTR(actual_sensitivity_y, 0440,
+ kovaplus_sysfs_show_actual_sensitivity_y, NULL),
+ __ATTR_NULL
+};
+
+static struct bin_attribute kovaplus_bin_attributes[] = {
+ {
+ .attr = { .name = "profile_settings", .mode = 0220 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .write = kovaplus_sysfs_write_profile_settings
+ },
+ {
+ .attr = { .name = "profile1_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[0]
+ },
+ {
+ .attr = { .name = "profile2_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[1]
+ },
+ {
+ .attr = { .name = "profile3_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[2]
+ },
+ {
+ .attr = { .name = "profile4_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[3]
+ },
+ {
+ .attr = { .name = "profile5_settings", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_settings),
+ .read = kovaplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[4]
+ },
+ {
+ .attr = { .name = "profile_buttons", .mode = 0220 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .write = kovaplus_sysfs_write_profile_buttons
+ },
+ {
+ .attr = { .name = "profile1_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[0]
+ },
+ {
+ .attr = { .name = "profile2_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[1]
+ },
+ {
+ .attr = { .name = "profile3_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[2]
+ },
+ {
+ .attr = { .name = "profile4_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[3]
+ },
+ {
+ .attr = { .name = "profile5_buttons", .mode = 0440 },
+ .size = sizeof(struct kovaplus_profile_buttons),
+ .read = kovaplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[4]
+ },
+ __ATTR_NULL
+};
+
+static int kovaplus_init_kovaplus_device_struct(struct usb_device *usb_dev,
+ struct kovaplus_device *kovaplus)
+{
+ int retval, i;
+ static uint wait = 70; /* device will freeze with just 60 */
+
+ mutex_init(&kovaplus->kovaplus_lock);
+
+ retval = kovaplus_get_info(usb_dev, &kovaplus->info);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < 5; ++i) {
+ msleep(wait);
+ retval = kovaplus_get_profile_settings(usb_dev,
+ &kovaplus->profile_settings[i], i);
+ if (retval)
+ return retval;
+
+ msleep(wait);
+ retval = kovaplus_get_profile_buttons(usb_dev,
+ &kovaplus->profile_buttons[i], i);
+ if (retval)
+ return retval;
+ }
+
+ msleep(wait);
+ retval = kovaplus_get_actual_profile(usb_dev);
+ if (retval < 0)
+ return retval;
+ kovaplus_profile_activated(kovaplus, retval);
+
+ return 0;
+}
+
+static int kovaplus_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct kovaplus_device *kovaplus;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+
+ kovaplus = kzalloc(sizeof(*kovaplus), GFP_KERNEL);
+ if (!kovaplus) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, kovaplus);
+
+ retval = kovaplus_init_kovaplus_device_struct(usb_dev, kovaplus);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct kovaplus_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(kovaplus_class, hdev,
+ sizeof(struct kovaplus_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ kovaplus->chrdev_minor = retval;
+ kovaplus->roccat_claimed = 1;
+ }
+
+ } else {
+ hid_set_drvdata(hdev, NULL);
+ }
+
+ return 0;
+exit_free:
+ kfree(kovaplus);
+ return retval;
+}
+
+static void kovaplus_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct kovaplus_device *kovaplus;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+ kovaplus = hid_get_drvdata(hdev);
+ if (kovaplus->roccat_claimed)
+ roccat_disconnect(kovaplus->chrdev_minor);
+ kfree(kovaplus);
+ }
+}
+
+static int kovaplus_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = kovaplus_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void kovaplus_remove(struct hid_device *hdev)
+{
+ kovaplus_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void kovaplus_keep_values_up_to_date(struct kovaplus_device *kovaplus,
+ u8 const *data)
+{
+ struct kovaplus_mouse_report_button const *button_report;
+
+ if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON)
+ return;
+
+ button_report = (struct kovaplus_mouse_report_button const *)data;
+
+ switch (button_report->type) {
+ case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1:
+ kovaplus_profile_activated(kovaplus, button_report->data1 - 1);
+ break;
+ case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI:
+ kovaplus->actual_cpi = kovaplus_convert_event_cpi(button_report->data1);
+ break;
+ case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY:
+ kovaplus->actual_x_sensitivity = button_report->data1;
+ kovaplus->actual_y_sensitivity = button_report->data2;
+ }
+}
+
+static void kovaplus_report_to_chrdev(struct kovaplus_device const *kovaplus,
+ u8 const *data)
+{
+ struct kovaplus_roccat_report roccat_report;
+ struct kovaplus_mouse_report_button const *button_report;
+
+ if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON)
+ return;
+
+ button_report = (struct kovaplus_mouse_report_button const *)data;
+
+ if (button_report->type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_2)
+ return;
+
+ roccat_report.type = button_report->type;
+ roccat_report.profile = kovaplus->actual_profile + 1;
+
+ if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MACRO ||
+ roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SHORTCUT ||
+ roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH ||
+ roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER)
+ roccat_report.button = button_report->data1;
+ else
+ roccat_report.button = 0;
+
+ if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI)
+ roccat_report.data1 = kovaplus_convert_event_cpi(button_report->data1);
+ else
+ roccat_report.data1 = button_report->data1;
+
+ roccat_report.data2 = button_report->data2;
+
+ roccat_report_event(kovaplus->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int kovaplus_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct kovaplus_device *kovaplus = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE)
+ return 0;
+
+ kovaplus_keep_values_up_to_date(kovaplus, data);
+
+ if (kovaplus->roccat_claimed)
+ kovaplus_report_to_chrdev(kovaplus, data);
+
+ return 0;
+}
+
+static const struct hid_device_id kovaplus_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, kovaplus_devices);
+
+static struct hid_driver kovaplus_driver = {
+ .name = "kovaplus",
+ .id_table = kovaplus_devices,
+ .probe = kovaplus_probe,
+ .remove = kovaplus_remove,
+ .raw_event = kovaplus_raw_event
+};
+
+static int __init kovaplus_init(void)
+{
+ int retval;
+
+ kovaplus_class = class_create(THIS_MODULE, "kovaplus");
+ if (IS_ERR(kovaplus_class))
+ return PTR_ERR(kovaplus_class);
+ kovaplus_class->dev_attrs = kovaplus_attributes;
+ kovaplus_class->dev_bin_attrs = kovaplus_bin_attributes;
+
+ retval = hid_register_driver(&kovaplus_driver);
+ if (retval)
+ class_destroy(kovaplus_class);
+ return retval;
+}
+
+static void __exit kovaplus_exit(void)
+{
+ hid_unregister_driver(&kovaplus_driver);
+ class_destroy(kovaplus_class);
+}
+
+module_init(kovaplus_init);
+module_exit(kovaplus_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Kova[+] driver");
+MODULE_LICENSE("GPL v2");
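As a usage note for the sysfs interface registered above (kovaplus_attributes / kovaplus_bin_attributes): actual_profile is the only writable plain attribute, the profileN_* files are read-only binary blobs, and profile_settings / profile_buttons are write-only. A minimal userspace sketch follows, with a hypothetical sysfs path -- the real directory depends on how the "kovaplus" class device created by roccat_connect() is named on a given system:

/* Hypothetical userspace sketch: read and switch the active profile through
 * the driver's sysfs attributes. The path below is an assumption; look the
 * real one up under /sys/class/ on the target machine. */
#include <stdio.h>
#include <stdlib.h>

#define ACTUAL_PROFILE "/sys/class/kovaplus/kovaplus0/actual_profile" /* assumed */

int main(void)
{
	FILE *f;
	int profile;

	f = fopen(ACTUAL_PROFILE, "r");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* kovaplus_sysfs_show_actual_profile() prints the 0-4 profile index */
	if (fscanf(f, "%d", &profile) == 1)
		printf("active profile: %d\n", profile);
	fclose(f);

	f = fopen(ACTUAL_PROFILE, "w");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* kovaplus_sysfs_set_actual_profile() accepts values 0-4 */
	fprintf(f, "2\n");
	fclose(f);
	return 0;
}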
diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h
new file mode 100644
index 000000000000..ce40607d21c7
--- /dev/null
+++ b/drivers/hid/hid-roccat-kovaplus.h
@@ -0,0 +1,157 @@
+#ifndef __HID_ROCCAT_KOVAPLUS_H
+#define __HID_ROCCAT_KOVAPLUS_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+struct kovaplus_control {
+ uint8_t command; /* KOVAPLUS_COMMAND_CONTROL */
+ uint8_t value;
+ uint8_t request;
+} __packed;
+
+enum kovaplus_control_requests {
+ /* read after write; value = 1 */
+ KOVAPLUS_CONTROL_REQUEST_STATUS = 0x0,
+ /* write; value = profile number range 0-4 */
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
+ /* write; value = profile number range 0-4 */
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20,
+};
+
+enum kovaplus_control_values {
+ KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0, /* supposed */
+ KOVAPLUS_CONTROL_REQUEST_STATUS_OK = 1,
+ KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT = 3, /* supposed */
+};
+
+struct kovaplus_actual_profile {
+ uint8_t command; /* KOVAPLUS_COMMAND_ACTUAL_PROFILE */
+ uint8_t size; /* always 3 */
+ uint8_t actual_profile; /* Range 0-4! */
+} __packed;
+
+struct kovaplus_profile_settings {
+ uint8_t command; /* KOVAPLUS_COMMAND_PROFILE_SETTINGS */
+ uint8_t size; /* 16 */
+ uint8_t profile_index; /* range 0-4 */
+ uint8_t unknown1;
+ uint8_t sensitivity_x; /* range 1-10 */
+ uint8_t sensitivity_y; /* range 1-10 */
+ uint8_t cpi_levels_enabled;
+ uint8_t cpi_startup_level; /* range 1-4 */
+ uint8_t data[8];
+} __packed;
+
+struct kovaplus_profile_buttons {
+ uint8_t command; /* KOVAPLUS_COMMAND_PROFILE_BUTTONS */
+ uint8_t size; /* 23 */
+ uint8_t profile_index; /* range 0-4 */
+ uint8_t data[20];
+} __packed;
+
+struct kovaplus_info {
+ uint8_t command; /* KOVAPLUS_COMMAND_INFO */
+ uint8_t size; /* 6 */
+ uint8_t firmware_version;
+ uint8_t unknown[3];
+} __packed;
+
+/* writes 1 on plugin */
+struct kovaplus_a {
+ uint8_t command; /* KOVAPLUS_COMMAND_A */
+ uint8_t size; /* 3 */
+ uint8_t unknown;
+} __packed;
+
+enum kovaplus_commands {
+ KOVAPLUS_COMMAND_CONTROL = 0x4,
+ KOVAPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
+ KOVAPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
+ KOVAPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
+ KOVAPLUS_COMMAND_INFO = 0x9,
+ KOVAPLUS_COMMAND_A = 0xa,
+};
+
+enum kovaplus_usb_commands {
+ KOVAPLUS_USB_COMMAND_CONTROL = 0x304,
+ KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE = 0x305,
+ KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS = 0x306,
+ KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS = 0x307,
+ KOVAPLUS_USB_COMMAND_INFO = 0x309,
+ KOVAPLUS_USB_COMMAND_A = 0x30a,
+};
+
+enum kovaplus_mouse_report_numbers {
+ KOVAPLUS_MOUSE_REPORT_NUMBER_MOUSE = 1,
+ KOVAPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2,
+ KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON = 3,
+ KOVAPLUS_MOUSE_REPORT_NUMBER_KBD = 4,
+};
+
+struct kovaplus_mouse_report_button {
+ uint8_t report_number; /* KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON */
+ uint8_t unknown1;
+ uint8_t type;
+ uint8_t data1;
+ uint8_t data2;
+} __packed;
+
+enum kovaplus_mouse_report_button_types {
+ /* data1 = profile_number range 1-5; no release event */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1 = 0x20,
+ /* data1 = profile_number range 1-5; no release event */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_2 = 0x30,
+ /* data1 = button_number range 1-18; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MACRO = 0x40,
+ /* data1 = button_number range 1-18; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SHORTCUT = 0x50,
+ /* data1 = button_number range 1-18; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 0x60,
+ /* data1 = button_number range 1-18; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER = 0x80,
+ /* data1 = 1 = 400, 2 = 800, 4 = 1600, 7 = 3200; no release event */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI = 0xb0,
+ /* data1 + data2 = sense range 1-10; no release event */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 0xc0,
+ /* data1 = type as in profile_buttons; data2 = action */
+ KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
+};
+
+enum kovaplus_mouse_report_button_actions {
+ KOVAPLUS_MOUSE_REPORT_BUTTON_ACTION_PRESS = 0,
+ KOVAPLUS_MOUSE_REPORT_BUTTON_ACTION_RELEASE = 1,
+};
+
+struct kovaplus_roccat_report {
+ uint8_t type;
+ uint8_t profile;
+ uint8_t button;
+ uint8_t data1;
+ uint8_t data2;
+} __packed;
+
+struct kovaplus_device {
+ int actual_profile;
+ int actual_cpi;
+ int actual_x_sensitivity;
+ int actual_y_sensitivity;
+ int roccat_claimed;
+ int chrdev_minor;
+ struct mutex kovaplus_lock;
+ struct kovaplus_info info;
+ struct kovaplus_profile_settings profile_settings[5];
+ struct kovaplus_profile_buttons profile_buttons[5];
+};
+
+#endif
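To make the report layout above concrete, here is a hedged userspace-style sketch that decodes one 5-byte button event (KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON) and applies the CPI mapping the comments describe (data1 values 1/2/4/7 correspond to 400/800/1600/3200 cpi). The struct simply mirrors kovaplus_mouse_report_button, since the kernel-internal header is not visible to userspace; the numeric type codes are copied from the enums above.

/* Sketch only: mirrors struct kovaplus_mouse_report_button and the enums
 * declared in the header above. */
#include <stdint.h>
#include <stdio.h>

struct button_event {
	uint8_t report_number;	/* 3 = KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON */
	uint8_t unknown1;
	uint8_t type;
	uint8_t data1;
	uint8_t data2;
} __attribute__((packed));

static unsigned int cpi_from_event(uint8_t data1)
{
	/* per the header comment: 1 = 400, 2 = 800, 4 = 1600, 7 = 3200 */
	switch (data1) {
	case 1: return 400;
	case 2: return 800;
	case 4: return 1600;
	case 7: return 3200;
	default: return 0;	/* unknown encoding */
	}
}

static void decode(const struct button_event *ev)
{
	if (ev->report_number != 3)
		return;

	switch (ev->type) {
	case 0x20:	/* ..._BUTTON_TYPE_PROFILE_1: data1 = profile 1-5 */
		printf("profile switched to %d\n", ev->data1);
		break;
	case 0xb0:	/* ..._BUTTON_TYPE_CPI */
		printf("cpi now %u\n", cpi_from_event(ev->data1));
		break;
	case 0xc0:	/* ..._BUTTON_TYPE_SENSITIVITY: x/y, range 1-10 */
		printf("sensitivity x=%d y=%d\n", ev->data1, ev->data2);
		break;
	}
}

int main(void)
{
	struct button_event ev = { 3, 0, 0xb0, 4, 0 };	/* sample CPI event */
	decode(&ev);
	return 0;
}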
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 02c58e015bee..160f481344f6 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -20,11 +20,11 @@
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
-#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/hid-roccat.h>
#include "hid-ids.h"
-#include "hid-roccat.h"
+#include "hid-roccat-common.h"
#include "hid-roccat-pyra.h"
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
@@ -42,7 +42,6 @@ static void profile_activated(struct pyra_device *pyra,
static int pyra_send_control(struct usb_device *usb_dev, int value,
enum pyra_control_requests request)
{
- int len;
struct pyra_control control;
if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS ||
@@ -54,47 +53,31 @@ static int pyra_send_control(struct usb_device *usb_dev, int value,
control.value = value;
control.request = request;
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- PYRA_USB_COMMAND_CONTROL, 0, (char *)&control,
- sizeof(struct pyra_control),
- USB_CTRL_SET_TIMEOUT);
-
- if (len != sizeof(struct pyra_control))
- return len;
-
- return 0;
+ return roccat_common_send(usb_dev, PYRA_USB_COMMAND_CONTROL,
+ &control, sizeof(struct pyra_control));
}
static int pyra_receive_control_status(struct usb_device *usb_dev)
{
- int len;
+ int retval;
struct pyra_control control;
do {
msleep(10);
-
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE |
- USB_DIR_IN,
- PYRA_USB_COMMAND_CONTROL, 0, (char *)&control,
- sizeof(struct pyra_control),
- USB_CTRL_SET_TIMEOUT);
+ retval = roccat_common_receive(usb_dev, PYRA_USB_COMMAND_CONTROL,
+ &control, sizeof(struct pyra_control));
/* requested too early, try again */
- } while (len == -EPROTO);
+ } while (retval == -EPROTO);
- if (len == sizeof(struct pyra_control) &&
- control.command == PYRA_COMMAND_CONTROL &&
+ if (!retval && control.command == PYRA_COMMAND_CONTROL &&
control.request == PYRA_CONTROL_REQUEST_STATUS &&
control.value == 1)
- return 0;
+ return 0;
else {
hid_err(usb_dev, "receive control status: unknown response 0x%x 0x%x\n",
control.request, control.value);
- return -EINVAL;
+ return retval ? retval : -EINVAL;
}
}
@@ -102,125 +85,72 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev,
struct pyra_profile_settings *buf, int number)
{
int retval;
-
retval = pyra_send_control(usb_dev, number,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
-
if (retval)
return retval;
-
- retval = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- PYRA_USB_COMMAND_PROFILE_SETTINGS, 0, (char *)buf,
- sizeof(struct pyra_profile_settings),
- USB_CTRL_SET_TIMEOUT);
-
- if (retval != sizeof(struct pyra_profile_settings))
- return retval;
-
- return 0;
+ return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS,
+ buf, sizeof(struct pyra_profile_settings));
}
static int pyra_get_profile_buttons(struct usb_device *usb_dev,
struct pyra_profile_buttons *buf, int number)
{
int retval;
-
retval = pyra_send_control(usb_dev, number,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
-
if (retval)
return retval;
-
- retval = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- PYRA_USB_COMMAND_PROFILE_BUTTONS, 0, (char *)buf,
- sizeof(struct pyra_profile_buttons),
- USB_CTRL_SET_TIMEOUT);
-
- if (retval != sizeof(struct pyra_profile_buttons))
- return retval;
-
- return 0;
+ return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS,
+ buf, sizeof(struct pyra_profile_buttons));
}
static int pyra_get_settings(struct usb_device *usb_dev,
struct pyra_settings *buf)
{
- int len;
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- PYRA_USB_COMMAND_SETTINGS, 0, buf,
- sizeof(struct pyra_settings), USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_settings))
- return -EIO;
- return 0;
+ return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_SETTINGS,
+ buf, sizeof(struct pyra_settings));
}
static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
{
- int len;
- len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- PYRA_USB_COMMAND_INFO, 0, buf,
- sizeof(struct pyra_info), USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_info))
- return -EIO;
- return 0;
+ return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_INFO,
+ buf, sizeof(struct pyra_info));
+}
+
+static int pyra_send(struct usb_device *usb_dev, uint command,
+ void const *buf, uint size)
+{
+ int retval;
+ retval = roccat_common_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+ return pyra_receive_control_status(usb_dev);
}
static int pyra_set_profile_settings(struct usb_device *usb_dev,
struct pyra_profile_settings const *settings)
{
- int len;
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- PYRA_USB_COMMAND_PROFILE_SETTINGS, 0, (char *)settings,
- sizeof(struct pyra_profile_settings),
- USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_profile_settings))
- return -EIO;
- if (pyra_receive_control_status(usb_dev))
- return -EIO;
- return 0;
+ return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS, settings,
+ sizeof(struct pyra_profile_settings));
}
static int pyra_set_profile_buttons(struct usb_device *usb_dev,
struct pyra_profile_buttons const *buttons)
{
- int len;
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- PYRA_USB_COMMAND_PROFILE_BUTTONS, 0, (char *)buttons,
- sizeof(struct pyra_profile_buttons),
- USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_profile_buttons))
- return -EIO;
- if (pyra_receive_control_status(usb_dev))
- return -EIO;
- return 0;
+ return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS, buttons,
+ sizeof(struct pyra_profile_buttons));
}
static int pyra_set_settings(struct usb_device *usb_dev,
struct pyra_settings const *settings)
{
- int len;
- len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- PYRA_USB_COMMAND_SETTINGS, 0, (char *)settings,
- sizeof(struct pyra_settings), USB_CTRL_SET_TIMEOUT);
- if (len != sizeof(struct pyra_settings))
- return -EIO;
- if (pyra_receive_control_status(usb_dev))
- return -EIO;
- return 0;
+ int retval;
+ retval = roccat_common_send(usb_dev, PYRA_USB_COMMAND_SETTINGS, settings,
+ sizeof(struct pyra_settings));
+ if (retval)
+ return retval;
+ return pyra_receive_control_status(usb_dev);
}
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
@@ -521,21 +451,16 @@ static struct bin_attribute pyra_bin_attributes[] = {
static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
struct pyra_device *pyra)
{
- struct pyra_info *info;
+ struct pyra_info info;
int retval, i;
mutex_init(&pyra->pyra_lock);
- info = kmalloc(sizeof(struct pyra_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- retval = pyra_get_info(usb_dev, info);
- if (retval) {
- kfree(info);
+ retval = pyra_get_info(usb_dev, &info);
+ if (retval)
return retval;
- }
- pyra->firmware_version = info->firmware_version;
- kfree(info);
+
+ pyra->firmware_version = info.firmware_version;
retval = pyra_get_settings(usb_dev, &pyra->settings);
if (retval)
@@ -581,7 +506,8 @@ static int pyra_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(pyra_class, hdev);
+ retval = roccat_connect(pyra_class, hdev,
+ sizeof(struct pyra_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
} else {
@@ -685,8 +611,7 @@ static void pyra_report_to_chrdev(struct pyra_device const *pyra,
roccat_report.value = button_event->data1;
roccat_report.key = 0;
roccat_report_event(pyra->chrdev_minor,
- (uint8_t const *)&roccat_report,
- sizeof(struct pyra_roccat_report));
+ (uint8_t const *)&roccat_report);
break;
case PYRA_MOUSE_EVENT_BUTTON_TYPE_MACRO:
case PYRA_MOUSE_EVENT_BUTTON_TYPE_SHORTCUT:
@@ -700,8 +625,7 @@ static void pyra_report_to_chrdev(struct pyra_device const *pyra,
*/
roccat_report.value = pyra->actual_profile + 1;
roccat_report_event(pyra->chrdev_minor,
- (uint8_t const *)&roccat_report,
- sizeof(struct pyra_roccat_report));
+ (uint8_t const *)&roccat_report);
}
break;
}
@@ -761,8 +685,8 @@ static int __init pyra_init(void)
static void __exit pyra_exit(void)
{
- class_destroy(pyra_class);
hid_unregister_driver(&pyra_driver);
+ class_destroy(pyra_class);
}
module_init(pyra_init);
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index a14c579ea781..5666e7587b18 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -26,8 +26,7 @@
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/sched.h>
-
-#include "hid-roccat.h"
+#include <linux/hid-roccat.h>
#define ROCCAT_FIRST_MINOR 0
#define ROCCAT_MAX_DEVICES 8
@@ -37,11 +36,11 @@
struct roccat_report {
uint8_t *value;
- int len;
};
struct roccat_device {
unsigned int minor;
+ int report_size;
int open;
int exist;
wait_queue_head_t wait;
@@ -123,7 +122,7 @@ static ssize_t roccat_read(struct file *file, char __user *buffer,
* If report is larger than requested amount of data, rest of report
* is lost!
*/
- len = report->len > count ? count : report->len;
+ len = device->report_size > count ? count : device->report_size;
if (copy_to_user(buffer, report->value, len)) {
retval = -EFAULT;
@@ -248,26 +247,25 @@ static int roccat_release(struct inode *inode, struct file *file)
*
* This is called from interrupt handler.
*/
-int roccat_report_event(int minor, u8 const *data, int len)
+int roccat_report_event(int minor, u8 const *data)
{
struct roccat_device *device;
struct roccat_reader *reader;
struct roccat_report *report;
uint8_t *new_value;
- new_value = kmemdup(data, len, GFP_ATOMIC);
+ device = devices[minor];
+
+ new_value = kmemdup(data, device->report_size, GFP_ATOMIC);
if (!new_value)
return -ENOMEM;
- device = devices[minor];
-
report = &device->cbuf[device->cbuf_end];
/* passing NULL is safe */
kfree(report->value);
report->value = new_value;
- report->len = len;
device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE;
list_for_each_entry(reader, &device->readers, node) {
@@ -295,7 +293,7 @@ EXPORT_SYMBOL_GPL(roccat_report_event);
* Return value is minor device number in Range [0, ROCCAT_MAX_DEVICES] on
* success, a negative error code on failure.
*/
-int roccat_connect(struct class *klass, struct hid_device *hid)
+int roccat_connect(struct class *klass, struct hid_device *hid, int report_size)
{
unsigned int minor;
struct roccat_device *device;
@@ -343,6 +341,7 @@ int roccat_connect(struct class *klass, struct hid_device *hid)
device->hid = hid;
device->exist = 1;
device->cbuf_end = 0;
+ device->report_size = report_size;
return minor;
}
@@ -357,13 +356,16 @@ void roccat_disconnect(int minor)
mutex_lock(&devices_lock);
device = devices[minor];
- devices[minor] = NULL;
mutex_unlock(&devices_lock);
device->exist = 0; /* TODO exist maybe not needed */
device_destroy(device->dev->class, MKDEV(roccat_major, minor));
+ mutex_lock(&devices_lock);
+ devices[minor] = NULL;
+ mutex_unlock(&devices_lock);
+
if (device->open) {
hid_hw_close(device->hid);
wake_up_interruptible(&device->wait);
@@ -373,6 +375,34 @@ void roccat_disconnect(int minor)
}
EXPORT_SYMBOL_GPL(roccat_disconnect);
+static long roccat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct roccat_device *device;
+ unsigned int minor = iminor(inode);
+ long retval = 0;
+
+ mutex_lock(&devices_lock);
+
+ device = devices[minor];
+ if (!device) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ switch (cmd) {
+ case ROCCATIOCGREPSIZE:
+ if (put_user(device->report_size, (int __user *)arg))
+ retval = -EFAULT;
+ break;
+ default:
+ retval = -ENOTTY;
+ }
+out:
+ mutex_unlock(&devices_lock);
+ return retval;
+}
+
static const struct file_operations roccat_ops = {
.owner = THIS_MODULE,
.read = roccat_read,
@@ -380,6 +410,7 @@ static const struct file_operations roccat_ops = {
.open = roccat_open,
.release = roccat_release,
.llseek = noop_llseek,
+ .unlocked_ioctl = roccat_ioctl,
};
static int __init roccat_init(void)
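The point of carrying report_size in struct roccat_device is that readers of the roccat character device now deal in fixed-size events and can query that size up front. A hedged userspace sketch, assuming ROCCATIOCGREPSIZE is reachable via <linux/hid-roccat.h> and using a made-up device node name:

/* Hypothetical consumer of the roccat chrdev. ROCCATIOCGREPSIZE is assumed
 * to be exported through <linux/hid-roccat.h>; the device path below is a
 * placeholder. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hid-roccat.h>

int main(void)
{
	const char *path = "/dev/roccatkoneplus"; /* assumed node name */
	unsigned char *buf;
	int fd, size;
	ssize_t n;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* report size is now fixed per device instead of per event */
	if (ioctl(fd, ROCCATIOCGREPSIZE, &size) < 0) {
		perror("ROCCATIOCGREPSIZE");
		close(fd);
		return EXIT_FAILURE;
	}

	buf = malloc(size);
	if (!buf) {
		close(fd);
		return EXIT_FAILURE;
	}

	/* each read() returns at most one event of 'size' bytes */
	while ((n = read(fd, buf, size)) > 0)
		printf("event: first byte 0x%02x\n", buf[0]);

	free(buf);
	close(fd);
	return 0;
}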
diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h
deleted file mode 100644
index 5784281d613f..000000000000
--- a/drivers/hid/hid-roccat.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __HID_ROCCAT_H
-#define __HID_ROCCAT_H
-
-/*
- * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/hid.h>
-#include <linux/types.h>
-
-#if defined(CONFIG_HID_ROCCAT) || defined(CONFIG_HID_ROCCAT_MODULE)
-int roccat_connect(struct class *klass, struct hid_device *hid);
-void roccat_disconnect(int minor);
-int roccat_report_event(int minor, u8 const *data, int len);
-#else
-static inline int roccat_connect(struct class *klass,
- struct hid_device *hid) { return -1; }
-static inline void roccat_disconnect(int minor) {}
-static inline int roccat_report_event(int minor, u8 const *data, int len)
-{
- return 0;
-}
-#endif
-
-#endif
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 68d7b36e31e4..93819a08121a 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -46,6 +46,16 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+/*
+ * The Sony Sixaxis does not handle HID Output Reports on the Interrupt EP
+ * like it should according to usbhid/hid-core.c::usbhid_output_raw_report()
+ * so we need to override that, forcing HID Output Reports on the Control EP.
+ *
+ * There is also another issue about HID Output Reports via USB, the Sixaxis
+ * does not want the report_id as part of the data packet, so we have to
+ * discard buf[0] when sending the actual control message, even for numbered
+ * reports, humpf!
+ */
static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
size_t count, unsigned char report_type)
{
@@ -55,6 +65,12 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
int report_id = buf[0];
int ret;
+ if (report_type == HID_OUTPUT_REPORT) {
+ /* Don't send the Report ID */
+ buf++;
+ count--;
+ }
+
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
@@ -62,6 +78,10 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
interface->desc.bInterfaceNumber, buf, count,
USB_CTRL_SET_TIMEOUT);
+ /* Count also the Report ID, in case of an Output report. */
+ if (ret > 0 && report_type == HID_OUTPUT_REPORT)
+ ret++;
+
return ret;
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 468e87b53ed2..54409cba018c 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -91,7 +91,7 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
ret = -EFAULT;
goto out;
}
- ret += len;
+ ret = len;
kfree(list->buffer[list->tail].value);
list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
@@ -102,15 +102,14 @@ out:
}
/* the first byte is expected to be a report number */
-static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+/* This function is to be called with the minors_lock mutex held */
+static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type)
{
unsigned int minor = iminor(file->f_path.dentry->d_inode);
struct hid_device *dev;
__u8 *buf;
int ret = 0;
- mutex_lock(&minors_lock);
-
if (!hidraw_table[minor]) {
ret = -ENODEV;
goto out;
@@ -148,14 +147,92 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
goto out_free;
}
- ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT);
+ ret = dev->hid_output_raw_report(dev, buf, count, report_type);
out_free:
kfree(buf);
out:
+ return ret;
+}
+
+/* the first byte is expected to be a report number */
+static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ mutex_lock(&minors_lock);
+ ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT);
mutex_unlock(&minors_lock);
return ret;
}
+
+/* This function performs a Get_Report transfer over the control endpoint
+ per section 7.2.1 of the HID specification, version 1.1. The first byte
+ of buffer is the report number to request, or 0x0 if the device does not
+ use numbered reports. The report_type parameter can be HID_FEATURE_REPORT
+ or HID_INPUT_REPORT. This function is to be called with the minors_lock
+ mutex held. */
+static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type)
+{
+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ struct hid_device *dev;
+ __u8 *buf;
+ int ret = 0, len;
+ unsigned char report_number;
+
+ dev = hidraw_table[minor]->hid;
+
+ if (!dev->hid_get_raw_report) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (count > HID_MAX_BUFFER_SIZE) {
+ printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
+ task_pid_nr(current));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (count < 2) {
+ printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
+ task_pid_nr(current));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Read the first byte from the user. This is the report number,
+ which is passed to dev->hid_get_raw_report(). */
+ if (copy_from_user(&report_number, buffer, 1)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ ret = dev->hid_get_raw_report(dev, report_number, buf, count, report_type);
+
+ if (ret < 0)
+ goto out_free;
+
+ len = (ret < count) ? ret : count;
+
+ if (copy_to_user(buffer, buf, len)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ ret = len;
+
+out_free:
+ kfree(buf);
+out:
+ return ret;
+}
+
static unsigned int hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
@@ -295,7 +372,24 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
default:
{
struct hid_device *hid = dev->hid;
- if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ) {
+ if (_IOC_TYPE(cmd) != 'H') {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) {
+ int len = _IOC_SIZE(cmd);
+ ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
+ break;
+ }
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) {
+ int len = _IOC_SIZE(cmd);
+ ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
+ break;
+ }
+
+ /* Begin Read-only ioctls. */
+ if (_IOC_DIR(cmd) != _IOC_READ) {
ret = -EINVAL;
break;
}
@@ -327,7 +421,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
-EFAULT : len;
break;
}
- }
+ }
ret = -ENOTTY;
}
@@ -428,12 +522,12 @@ void hidraw_disconnect(struct hid_device *hid)
hidraw->exist = 0;
+ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+
mutex_lock(&minors_lock);
hidraw_table[hidraw->minor] = NULL;
mutex_unlock(&minors_lock);
- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
-
if (hidraw->open) {
hid_hw_close(hid);
wake_up_interruptible(&hidraw->wait);
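The HIDIOCSFEATURE/HIDIOCGFEATURE handling added above is driven from userspace roughly as follows. This is a sketch only: the hidraw node, the report ID 0x9 and the 17-byte length are placeholders, and the ioctl macros are assumed to come from <linux/hidraw.h>.

/* Sketch: set and then read back a feature report through hidraw.
 * Report ID and length are made up for illustration. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hidraw.h>

int main(void)
{
	unsigned char buf[17];
	int fd, ret;

	fd = open("/dev/hidraw0", O_RDWR); /* assumed node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* byte 0 is the report number, the rest is report data */
	memset(buf, 0, sizeof(buf));
	buf[0] = 0x9;
	buf[1] = 0xff;
	ret = ioctl(fd, HIDIOCSFEATURE(sizeof(buf)), buf);
	if (ret < 0)
		perror("HIDIOCSFEATURE");

	/* for the GET path, byte 0 selects which report to fetch */
	buf[0] = 0x9;
	ret = ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf);
	if (ret < 0)
		perror("HIDIOCGFEATURE");
	else
		printf("got %d bytes, report id 0x%02x\n", ret, buf[0]);

	close(fd);
	return 0;
}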
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b336dd84036f..38c261a40c74 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -799,6 +799,40 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
return 0;
}
+static int usbhid_get_raw_report(struct hid_device *hid,
+ unsigned char report_number, __u8 *buf, size_t count,
+ unsigned char report_type)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ struct usb_device *dev = hid_to_usb_dev(hid);
+ struct usb_interface *intf = usbhid->intf;
+ struct usb_host_interface *interface = intf->cur_altsetting;
+ int skipped_report_id = 0;
+ int ret;
+
+ /* Byte 0 is the report number. Report data starts at byte 1.*/
+ buf[0] = report_number;
+ if (report_number == 0x0) {
+ /* Offset the return buffer by 1, so that the report ID
+ will remain in byte 0. */
+ buf++;
+ count--;
+ skipped_report_id = 1;
+ }
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ HID_REQ_GET_REPORT,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ ((report_type + 1) << 8) | report_number,
+ interface->desc.bInterfaceNumber, buf, count,
+ USB_CTRL_SET_TIMEOUT);
+
+ /* count also the report id */
+ if (ret > 0 && skipped_report_id)
+ ret++;
+
+ return ret;
+}
+
static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count,
unsigned char report_type)
{
@@ -1139,6 +1173,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
usb_set_intfdata(intf, hid);
hid->ll_driver = &usb_hid_driver;
+ hid->hid_get_raw_report = usbhid_get_raw_report;
hid->hid_output_raw_report = usbhid_output_raw_report;
hid->ff_init = hid_pidff_init;
#ifdef CONFIG_USB_HIDDEV
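One detail of usbhid_get_raw_report() worth spelling out is the wValue of the GET_REPORT control transfer: the USB HID class puts the report type in the high byte (1 = Input, 2 = Output, 3 = Feature) and the report ID in the low byte, while the kernel's HID_*_REPORT constants count from 0, hence the "report_type + 1". A small standalone check of that encoding, with the constants re-declared here because linux/hid.h is kernel-internal:

/* Worked example of the wValue encoding used by usbhid_get_raw_report(). */
#include <assert.h>

#define HID_INPUT_REPORT	0
#define HID_OUTPUT_REPORT	1
#define HID_FEATURE_REPORT	2

static unsigned int get_report_wvalue(unsigned int report_type,
				      unsigned char report_number)
{
	return ((report_type + 1) << 8) | report_number;
}

int main(void)
{
	/* Feature report with ID 0x05 -> wValue 0x0305 */
	assert(get_report_wvalue(HID_FEATURE_REPORT, 0x05) == 0x0305);
	/* Unnumbered input report -> wValue 0x0100 */
	assert(get_report_wvalue(HID_INPUT_REPORT, 0x00) == 0x0100);
	return 0;
}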
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 297bc9a7d6e6..81131eda5544 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -467,6 +467,17 @@ config SENSORS_JC42
This driver can also be built as a module. If so, the module
will be called jc42.
+config SENSORS_LINEAGE
+ tristate "Lineage Compact Power Line Power Entry Module"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Lineage Compact Power Line
+ series of DC/DC and AC/DC converters such as CP1800, CP2000AC,
+ CP2000DC, CP2725, and others.
+
+ This driver can also be built as a module. If so, the module
+ will be called lineage-pem.
+
config SENSORS_LM63
tristate "National Semiconductor LM63 and LM64"
depends on I2C
@@ -510,7 +521,7 @@ config SENSORS_LM75
- Dallas Semiconductor DS75 and DS1775
- Maxim MAX6625 and MAX6626
- Microchip MCP980x
- - National Semiconductor LM75
+ - National Semiconductor LM75, LM75A
- NXP's LM75A
- ST Microelectronics STDS75
- TelCom (now Microchip) TCN75
@@ -625,6 +636,17 @@ config SENSORS_LM93
This driver can also be built as a module. If so, the module
will be called lm93.
+config SENSORS_LTC4151
+ tristate "Linear Technology LTC4151"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4151
+ High Voltage I2C Current and Voltage Monitor interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4151.
+
config SENSORS_LTC4215
tristate "Linear Technology LTC4215"
depends on I2C && EXPERIMENTAL
@@ -685,6 +707,16 @@ config SENSORS_MAX1619
This driver can also be built as a module. If so, the module
will be called max1619.
+config SENSORS_MAX6639
+ tristate "Maxim MAX6639 sensor chip"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the MAX6639
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6639.
+
config SENSORS_MAX6650
tristate "Maxim MAX6650 sensor chip"
depends on I2C && EXPERIMENTAL
@@ -735,6 +767,61 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
+config PMBUS
+ tristate "PMBus support"
+ depends on I2C && EXPERIMENTAL
+ default n
+ help
+ Say yes here if you want to enable PMBus support.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus_core.
+
+if PMBUS
+
+config SENSORS_PMBUS
+ tristate "Generic PMBus devices"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for generic
+ PMBus devices, including but not limited to BMR450, BMR451, BMR453,
+ BMR454, and LTC2978.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus.
+
+config SENSORS_MAX16064
+ tristate "Maxim MAX16064"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX16064.
+
+ This driver can also be built as a module. If so, the module will
+ be called max16064.
+
+config SENSORS_MAX34440
+ tristate "Maxim MAX34440/MAX34441"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX34440 and MAX34441.
+
+ This driver can also be built as a module. If so, the module will
+ be called max34440.
+
+config SENSORS_MAX8688
+ tristate "Maxim MAX8688"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX8688.
+
+ This driver can also be built as a module. If so, the module will
+ be called max8688.
+
+endif # PMBUS
+
config SENSORS_SHT15
tristate "Sensirion humidity and temperature sensors. SHT15 and compat."
depends on GENERIC_GPIO
@@ -872,6 +959,25 @@ config SENSORS_SMSC47B397
This driver can also be built as a module. If so, the module
will be called smsc47b397.
+config SENSORS_SCH5627
+ tristate "SMSC SCH5627"
+ help
+ If you say yes here you get support for the hardware monitoring
+ features of the SMSC SCH5627 Super-I/O chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called sch5627.
+
+config SENSORS_ADS1015
+ tristate "Texas Instruments ADS1015"
+ depends on I2C
+ help
+ If you say yes here you get support for Texas Instruments ADS1015
+ 12-bit 4-input ADC device.
+
+ This driver can also be built as a module. If so, the module
+ will be called ads1015.
+
config SENSORS_ADS7828
tristate "Texas Instruments ADS7828"
depends on I2C
@@ -941,6 +1047,16 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_TWL4030_MADC
+ tristate "Texas Instruments TWL4030 MADC Hwmon"
+ depends on TWL4030_MADC
+ help
+ If you say yes here you get hwmon support for Triton
+ TWL4030-MADC.
+
+ This driver can also be built as a module. If so it will be called
+ twl4030-madc-hwmon.
+
config SENSORS_VIA_CPUTEMP
tristate "VIA CPU temperature sensor"
depends on X86
@@ -1083,7 +1199,7 @@ config SENSORS_W83627HF
will be called w83627hf.
config SENSORS_W83627EHF
- tristate "Winbond W83627EHF/EHG/DHG, W83667HG"
+ tristate "Winbond W83627EHF/EHG/DHG, W83667HG, NCT6775F, NCT6776F"
select HWMON_VID
help
If you say yes here you get support for the hardware
@@ -1094,7 +1210,8 @@ config SENSORS_W83627EHF
chip suited for specific Intel processors that use PECI such as
the Core 2 Duo.
- This driver also supports the W83667HG chip.
+ This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
+ (also known as W83667HG-I), and NCT6776F.
This driver can also be built as a module. If so, the module
will be called w83627ehf.
@@ -1127,40 +1244,6 @@ config SENSORS_ULTRA45
This driver provides support for the Ultra45 workstation environmental
sensors.
-config SENSORS_LIS3_SPI
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
- depends on !ACPI && SPI_MASTER && INPUT
- select INPUT_POLLDEV
- default n
- help
- This driver provides support for the LIS3LV02Dx accelerometer connected
- via SPI. The accelerometer data is readable via
- /sys/devices/platform/lis3lv02d.
-
- This driver also provides an absolute input class device, allowing
- the laptop to act as a pinball machine-esque joystick.
-
- This driver can also be built as modules. If so, the core module
- will be called lis3lv02d and a specific module for the SPI transport
- is called lis3lv02d_spi.
-
-config SENSORS_LIS3_I2C
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
- depends on I2C && INPUT
- select INPUT_POLLDEV
- default n
- help
- This driver provides support for the LIS3LV02Dx accelerometer connected
- via I2C. The accelerometer data is readable via
- /sys/devices/platform/lis3lv02d.
-
- This driver also provides an absolute input class device, allowing
- the device to act as a pinball machine-esque joystick.
-
- This driver can also be built as modules. If so, the core module
- will be called lis3lv02d and a specific module for the I2C transport
- is called lis3lv02d_i2c.
-
config SENSORS_APPLESMC
tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
depends on INPUT && X86
@@ -1208,36 +1291,6 @@ config SENSORS_ATK0110
This driver can also be built as a module. If so, the module
will be called asus_atk0110.
-config SENSORS_LIS3LV02D
- tristate "STMicroeletronics LIS3* three-axis digital accelerometer"
- depends on INPUT
- select INPUT_POLLDEV
- select NEW_LEDS
- select LEDS_CLASS
- default n
- help
- This driver provides support for the LIS3* accelerometers, such as the
- LIS3LV02DL or the LIS331DL. In particular, it can be found in a number
- of HP laptops, which have the "Mobile Data Protection System 3D" or
- "3D DriveGuard" feature. On such systems the driver should load
- automatically (via ACPI alias). The accelerometer might also be found
- in other systems, connected via SPI or I2C. The accelerometer data is
- readable via /sys/devices/platform/lis3lv02d.
-
- This driver also provides an absolute input class device, allowing
- a laptop to act as a pinball machine-esque joystick. It provides also
- a misc device which can be used to detect free-fall. On HP laptops,
- if the led infrastructure is activated, support for a led indicating
- disk protection will be provided as hp::hddprotect. For more
- information on the feature, refer to Documentation/hwmon/lis3lv02d.
-
- This driver can also be built as modules. If so, the core module
- will be called lis3lv02d and a specific module for HP laptops will be
- called hp_accel.
-
- Say Y here if you have an applicable laptop and want to experience
- the awesome power of lis3lv02d.
-
endif # ACPI
endif # HWMON
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index dde02d99c238..967d0ea9447f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
+obj-$(CONFIG_SENSORS_ADS1015) += ads1015.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
@@ -62,9 +63,7 @@ obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
-obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o
-obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d.o lis3lv02d_spi.o
-obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d.o lis3lv02d_i2c.o
+obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
obj-$(CONFIG_SENSORS_LM63) += lm63.o
obj-$(CONFIG_SENSORS_LM70) += lm70.o
obj-$(CONFIG_SENSORS_LM73) += lm73.o
@@ -79,17 +78,20 @@ obj-$(CONFIG_SENSORS_LM90) += lm90.o
obj-$(CONFIG_SENSORS_LM92) += lm92.o
obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
+obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
+obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
obj-$(CONFIG_SENSORS_S3C) += s3c-hwmon.o
+obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SHT21) += sht21.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
@@ -102,6 +104,7 @@ obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
@@ -112,7 +115,12 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
-ifeq ($(CONFIG_HWMON_DEBUG_CHIP),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+# PMBus drivers
+obj-$(CONFIG_PMBUS) += pmbus_core.o
+obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
+obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
+obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+
+ccflags-$(CONFIG_HWMON_DEBUG_CHIP) := -DDEBUG
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 8f07a9dda152..0e05aa179eaa 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1,5 +1,5 @@
/*
- abituguru.c Copyright (c) 2005-2006 Hans de Goede <j.w.r.degoede@hhs.nl>
+ abituguru.c Copyright (c) 2005-2006 Hans de Goede <hdegoede@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1505,7 +1505,7 @@ static void __exit abituguru_exit(void)
platform_driver_unregister(&abituguru_driver);
}
-MODULE_AUTHOR("Hans de Goede <j.w.r.degoede@hhs.nl>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Abit uGuru Sensor device");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 48d21e22e930..034cebfcd273 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1,7 +1,7 @@
/*
abituguru3.c
- Copyright (c) 2006-2008 Hans de Goede <j.w.r.degoede@hhs.nl>
+ Copyright (c) 2006-2008 Hans de Goede <hdegoede@redhat.com>
Copyright (c) 2008 Alistair John Strachan <alistair@devzero.co.uk>
This program is free software; you can redistribute it and/or modify
@@ -1266,7 +1266,7 @@ static void __exit abituguru3_exit(void)
platform_driver_unregister(&abituguru3_driver);
}
-MODULE_AUTHOR("Hans de Goede <j.w.r.degoede@hhs.nl>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Abit uGuru3 Sensor device");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
new file mode 100644
index 000000000000..e9beeda4cbe5
--- /dev/null
+++ b/drivers/hwmon/ads1015.c
@@ -0,0 +1,337 @@
+/*
+ * ads1015.c - lm_sensors driver for ads1015 12-bit 4-input ADC
+ * (C) Copyright 2010
+ * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
+ *
+ * Based on the ads7828 driver by Steve Hardy.
+ *
+ * Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads1015.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include <linux/i2c/ads1015.h>
+
+/* ADS1015 registers */
+enum {
+ ADS1015_CONVERSION = 0,
+ ADS1015_CONFIG = 1,
+};
+
+/* PGA fullscale voltages in mV */
+static const unsigned int fullscale_table[8] = {
+ 6144, 4096, 2048, 1024, 512, 256, 256, 256 };
+
+/* Data rates in samples per second */
+static const unsigned int data_rate_table[8] = {
+ 128, 250, 490, 920, 1600, 2400, 3300, 3300 };
+
+#define ADS1015_DEFAULT_CHANNELS 0xff
+#define ADS1015_DEFAULT_PGA 2
+#define ADS1015_DEFAULT_DATA_RATE 4
+
+struct ads1015_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock; /* serializes ADC conversions */
+ struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+};
+
+static s32 ads1015_read_reg(struct i2c_client *client, unsigned int reg)
+{
+ s32 data = i2c_smbus_read_word_data(client, reg);
+
+ return (data < 0) ? data : swab16(data);
+}
+
+static s32 ads1015_write_reg(struct i2c_client *client, unsigned int reg,
+ u16 val)
+{
+ return i2c_smbus_write_word_data(client, reg, swab16(val));
+}
+
+static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
+ int *value)
+{
+ u16 config;
+ s16 conversion;
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ unsigned int pga = data->channel_data[channel].pga;
+ int fullscale;
+ unsigned int data_rate = data->channel_data[channel].data_rate;
+ unsigned int conversion_time_ms;
+ int res;
+
+ mutex_lock(&data->update_lock);
+
+ /* get channel parameters */
+ res = ads1015_read_reg(client, ADS1015_CONFIG);
+ if (res < 0)
+ goto err_unlock;
+ config = res;
+ fullscale = fullscale_table[pga];
+ conversion_time_ms = DIV_ROUND_UP(1000, data_rate_table[data_rate]);
+
+ /* setup and start single conversion */
+ config &= 0x001f;
+ config |= (1 << 15) | (1 << 8);
+ config |= (channel & 0x0007) << 12;
+ config |= (pga & 0x0007) << 9;
+ config |= (data_rate & 0x0007) << 5;
+
+ res = ads1015_write_reg(client, ADS1015_CONFIG, config);
+ if (res < 0)
+ goto err_unlock;
+
+ /* wait until conversion finished */
+ msleep(conversion_time_ms);
+ res = ads1015_read_reg(client, ADS1015_CONFIG);
+ if (res < 0)
+ goto err_unlock;
+ config = res;
+ if (!(config & (1 << 15))) {
+ /* conversion not finished in time */
+ res = -EIO;
+ goto err_unlock;
+ }
+
+ res = ads1015_read_reg(client, ADS1015_CONVERSION);
+ if (res < 0)
+ goto err_unlock;
+ conversion = res;
+
+ mutex_unlock(&data->update_lock);
+
+ *value = DIV_ROUND_CLOSEST(conversion * fullscale, 0x7ff0);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&data->update_lock);
+ return res;
+}
+
+/* sysfs callback function */
+static ssize_t show_in(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct i2c_client *client = to_i2c_client(dev);
+ int in;
+ int res;
+
+ res = ads1015_read_value(client, attr->index, &in);
+
+ return (res < 0) ? res : sprintf(buf, "%d\n", in);
+}
+
+static const struct sensor_device_attribute ads1015_in[] = {
+ SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
+ SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
+ SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
+ SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
+ SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
+ SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
+ SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
+ SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
+};
+
+/*
+ * Driver interface
+ */
+
+static int ads1015_remove(struct i2c_client *client)
+{
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ int k;
+
+ hwmon_device_unregister(data->hwmon_dev);
+ for (k = 0; k < ADS1015_CHANNELS; ++k)
+ device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
+ kfree(data);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int ads1015_get_channels_config_of(struct i2c_client *client)
+{
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ struct device_node *node;
+
+ if (!client->dev.of_node
+ || !of_get_next_child(client->dev.of_node, NULL))
+ return -EINVAL;
+
+ for_each_child_of_node(client->dev.of_node, node) {
+ const __be32 *property;
+ int len;
+ unsigned int channel;
+ unsigned int pga = ADS1015_DEFAULT_PGA;
+ unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
+
+ property = of_get_property(node, "reg", &len);
+ if (!property || len != sizeof(int)) {
+ dev_err(&client->dev, "invalid reg on %s\n",
+ node->full_name);
+ continue;
+ }
+
+ channel = be32_to_cpup(property);
+ if (channel >= ADS1015_CHANNELS) {
+ dev_err(&client->dev,
+ "invalid channel index %d on %s\n",
+ channel, node->full_name);
+ continue;
+ }
+
+ property = of_get_property(node, "ti,gain", &len);
+ if (property && len == sizeof(int)) {
+ pga = be32_to_cpup(property);
+ if (pga > 6) {
+ dev_err(&client->dev,
+ "invalid gain on %s\n",
+ node->full_name);
+ }
+ }
+
+ property = of_get_property(node, "ti,datarate", &len);
+ if (property && len == sizeof(int)) {
+ data_rate = be32_to_cpup(property);
+ if (data_rate > 7) {
+ dev_err(&client->dev,
+ "invalid data_rate on %s\n",
+ node->full_name);
+ }
+ }
+
+ data->channel_data[channel].enabled = true;
+ data->channel_data[channel].pga = pga;
+ data->channel_data[channel].data_rate = data_rate;
+ }
+
+ return 0;
+}
+#endif
+
+static void ads1015_get_channels_config(struct i2c_client *client)
+{
+ unsigned int k;
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
+
+ /* prefer platform data */
+ if (pdata) {
+ memcpy(data->channel_data, pdata->channel_data,
+ sizeof(data->channel_data));
+ return;
+ }
+
+#ifdef CONFIG_OF
+ if (!ads1015_get_channels_config_of(client))
+ return;
+#endif
+
+ /* fallback on default configuration */
+ for (k = 0; k < ADS1015_CHANNELS; ++k) {
+ data->channel_data[k].enabled = true;
+ data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
+ data->channel_data[k].data_rate = ADS1015_DEFAULT_DATA_RATE;
+ }
+}
+
+static int ads1015_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ads1015_data *data;
+ int err;
+ unsigned int k;
+
+ data = kzalloc(sizeof(struct ads1015_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* build sysfs attribute group */
+ ads1015_get_channels_config(client);
+ for (k = 0; k < ADS1015_CHANNELS; ++k) {
+ if (!data->channel_data[k].enabled)
+ continue;
+ err = device_create_file(&client->dev, &ads1015_in[k].dev_attr);
+ if (err)
+ goto exit_free;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove;
+ }
+
+ return 0;
+
+exit_remove:
+ for (k = 0; k < ADS1015_CHANNELS; ++k)
+ device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static const struct i2c_device_id ads1015_id[] = {
+ { "ads1015", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ads1015_id);
+
+static struct i2c_driver ads1015_driver = {
+ .driver = {
+ .name = "ads1015",
+ },
+ .probe = ads1015_probe,
+ .remove = ads1015_remove,
+ .id_table = ads1015_id,
+};
+
+static int __init sensors_ads1015_init(void)
+{
+ return i2c_add_driver(&ads1015_driver);
+}
+
+static void __exit sensors_ads1015_exit(void)
+{
+ i2c_del_driver(&ads1015_driver);
+}
+
+MODULE_AUTHOR("Dirk Eibach <eibach@gdsys.de>");
+MODULE_DESCRIPTION("ADS1015 driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_ads1015_init);
+module_exit(sensors_ads1015_exit);
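For platforms without device tree support, the pdata path in ads1015_get_channels_config() above can be fed from board code. A hypothetical sketch, not part of this patch: the field names follow the pdata->channel_data usage in the driver, but the exact layout of <linux/i2c/ads1015.h> is assumed, and 0x48 is just the chip's usual default address. Channel indexes 4-7 select the single-ended inputs AIN0-AIN3; pga and data_rate index the tables at the top of the driver.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/ads1015.h>

static struct ads1015_platform_data board_ads1015_pdata = {
	.channel_data = {
		/* channel 4 = AIN0 vs GND: pga 2 -> +/-2.048 V, data_rate 4 -> 1600 SPS */
		[4] = { .enabled = true, .pga = 2, .data_rate = 4 },
		/* channel 5 = AIN1 vs GND: pga 1 -> +/-4.096 V */
		[5] = { .enabled = true, .pga = 1, .data_rate = 4 },
	},
};

static struct i2c_board_info board_ads1015_info[] __initdata = {
	{
		I2C_BOARD_INFO("ads1015", 0x48),
		.platform_data = &board_ads1015_pdata,
	},
};

On DT platforms the same per-channel settings come instead from the child nodes parsed in ads1015_get_channels_config_of() ("reg", "ti,gain", "ti,datarate").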
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 6e06019015a5..a4d430ee7e20 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1,6 +1,6 @@
/***************************************************************************
* Copyright (C) 2006 by Hans Edgington <hans@edgington.nl> *
- * Copyright (C) 2007-2009 Hans de Goede <hdegoede@redhat.com> *
+ * Copyright (C) 2007-2011 Hans de Goede <hdegoede@redhat.com> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
@@ -47,22 +47,23 @@
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
+#define SIO_F71808E_ID 0x0901 /* Chipset ID */
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
+#define SIO_F71869_ID 0x0814 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
+#define SIO_F71889E_ID 0x0909 /* Chipset ID */
#define SIO_F8000_ID 0x0581 /* Chipset ID */
#define REGION_LENGTH 8
#define ADDR_REG_OFFSET 5
#define DATA_REG_OFFSET 6
-#define F71882FG_REG_PECI 0x0A
-
-#define F71882FG_REG_IN_STATUS 0x12 /* f71882fg only */
-#define F71882FG_REG_IN_BEEP 0x13 /* f71882fg only */
+#define F71882FG_REG_IN_STATUS 0x12 /* f7188x only */
+#define F71882FG_REG_IN_BEEP 0x13 /* f7188x only */
#define F71882FG_REG_IN(nr) (0x20 + (nr))
-#define F71882FG_REG_IN1_HIGH 0x32 /* f71882fg only */
+#define F71882FG_REG_IN1_HIGH 0x32 /* f7188x only */
#define F71882FG_REG_FAN(nr) (0xA0 + (16 * (nr)))
#define F71882FG_REG_FAN_TARGET(nr) (0xA2 + (16 * (nr)))
@@ -86,28 +87,71 @@
#define F71882FG_REG_FAN_HYST(nr) (0x98 + (nr))
+#define F71882FG_REG_FAN_FAULT_T 0x9F
+#define F71882FG_FAN_NEG_TEMP_EN 0x20
+#define F71882FG_FAN_PROG_SEL 0x80
+
#define F71882FG_REG_POINT_PWM(pwm, point) (0xAA + (point) + (16 * (pwm)))
#define F71882FG_REG_POINT_TEMP(pwm, point) (0xA6 + (point) + (16 * (pwm)))
#define F71882FG_REG_POINT_MAPPING(nr) (0xAF + 16 * (nr))
#define F71882FG_REG_START 0x01
+#define F71882FG_MAX_INS 9
+
#define FAN_MIN_DETECT 366 /* Lowest detectable fanspeed */
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
+enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
+ f71889ed, f8000 };
static const char *f71882fg_names[] = {
+ "f71808e",
"f71858fg",
"f71862fg",
+ "f71869", /* Both f71869f and f71869e, reg. compatible and same id */
"f71882fg",
"f71889fg",
+ "f71889ed",
"f8000",
};
+static const char f71882fg_has_in[8][F71882FG_MAX_INS] = {
+ { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, /* f71808e */
+ { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f71858fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71862fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71869 */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71882fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889ed */
+ { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f8000 */
+};
+
+static const char f71882fg_has_in1_alarm[8] = {
+ 0, /* f71808e */
+ 0, /* f71858fg */
+ 0, /* f71862fg */
+ 0, /* f71869 */
+ 1, /* f71882fg */
+ 1, /* f71889fg */
+ 1, /* f71889ed */
+ 0, /* f8000 */
+};
+
+static const char f71882fg_has_beep[8] = {
+ 0, /* f71808e */
+ 0, /* f71858fg */
+ 1, /* f71862fg */
+ 1, /* f71869 */
+ 1, /* f71882fg */
+ 1, /* f71889fg */
+ 1, /* f71889ed */
+ 0, /* f8000 */
+};
+
static struct platform_device *f71882fg_pdev;
/* Super-I/O Function prototypes */
@@ -129,11 +173,12 @@ struct f71882fg_data {
struct mutex update_lock;
int temp_start; /* temp numbering start (0 or 1) */
char valid; /* !=0 if following fields are valid */
+ char auto_point_temp_signed;
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
/* Register Values */
- u8 in[9];
+ u8 in[F71882FG_MAX_INS];
u8 in1_max;
u8 in_status;
u8 in_beep;
@@ -142,7 +187,7 @@ struct f71882fg_data {
u16 fan_full_speed[4];
u8 fan_status;
u8 fan_beep;
- /* Note: all models have only 3 temperature channels, but on some
+ /* Note: all models have at most 3 temperature channels, but on some
they are addressed as 0-2 and on others as 1-3, so for coding
convenience we reserve space for 4 channels */
u16 temp[4];
@@ -262,13 +307,9 @@ static struct platform_driver f71882fg_driver = {
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-/* Temp and in attr for the f71858fg, the f71858fg is special as it
- has its temperature indexes start at 0 (the others start at 1) and
- it only has 3 voltage inputs */
-static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+/* Temp attr for the f71858fg; it is special in that its temperature
+ indexes start at 0 (the others start at 1) */
+static struct sensor_device_attribute_2 f71858fg_temp_attr[] = {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 0),
@@ -292,7 +333,6 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 1),
SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
- SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
@@ -308,17 +348,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
-/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
- SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
- SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
- SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
- SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
- SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
- SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+/* Temp attr for the standard models */
+static struct sensor_device_attribute_2 fxxxx_temp_attr[3][9] = { {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 1),
@@ -328,17 +359,14 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
the max and crit alarms separately and lm_sensors v2 depends on the
presence of temp#_alarm files. The same goes for temp2/3 _alarm. */
SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1),
- SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 1),
SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 1),
SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 1),
SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
- SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 5),
SENSOR_ATTR_2(temp1_type, S_IRUGO, show_temp_type, NULL, 0, 1),
SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
+}, {
SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 2),
@@ -346,17 +374,14 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
store_temp_max_hyst, 0, 2),
/* Should be temp2_max_alarm, see temp1_alarm note */
SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2),
- SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 2),
SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 2),
SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 2),
SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
- SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 6),
SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
+}, {
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 3),
@@ -364,37 +389,39 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
store_temp_max_hyst, 0, 3),
/* Should be temp3_max_alarm, see temp1_alarm note */
SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 3),
- SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 3),
SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 3),
SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 3),
SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 7),
- SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 7),
SENSOR_ATTR_2(temp3_type, S_IRUGO, show_temp_type, NULL, 0, 3),
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 3),
-};
+} };
-/* For models with in1 alarm capability */
-static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
- SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
- 0, 1),
- SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep,
- 0, 1),
- SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
-};
+/* Temp attr for models which can beep on temp alarm */
+static struct sensor_device_attribute_2 fxxxx_temp_beep_attr[3][2] = { {
+ SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 1),
+ SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 5),
+}, {
+ SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 2),
+ SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 6),
+}, {
+ SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 3),
+ SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 7),
+} };
-/* Temp and in attr for the f8000
+/* Temp attr for the f8000
Note on the f8000 temp_ovt (crit) is used as max, and temp_high (max)
is used as hysteresis value to clear alarms
Also like the f71858fg its temperature indexes start at 0
*/
-static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+static struct sensor_device_attribute_2 f8000_temp_attr[] = {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 0),
@@ -408,7 +435,6 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 1),
SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
- SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_crit,
@@ -419,6 +445,28 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
+/* in attr for all models */
+static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
+ SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
+ SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
+ SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+ SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
+ SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
+ SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
+ SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
+ SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
+ SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+};
+
+/* For models with in1 alarm capability */
+static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
+ SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
+ 0, 1),
+ SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep,
+ 0, 1),
+ SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
+};
+
/* Fan / PWM attr common to all models */
static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { {
SENSOR_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0),
@@ -479,7 +527,7 @@ static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
};
/* PWM attr for the f71862fg, fewer pwms and fewer zones per pwm than the
- f71858fg / f71882fg / f71889fg */
+ standard models */
static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
@@ -548,7 +596,87 @@ static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
show_pwm_auto_point_temp_hyst, NULL, 3, 2),
};
-/* PWM attr common to the f71858fg, f71882fg and f71889fg */
+/* PWM attr for the f71808e/f71869, almost identical to the f71862fg, but the
+ pwm setting when the temperature is above the pwmX_auto_point1_temp can be
+ programmed instead of being hardcoded to 0xff */
+static struct sensor_device_attribute_2 f71869_auto_pwm_attr[] = {
+ SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_channel,
+ store_pwm_auto_point_channel, 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 1, 0),
+ SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 4, 0),
+ SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 3, 0),
+ SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp_hyst,
+ store_pwm_auto_point_temp_hyst,
+ 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
+ show_pwm_auto_point_temp_hyst, NULL, 3, 0),
+
+ SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_channel,
+ store_pwm_auto_point_channel, 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 1, 1),
+ SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 4, 1),
+ SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 3, 1),
+ SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp_hyst,
+ store_pwm_auto_point_temp_hyst,
+ 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
+ show_pwm_auto_point_temp_hyst, NULL, 3, 1),
+
+ SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_channel,
+ store_pwm_auto_point_channel, 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 1, 2),
+ SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 4, 2),
+ SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 3, 2),
+ SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp_hyst,
+ store_pwm_auto_point_temp_hyst,
+ 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
+ show_pwm_auto_point_temp_hyst, NULL, 3, 2),
+};
+
+/* PWM attr for the standard models */
static struct sensor_device_attribute_2 fxxxx_auto_pwm_attr[4][14] = { {
SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
@@ -943,16 +1071,16 @@ static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
static struct f71882fg_data *f71882fg_update_device(struct device *dev)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr, reg = 0, reg2;
+ int nr, reg, point;
int nr_fans = (data->type == f71882fg) ? 4 : 3;
- int nr_ins = (data->type == f71858fg || data->type == f8000) ? 3 : 9;
+ int nr_temps = (data->type == f71808e) ? 2 : 3;
mutex_lock(&data->update_lock);
/* Update once every 60 seconds */
if (time_after(jiffies, data->last_limits + 60 * HZ) ||
!data->valid) {
- if (data->type == f71882fg || data->type == f71889fg) {
+ if (f71882fg_has_in1_alarm[data->type]) {
data->in1_max =
f71882fg_read8(data, F71882FG_REG_IN1_HIGH);
data->in_beep =
@@ -960,7 +1088,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
}
/* Get High & boundary temps*/
- for (nr = data->temp_start; nr < 3 + data->temp_start; nr++) {
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++) {
data->temp_ovt[nr] = f71882fg_read8(data,
F71882FG_REG_TEMP_OVT(nr));
data->temp_high[nr] = f71882fg_read8(data,
@@ -973,44 +1102,19 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->temp_hyst[1] = f71882fg_read8(data,
F71882FG_REG_TEMP_HYST(1));
}
+ /* All but the f71858fg / f8000 have this register */
+ if ((data->type != f71858fg) && (data->type != f8000)) {
+ reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
+ data->temp_type[1] = (reg & 0x02) ? 2 : 4;
+ data->temp_type[2] = (reg & 0x04) ? 2 : 4;
+ data->temp_type[3] = (reg & 0x08) ? 2 : 4;
+ }
- if (data->type == f71862fg || data->type == f71882fg ||
- data->type == f71889fg) {
+ if (f71882fg_has_beep[data->type]) {
data->fan_beep = f71882fg_read8(data,
F71882FG_REG_FAN_BEEP);
data->temp_beep = f71882fg_read8(data,
F71882FG_REG_TEMP_BEEP);
- /* Have to hardcode type, because temp1 is special */
- reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
- data->temp_type[2] = (reg & 0x04) ? 2 : 4;
- data->temp_type[3] = (reg & 0x08) ? 2 : 4;
- }
- /* Determine temp index 1 sensor type */
- if (data->type == f71889fg) {
- reg2 = f71882fg_read8(data, F71882FG_REG_START);
- switch ((reg2 & 0x60) >> 5) {
- case 0x00: /* BJT / Thermistor */
- data->temp_type[1] = (reg & 0x02) ? 2 : 4;
- break;
- case 0x01: /* AMDSI */
- data->temp_type[1] = 5;
- break;
- case 0x02: /* PECI */
- case 0x03: /* Ibex Peak ?? Report as PECI for now */
- data->temp_type[1] = 6;
- break;
- }
- } else {
- reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
- if ((reg2 & 0x03) == 0x01)
- data->temp_type[1] = 6; /* PECI */
- else if ((reg2 & 0x03) == 0x02)
- data->temp_type[1] = 5; /* AMDSI */
- else if (data->type == f71862fg ||
- data->type == f71882fg)
- data->temp_type[1] = (reg & 0x02) ? 2 : 4;
- else /* f71858fg and f8000 only support BJT */
- data->temp_type[1] = 2;
}
data->pwm_enable = f71882fg_read8(data,
@@ -1025,8 +1129,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
f71882fg_read8(data,
F71882FG_REG_POINT_MAPPING(nr));
- if (data->type != f71862fg) {
- int point;
+ switch (data->type) {
+ default:
for (point = 0; point < 5; point++) {
data->pwm_auto_point_pwm[nr][point] =
f71882fg_read8(data,
@@ -1039,7 +1143,14 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
F71882FG_REG_POINT_TEMP
(nr, point));
}
- } else {
+ break;
+ case f71808e:
+ case f71869:
+ data->pwm_auto_point_pwm[nr][0] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM(nr, 0));
+ /* Fall through */
+ case f71862fg:
data->pwm_auto_point_pwm[nr][1] =
f71882fg_read8(data,
F71882FG_REG_POINT_PWM
@@ -1056,6 +1167,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
f71882fg_read8(data,
F71882FG_REG_POINT_TEMP
(nr, 3));
+ break;
}
}
data->last_limits = jiffies;
@@ -1067,7 +1179,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
F71882FG_REG_TEMP_STATUS);
data->temp_diode_open = f71882fg_read8(data,
F71882FG_REG_TEMP_DIODE_OPEN);
- for (nr = data->temp_start; nr < 3 + data->temp_start; nr++)
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++)
data->temp[nr] = f71882fg_read_temp(data, nr);
data->fan_status = f71882fg_read8(data,
@@ -1083,17 +1196,18 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->pwm[nr] =
f71882fg_read8(data, F71882FG_REG_PWM(nr));
}
-
/* The f8000 can monitor 1 more fan, but has no pwm for it */
if (data->type == f8000)
data->fan[3] = f71882fg_read16(data,
F71882FG_REG_FAN(3));
- if (data->type == f71882fg || data->type == f71889fg)
+
+ if (f71882fg_has_in1_alarm[data->type])
data->in_status = f71882fg_read8(data,
F71882FG_REG_IN_STATUS);
- for (nr = 0; nr < nr_ins; nr++)
- data->in[nr] = f71882fg_read8(data,
- F71882FG_REG_IN(nr));
+ for (nr = 0; nr < F71882FG_MAX_INS; nr++)
+ if (f71882fg_has_in[data->type][nr])
+ data->in[nr] = f71882fg_read8(data,
+ F71882FG_REG_IN(nr));
data->last_updated = jiffies;
data->valid = 1;
@@ -1882,7 +1996,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
val /= 1000;
- if (data->type == f71889fg)
+ if (data->auto_point_temp_signed)
val = SENSORS_LIMIT(val, -128, 127);
else
val = SENSORS_LIMIT(val, 0, 127);
@@ -1929,7 +2043,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
struct f71882fg_data *data;
struct f71882fg_sio_data *sio_data = pdev->dev.platform_data;
int err, i, nr_fans = (sio_data->type == f71882fg) ? 4 : 3;
- u8 start_reg;
+ int nr_temps = (sio_data->type == f71808e) ? 2 : 3;
+ u8 start_reg, reg;
data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL);
if (!data)
@@ -1968,37 +2083,72 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
/* The f71858fg temperature alarms behave as
the f8000 alarms in this mode */
err = f71882fg_create_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
else
err = f71882fg_create_sysfs_files(pdev,
- f71858fg_in_temp_attr,
- ARRAY_SIZE(f71858fg_in_temp_attr));
- break;
- case f71882fg:
- case f71889fg:
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_in1_alarm_attr,
- ARRAY_SIZE(fxxxx_in1_alarm_attr));
- if (err)
- goto exit_unregister_sysfs;
- /* fall through! */
- case f71862fg:
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_in_temp_attr,
- ARRAY_SIZE(fxxxx_in_temp_attr));
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
break;
+ default:
+ err = f71882fg_create_sysfs_files(pdev,
+ &fxxxx_temp_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
}
if (err)
goto exit_unregister_sysfs;
+
+ if (f71882fg_has_beep[data->type]) {
+ err = f71882fg_create_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_beep_attr[0])
+ * nr_temps);
+ if (err)
+ goto exit_unregister_sysfs;
+ }
+
+ for (i = 0; i < F71882FG_MAX_INS; i++) {
+ if (f71882fg_has_in[data->type][i]) {
+ err = device_create_file(&pdev->dev,
+ &fxxxx_in_attr[i].dev_attr);
+ if (err)
+ goto exit_unregister_sysfs;
+ }
+ }
+ if (f71882fg_has_in1_alarm[data->type]) {
+ err = f71882fg_create_sysfs_files(pdev,
+ fxxxx_in1_alarm_attr,
+ ARRAY_SIZE(fxxxx_in1_alarm_attr));
+ if (err)
+ goto exit_unregister_sysfs;
+ }
}
if (start_reg & 0x02) {
+ switch (data->type) {
+ case f71808e:
+ case f71869:
+ /* These always have signed auto point temps */
+ data->auto_point_temp_signed = 1;
+ /* Fall through to select correct fan/pwm reg bank! */
+ case f71889fg:
+ case f71889ed:
+ reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T);
+ if (reg & F71882FG_FAN_NEG_TEMP_EN)
+ data->auto_point_temp_signed = 1;
+ /* Ensure banked pwm registers point to right bank */
+ reg &= ~F71882FG_FAN_PROG_SEL;
+ f71882fg_write8(data, F71882FG_REG_FAN_FAULT_T, reg);
+ break;
+ default:
+ break;
+ }
+
data->pwm_enable =
f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -2013,8 +2163,11 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71862fg:
err = (data->pwm_enable & 0x15) != 0x15;
break;
+ case f71808e:
+ case f71869:
case f71882fg:
case f71889fg:
+ case f71889ed:
err = 0;
break;
case f8000:
@@ -2034,8 +2187,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
if (err)
goto exit_unregister_sysfs;
- if (data->type == f71862fg || data->type == f71882fg ||
- data->type == f71889fg) {
+ if (f71882fg_has_beep[data->type]) {
err = f71882fg_create_sysfs_files(pdev,
fxxxx_fan_beep_attr, nr_fans);
if (err)
@@ -2043,11 +2195,42 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
}
switch (data->type) {
+ case f71808e:
+ case f71869:
+ case f71889fg:
+ case f71889ed:
+ for (i = 0; i < nr_fans; i++) {
+ data->pwm_auto_point_mapping[i] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_MAPPING(i));
+ if ((data->pwm_auto_point_mapping[i] & 0x80) ||
+ (data->pwm_auto_point_mapping[i] & 3) == 0)
+ break;
+ }
+ if (i != nr_fans) {
+ dev_warn(&pdev->dev,
+ "Auto pwm controlled by raw digital "
+ "data, disabling pwm auto_point "
+ "sysfs attributes\n");
+ goto no_pwm_auto_point;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (data->type) {
case f71862fg:
err = f71882fg_create_sysfs_files(pdev,
f71862fg_auto_pwm_attr,
ARRAY_SIZE(f71862fg_auto_pwm_attr));
break;
+ case f71808e:
+ case f71869:
+ err = f71882fg_create_sysfs_files(pdev,
+ f71869_auto_pwm_attr,
+ ARRAY_SIZE(f71869_auto_pwm_attr));
+ break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
f8000_fan_attr,
@@ -2058,23 +2241,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
f8000_auto_pwm_attr,
ARRAY_SIZE(f8000_auto_pwm_attr));
break;
- case f71889fg:
- for (i = 0; i < nr_fans; i++) {
- data->pwm_auto_point_mapping[i] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_MAPPING(i));
- if (data->pwm_auto_point_mapping[i] & 0x80)
- break;
- }
- if (i != nr_fans) {
- dev_warn(&pdev->dev,
- "Auto pwm controlled by raw digital "
- "data, disabling pwm auto_point "
- "sysfs attributes\n");
- break;
- }
- /* fall through */
- default: /* f71858fg / f71882fg */
+ default:
err = f71882fg_create_sysfs_files(pdev,
&fxxxx_auto_pwm_attr[0][0],
ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
@@ -2082,6 +2249,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
if (err)
goto exit_unregister_sysfs;
+no_pwm_auto_point:
for (i = 0; i < nr_fans; i++)
dev_info(&pdev->dev, "Fan: %d is in %s mode\n", i + 1,
(data->pwm_enable & (1 << 2 * i)) ?
@@ -2108,7 +2276,8 @@ exit_free:
static int f71882fg_remove(struct platform_device *pdev)
{
struct f71882fg_data *data = platform_get_drvdata(pdev);
- int nr_fans = (data->type == f71882fg) ? 4 : 3;
+ int i, nr_fans = (data->type == f71882fg) ? 4 : 3;
+ int nr_temps = (data->type == f71808e) ? 2 : 3;
u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
if (data->hwmon_dev)
@@ -2121,29 +2290,39 @@ static int f71882fg_remove(struct platform_device *pdev)
case f71858fg:
if (data->temp_config & 0x10)
f71882fg_remove_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
else
f71882fg_remove_sysfs_files(pdev,
- f71858fg_in_temp_attr,
- ARRAY_SIZE(f71858fg_in_temp_attr));
- break;
- case f71882fg:
- case f71889fg:
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_in1_alarm_attr,
- ARRAY_SIZE(fxxxx_in1_alarm_attr));
- /* fall through! */
- case f71862fg:
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_in_temp_attr,
- ARRAY_SIZE(fxxxx_in_temp_attr));
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
break;
case f8000:
f71882fg_remove_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
break;
+ default:
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
+ }
+ if (f71882fg_has_beep[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps);
+ }
+
+ for (i = 0; i < F71882FG_MAX_INS; i++) {
+ if (f71882fg_has_in[data->type][i]) {
+ device_remove_file(&pdev->dev,
+ &fxxxx_in_attr[i].dev_attr);
+ }
+ }
+ if (f71882fg_has_in1_alarm[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_in1_alarm_attr,
+ ARRAY_SIZE(fxxxx_in1_alarm_attr));
}
}
@@ -2151,10 +2330,10 @@ static int f71882fg_remove(struct platform_device *pdev)
f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0],
ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans);
- if (data->type == f71862fg || data->type == f71882fg ||
- data->type == f71889fg)
+ if (f71882fg_has_beep[data->type]) {
f71882fg_remove_sysfs_files(pdev,
fxxxx_fan_beep_attr, nr_fans);
+ }
switch (data->type) {
case f71862fg:
@@ -2162,6 +2341,12 @@ static int f71882fg_remove(struct platform_device *pdev)
f71862fg_auto_pwm_attr,
ARRAY_SIZE(f71862fg_auto_pwm_attr));
break;
+ case f71808e:
+ case f71869:
+ f71882fg_remove_sysfs_files(pdev,
+ f71869_auto_pwm_attr,
+ ARRAY_SIZE(f71869_auto_pwm_attr));
+ break;
case f8000:
f71882fg_remove_sysfs_files(pdev,
f8000_fan_attr,
@@ -2170,7 +2355,7 @@ static int f71882fg_remove(struct platform_device *pdev)
f8000_auto_pwm_attr,
ARRAY_SIZE(f8000_auto_pwm_attr));
break;
- default: /* f71858fg / f71882fg / f71889fg */
+ default:
f71882fg_remove_sysfs_files(pdev,
&fxxxx_auto_pwm_attr[0][0],
ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
@@ -2200,18 +2385,27 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
switch (devid) {
+ case SIO_F71808E_ID:
+ sio_data->type = f71808e;
+ break;
case SIO_F71858_ID:
sio_data->type = f71858fg;
break;
case SIO_F71862_ID:
sio_data->type = f71862fg;
break;
+ case SIO_F71869_ID:
+ sio_data->type = f71869;
+ break;
case SIO_F71882_ID:
sio_data->type = f71882fg;
break;
case SIO_F71889_ID:
sio_data->type = f71889fg;
break;
+ case SIO_F71889E_ID:
+ sio_data->type = f71889ed;
+ break;
case SIO_F8000_ID:
sio_data->type = f8000;
break;
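The auto_point_temp_signed flag set in the probe code above only changes the clamping range applied in store_pwm_auto_point_temp(). A standalone illustration, not part of the patch, of that clamp using the same SENSORS_LIMIT() helper the driver already uses (historically provided by <linux/hwmon.h>); sysfs values are in millidegrees Celsius:

#include <linux/types.h>
#include <linux/hwmon.h>

static long clamp_auto_point_temp(long millideg, bool temp_signed)
{
	long val = millideg / 1000;

	/* signed chips accept negative set points, the others clamp at 0 */
	return temp_signed ? SENSORS_LIMIT(val, -128, 127)
			   : SENSORS_LIMIT(val, 0, 127);
}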
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index 1c8b3d9e2051..fea292d43407 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -32,7 +32,7 @@ struct jz4740_hwmon {
int irq;
- struct mfd_cell *cell;
+ const struct mfd_cell *cell;
struct device *hwmon;
struct completion read_completion;
@@ -112,7 +112,7 @@ static int __devinit jz4740_hwmon_probe(struct platform_device *pdev)
return -ENOMEM;
}
- hwmon->cell = pdev->dev.platform_data;
+ hwmon->cell = mfd_get_cell(pdev);
hwmon->irq = platform_get_irq(pdev, 0);
if (hwmon->irq < 0) {
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
new file mode 100644
index 000000000000..58eded27f385
--- /dev/null
+++ b/drivers/hwmon/lineage-pem.c
@@ -0,0 +1,586 @@
+/*
+ * Driver for Lineage Compact Power Line series of power entry modules.
+ *
+ * Copyright (C) 2010, 2011 Ericsson AB.
+ *
+ * Documentation:
+ * http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/*
+ * This driver supports various Lineage Compact Power Line DC/DC and AC/DC
+ * converters such as CP1800, CP2000AC, CP2000DC, CP2100DC, and others.
+ *
+ * The devices are nominally PMBus compliant. However, most standard PMBus
+ * commands are not supported. Specifically, all hardware monitoring and
+ * status reporting commands are non-standard. For this reason, a standard
+ * PMBus driver can not be used.
+ *
+ * All Lineage CPL devices have a built-in I2C bus master selector (PCA9541).
+ * To ensure device access, this driver should only be used as client driver
+ * to the pca9541 I2C master selector driver.
+ */
+
+/* Command codes */
+#define PEM_OPERATION 0x01
+#define PEM_CLEAR_INFO_FLAGS 0x03
+#define PEM_VOUT_COMMAND 0x21
+#define PEM_VOUT_OV_FAULT_LIMIT 0x40
+#define PEM_READ_DATA_STRING 0xd0
+#define PEM_READ_INPUT_STRING 0xdc
+#define PEM_READ_FIRMWARE_REV 0xdd
+#define PEM_READ_RUN_TIMER 0xde
+#define PEM_FAN_HI_SPEED 0xdf
+#define PEM_FAN_NORMAL_SPEED 0xe0
+#define PEM_READ_FAN_SPEED 0xe1
+
+/* offsets in data string */
+#define PEM_DATA_STATUS_2 0
+#define PEM_DATA_STATUS_1 1
+#define PEM_DATA_ALARM_2 2
+#define PEM_DATA_ALARM_1 3
+#define PEM_DATA_VOUT_LSB 4
+#define PEM_DATA_VOUT_MSB 5
+#define PEM_DATA_CURRENT 6
+#define PEM_DATA_TEMP 7
+
+/* Virtual entries, to report constants */
+#define PEM_DATA_TEMP_MAX 10
+#define PEM_DATA_TEMP_CRIT 11
+
+/* offsets in input string */
+#define PEM_INPUT_VOLTAGE 0
+#define PEM_INPUT_POWER_LSB 1
+#define PEM_INPUT_POWER_MSB 2
+
+/* offsets in fan data */
+#define PEM_FAN_ADJUSTMENT 0
+#define PEM_FAN_FAN1 1
+#define PEM_FAN_FAN2 2
+#define PEM_FAN_FAN3 3
+
+/* Status register bits */
+#define STS1_OUTPUT_ON (1 << 0)
+#define STS1_LEDS_FLASHING (1 << 1)
+#define STS1_EXT_FAULT (1 << 2)
+#define STS1_SERVICE_LED_ON (1 << 3)
+#define STS1_SHUTDOWN_OCCURRED (1 << 4)
+#define STS1_INT_FAULT (1 << 5)
+#define STS1_ISOLATION_TEST_OK (1 << 6)
+
+#define STS2_ENABLE_PIN_HI (1 << 0)
+#define STS2_DATA_OUT_RANGE (1 << 1)
+#define STS2_RESTARTED_OK (1 << 1)
+#define STS2_ISOLATION_TEST_FAIL (1 << 3)
+#define STS2_HIGH_POWER_CAP (1 << 4)
+#define STS2_INVALID_INSTR (1 << 5)
+#define STS2_WILL_RESTART (1 << 6)
+#define STS2_PEC_ERR (1 << 7)
+
+/* Alarm register bits */
+#define ALRM1_VIN_OUT_LIMIT (1 << 0)
+#define ALRM1_VOUT_OUT_LIMIT (1 << 1)
+#define ALRM1_OV_VOLT_SHUTDOWN (1 << 2)
+#define ALRM1_VIN_OVERCURRENT (1 << 3)
+#define ALRM1_TEMP_WARNING (1 << 4)
+#define ALRM1_TEMP_SHUTDOWN (1 << 5)
+#define ALRM1_PRIMARY_FAULT (1 << 6)
+#define ALRM1_POWER_LIMIT (1 << 7)
+
+#define ALRM2_5V_OUT_LIMIT (1 << 1)
+#define ALRM2_TEMP_FAULT (1 << 2)
+#define ALRM2_OV_LOW (1 << 3)
+#define ALRM2_DCDC_TEMP_HIGH (1 << 4)
+#define ALRM2_PRI_TEMP_HIGH (1 << 5)
+#define ALRM2_NO_PRIMARY (1 << 6)
+#define ALRM2_FAN_FAULT (1 << 7)
+
+#define FIRMWARE_REV_LEN 4
+#define DATA_STRING_LEN 9
+#define INPUT_STRING_LEN 5 /* 4 for most devices */
+#define FAN_SPEED_LEN 5
+
+struct pem_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ bool fans_supported;
+ int input_length;
+ unsigned long last_updated; /* in jiffies */
+
+ u8 firmware_rev[FIRMWARE_REV_LEN];
+ u8 data_string[DATA_STRING_LEN];
+ u8 input_string[INPUT_STRING_LEN];
+ u8 fan_speed[FAN_SPEED_LEN];
+};
+
+static int pem_read_block(struct i2c_client *client, u8 command, u8 *data,
+ int data_len)
+{
+ u8 block_buffer[I2C_SMBUS_BLOCK_MAX];
+ int result;
+
+ result = i2c_smbus_read_block_data(client, command, block_buffer);
+ if (unlikely(result < 0))
+ goto abort;
+ if (unlikely(result == 0xff || result != data_len)) {
+ result = -EIO;
+ goto abort;
+ }
+ memcpy(data, block_buffer, data_len);
+ result = 0;
+abort:
+ return result;
+}
+
+static struct pem_data *pem_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pem_data *data = i2c_get_clientdata(client);
+ struct pem_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ int result;
+
+ /* Read data string */
+ result = pem_read_block(client, PEM_READ_DATA_STRING,
+ data->data_string,
+ sizeof(data->data_string));
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+
+ /* Read input string */
+ if (data->input_length) {
+ result = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ data->input_length);
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+ }
+
+ /* Read fan speeds */
+ if (data->fans_supported) {
+ result = pem_read_block(client, PEM_READ_FAN_SPEED,
+ data->fan_speed,
+ sizeof(data->fan_speed));
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+ }
+
+ i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static long pem_get_data(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_DATA_VOUT_LSB:
+ val = (data[index] + (data[index+1] << 8)) * 5 / 2;
+ break;
+ case PEM_DATA_CURRENT:
+ val = data[index] * 200;
+ break;
+ case PEM_DATA_TEMP:
+ val = data[index] * 1000;
+ break;
+ case PEM_DATA_TEMP_MAX:
+ val = 97 * 1000; /* 97 degrees C per datasheet */
+ break;
+ case PEM_DATA_TEMP_CRIT:
+ val = 107 * 1000; /* 107 degrees C per datasheet */
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
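A quick worked example of the scaling above (input bytes invented for illustration): VOUT bytes 0xB0/0x04 form 0x04B0 = 1200, reported as 1200 * 5 / 2 = 3000; a current byte of 0x19 (25) is reported as 25 * 200 = 5000; a temperature byte of 0x28 (40) becomes 40 * 1000 = 40000. The factors suggest the usual hwmon units of millivolts, milliamperes and millidegrees Celsius.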
+
+static long pem_get_input(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_INPUT_VOLTAGE:
+ if (len == INPUT_STRING_LEN)
+ val = (data[index] + (data[index+1] << 8) - 75) * 1000;
+ else
+ val = (data[index] - 75) * 1000;
+ break;
+ case PEM_INPUT_POWER_LSB:
+ if (len == INPUT_STRING_LEN)
+ index++;
+ val = (data[index] + (data[index+1] << 8)) * 1000000L;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
+
+static long pem_get_fan(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_FAN_FAN1:
+ case PEM_FAN_FAN2:
+ case PEM_FAN_FAN3:
+ val = data[index] * 100;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
+
+/*
+ * Show boolean, either a fault or an alarm.
+ * .nr points to the register, .index is the bit mask to check
+ */
+static ssize_t pem_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
+ struct pem_data *data = pem_update_device(dev);
+ u8 status;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ status = data->data_string[attr->nr] & attr->index;
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!status);
+}
+
+static ssize_t pem_show_data(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_data(data->data_string, sizeof(data->data_string),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+static ssize_t pem_show_input(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_input(data->input_string, sizeof(data->input_string),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+static ssize_t pem_show_fan(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_fan(data->fan_speed, sizeof(data->fan_speed),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_VOUT_LSB);
+static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_VOUT_OUT_LIMIT);
+static SENSOR_DEVICE_ATTR_2(in1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_OV_VOLT_SHUTDOWN);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, pem_show_input, NULL,
+ PEM_INPUT_VOLTAGE);
+static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1,
+ ALRM1_VIN_OUT_LIMIT | ALRM1_PRIMARY_FAULT);
+
+/* Currents */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_CURRENT);
+static SENSOR_DEVICE_ATTR_2(curr1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_VIN_OVERCURRENT);
+
+/* Power */
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, pem_show_input, NULL,
+ PEM_INPUT_POWER_LSB);
+static SENSOR_DEVICE_ATTR_2(power1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_POWER_LIMIT);
+
+/* Fans */
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN1);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN2);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN3);
+static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_2, ALRM2_FAN_FAULT);
+
+/* Temperatures */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP_MAX);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP_CRIT);
+static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_TEMP_WARNING);
+static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_TEMP_SHUTDOWN);
+static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_2, ALRM2_TEMP_FAULT);
+
+static struct attribute *pem_attributes[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_power1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group pem_group = {
+ .attrs = pem_attributes,
+};
+
+static struct attribute *pem_input_attributes[] = {
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group pem_input_group = {
+ .attrs = pem_input_attributes,
+};
+
+static struct attribute *pem_fan_attributes[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group pem_fan_group = {
+ .attrs = pem_fan_attributes,
+};
+
+static int pem_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct pem_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BLOCK_DATA
+ | I2C_FUNC_SMBUS_WRITE_BYTE))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * We use the next two commands to determine if the device is really
+ * there.
+ */
+ ret = pem_read_block(client, PEM_READ_FIRMWARE_REV,
+ data->firmware_rev, sizeof(data->firmware_rev));
+ if (ret < 0)
+ goto out_kfree;
+
+ ret = i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
+ if (ret < 0)
+ goto out_kfree;
+
+ dev_info(&client->dev, "Firmware revision %d.%d.%d\n",
+ data->firmware_rev[0], data->firmware_rev[1],
+ data->firmware_rev[2]);
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &pem_group);
+ if (ret)
+ goto out_kfree;
+
+ /*
+ * Check if input readings are supported.
+ * This is the case if we can read input data,
+ * and if the returned data is not all zeros.
+ * Note that input alarms are always supported.
+ */
+ ret = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ sizeof(data->input_string) - 1);
+ if (!ret && (data->input_string[0] || data->input_string[1] ||
+ data->input_string[2]))
+ data->input_length = sizeof(data->input_string) - 1;
+ else if (ret < 0) {
+ /* Input string is one byte longer for some devices */
+ ret = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ sizeof(data->input_string));
+ if (!ret && (data->input_string[0] || data->input_string[1] ||
+ data->input_string[2] || data->input_string[3]))
+ data->input_length = sizeof(data->input_string);
+ }
+ ret = 0;
+ if (data->input_length) {
+ ret = sysfs_create_group(&client->dev.kobj, &pem_input_group);
+ if (ret)
+ goto out_remove_groups;
+ }
+
+ /*
+ * Check if fan speed readings are supported.
+ * This is the case if we can read fan speed data,
+ * and if the returned data is not all zeros.
+ * Note that the fan alarm is always supported.
+ */
+ ret = pem_read_block(client, PEM_READ_FAN_SPEED,
+ data->fan_speed,
+ sizeof(data->fan_speed));
+ if (!ret && (data->fan_speed[0] || data->fan_speed[1] ||
+ data->fan_speed[2] || data->fan_speed[3])) {
+ data->fans_supported = true;
+ ret = sysfs_create_group(&client->dev.kobj, &pem_fan_group);
+ if (ret)
+ goto out_remove_groups;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_remove_groups;
+ }
+
+ return 0;
+
+out_remove_groups:
+ sysfs_remove_group(&client->dev.kobj, &pem_input_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_group);
+out_kfree:
+ kfree(data);
+ return ret;
+}
+
+static int pem_remove(struct i2c_client *client)
+{
+ struct pem_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+
+ sysfs_remove_group(&client->dev.kobj, &pem_input_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_group);
+
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id pem_id[] = {
+ {"lineage_pem", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pem_id);
+
+static struct i2c_driver pem_driver = {
+ .driver = {
+ .name = "lineage_pem",
+ },
+ .probe = pem_probe,
+ .remove = pem_remove,
+ .id_table = pem_id,
+};
+
+static int __init pem_init(void)
+{
+ return i2c_add_driver(&pem_driver);
+}
+
+static void __exit pem_exit(void)
+{
+ i2c_del_driver(&pem_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
+MODULE_LICENSE("GPL");
+
+module_init(pem_init);
+module_exit(pem_exit);
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index f36eb80d227f..ef902d5d06ab 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -232,13 +232,16 @@ static const struct i2c_device_id lm75_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, lm75_ids);
+#define LM75A_ID 0xA1
+
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm75_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
int i;
- int cur, conf, hyst, os;
+ int conf, hyst, os;
+ bool is_lm75a = false;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA))
@@ -250,37 +253,58 @@ static int lm75_detect(struct i2c_client *new_client,
addresses 0x04-0x07 returning the last read value.
The cycling+unused addresses combination is not tested,
since it would significantly slow the detection down and would
- hardly add any value. */
+ hardly add any value.
- /* Unused addresses */
- cur = i2c_smbus_read_word_data(new_client, 0);
- conf = i2c_smbus_read_byte_data(new_client, 1);
- hyst = i2c_smbus_read_word_data(new_client, 2);
- if (i2c_smbus_read_word_data(new_client, 4) != hyst
- || i2c_smbus_read_word_data(new_client, 5) != hyst
- || i2c_smbus_read_word_data(new_client, 6) != hyst
- || i2c_smbus_read_word_data(new_client, 7) != hyst)
- return -ENODEV;
- os = i2c_smbus_read_word_data(new_client, 3);
- if (i2c_smbus_read_word_data(new_client, 4) != os
- || i2c_smbus_read_word_data(new_client, 5) != os
- || i2c_smbus_read_word_data(new_client, 6) != os
- || i2c_smbus_read_word_data(new_client, 7) != os)
- return -ENODEV;
+ The National Semiconductor LM75A differs from earlier
+ LM75s. It has an ID byte of 0xaX (where X is the chip
+ revision, with 1 being the only revision in existence) in
+ register 7, and unused registers return 0xff rather than the
+ last read value. */
/* Unused bits */
+ conf = i2c_smbus_read_byte_data(new_client, 1);
if (conf & 0xe0)
return -ENODEV;
+ /* First check for LM75A */
+ if (i2c_smbus_read_byte_data(new_client, 7) == LM75A_ID) {
+ /* LM75A returns 0xff on unused registers so
+ just to be sure we check for that too. */
+ if (i2c_smbus_read_byte_data(new_client, 4) != 0xff
+ || i2c_smbus_read_byte_data(new_client, 5) != 0xff
+ || i2c_smbus_read_byte_data(new_client, 6) != 0xff)
+ return -ENODEV;
+ is_lm75a = true;
+ hyst = i2c_smbus_read_byte_data(new_client, 2);
+ os = i2c_smbus_read_byte_data(new_client, 3);
+ } else { /* Traditional style LM75 detection */
+ /* Unused addresses */
+ hyst = i2c_smbus_read_byte_data(new_client, 2);
+ if (i2c_smbus_read_byte_data(new_client, 4) != hyst
+ || i2c_smbus_read_byte_data(new_client, 5) != hyst
+ || i2c_smbus_read_byte_data(new_client, 6) != hyst
+ || i2c_smbus_read_byte_data(new_client, 7) != hyst)
+ return -ENODEV;
+ os = i2c_smbus_read_byte_data(new_client, 3);
+ if (i2c_smbus_read_byte_data(new_client, 4) != os
+ || i2c_smbus_read_byte_data(new_client, 5) != os
+ || i2c_smbus_read_byte_data(new_client, 6) != os
+ || i2c_smbus_read_byte_data(new_client, 7) != os)
+ return -ENODEV;
+ }
+
/* Addresses cycling */
- for (i = 8; i < 0xff; i += 8) {
+ for (i = 8; i <= 248; i += 40) {
if (i2c_smbus_read_byte_data(new_client, i + 1) != conf
- || i2c_smbus_read_word_data(new_client, i + 2) != hyst
- || i2c_smbus_read_word_data(new_client, i + 3) != os)
+ || i2c_smbus_read_byte_data(new_client, i + 2) != hyst
+ || i2c_smbus_read_byte_data(new_client, i + 3) != os)
+ return -ENODEV;
+ if (is_lm75a && i2c_smbus_read_byte_data(new_client, i + 7)
+ != LM75A_ID)
return -ENODEV;
}
- strlcpy(info->type, "lm75", I2C_NAME_SIZE);
+ strlcpy(info->type, is_lm75a ? "lm75a" : "lm75", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index d2cc28660816..cf47e6e476ed 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
any_chip, lm85b, lm85c,
adm1027, adt7463, adt7468,
- emc6d100, emc6d102, emc6d103
+ emc6d100, emc6d102, emc6d103, emc6d103s
};
/* The LM85 registers */
@@ -283,10 +283,6 @@ struct lm85_zone {
u8 hyst; /* Low limit hysteresis. (0-15) */
u8 range; /* Temp range, encoded */
s8 critical; /* "All fans ON" temp limit */
- u8 off_desired; /* Actual "off" temperature specified. Preserved
- * to prevent "drift" as other autofan control
- * values change.
- */
u8 max_desired; /* Actual "max" temperature specified. Preserved
* to prevent "drift" as other autofan control
* values change.
@@ -306,6 +302,8 @@ struct lm85_data {
const int *freq_map;
enum chips type;
+ bool has_vid5; /* true if VID5 is configured for ADT7463 or ADT7468 */
+
struct mutex update_lock;
int valid; /* !=0 if following fields are valid */
unsigned long last_reading; /* In jiffies */
@@ -352,6 +350,7 @@ static const struct i2c_device_id lm85_id[] = {
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
{ "emc6d103", emc6d103 },
+ { "emc6d103s", emc6d103s },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -420,8 +419,7 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
struct lm85_data *data = lm85_update_device(dev);
int vid;
- if ((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80)) {
+ if (data->has_vid5) {
/* 6-pin VID (VRM 10) */
vid = vid_from_reg(data->vid & 0x3f, data->vrm);
} else {
@@ -891,7 +889,6 @@ static ssize_t set_temp_auto_temp_off(struct device *dev,
mutex_lock(&data->update_lock);
min = TEMP_FROM_REG(data->zone[nr].limit);
- data->zone[nr].off_desired = TEMP_TO_REG(val);
data->zone[nr].hyst = HYST_TO_REG(min - val);
if (nr == 0 || nr == 1) {
lm85_write_value(client, LM85_REG_AFAN_HYST1,
@@ -934,18 +931,6 @@ static ssize_t set_temp_auto_temp_min(struct device *dev,
((data->zone[nr].range & 0x0f) << 4)
| (data->pwm_freq[nr] & 0x07));
-/* Update temp_auto_hyst and temp_auto_off */
- data->zone[nr].hyst = HYST_TO_REG(TEMP_FROM_REG(
- data->zone[nr].limit) - TEMP_FROM_REG(
- data->zone[nr].off_desired));
- if (nr == 0 || nr == 1) {
- lm85_write_value(client, LM85_REG_AFAN_HYST1,
- (data->zone[0].hyst << 4)
- | data->zone[1].hyst);
- } else {
- lm85_write_value(client, LM85_REG_AFAN_HYST2,
- (data->zone[2].hyst << 4));
- }
mutex_unlock(&data->update_lock);
return count;
}
@@ -1084,13 +1069,7 @@ static struct attribute *lm85_attributes[] = {
&sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr,
- &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr,
- &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr,
- &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr,
&sensor_dev_attr_temp1_auto_temp_min.dev_attr.attr,
&sensor_dev_attr_temp2_auto_temp_min.dev_attr.attr,
&sensor_dev_attr_temp3_auto_temp_min.dev_attr.attr,
@@ -1111,6 +1090,26 @@ static const struct attribute_group lm85_group = {
.attrs = lm85_attributes,
};
+static struct attribute *lm85_attributes_minctl[] = {
+ &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr,
+};
+
+static const struct attribute_group lm85_group_minctl = {
+ .attrs = lm85_attributes_minctl,
+};
+
+static struct attribute *lm85_attributes_temp_off[] = {
+ &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr,
+};
+
+static const struct attribute_group lm85_group_temp_off = {
+ .attrs = lm85_attributes_temp_off,
+};
+
static struct attribute *lm85_attributes_in4[] = {
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
@@ -1258,16 +1257,9 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
case LM85_VERSTEP_EMC6D103_A1:
type_name = "emc6d103";
break;
- /*
- * Registers apparently missing in EMC6D103S/EMC6D103:A2
- * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
- * (according to the data sheets), but used unconditionally
- * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
- * So skip EMC6D103S for now.
case LM85_VERSTEP_EMC6D103S:
type_name = "emc6d103s";
break;
- */
}
} else {
dev_dbg(&adapter->dev,
@@ -1280,6 +1272,19 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
+static void lm85_remove_files(struct i2c_client *client, struct lm85_data *data)
+{
+ sysfs_remove_group(&client->dev.kobj, &lm85_group);
+ if (data->type != emc6d103s) {
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_minctl);
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_temp_off);
+ }
+ if (!data->has_vid5)
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
+ if (data->type == emc6d100)
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+}
+
static int lm85_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1302,6 +1307,7 @@ static int lm85_probe(struct i2c_client *client,
case emc6d100:
case emc6d102:
case emc6d103:
+ case emc6d103s:
data->freq_map = adm1027_freq_map;
break;
default:
@@ -1319,11 +1325,26 @@ static int lm85_probe(struct i2c_client *client,
if (err)
goto err_kfree;
+ /* minctl and temp_off exist on all chips except emc6d103s */
+ if (data->type != emc6d103s) {
+ err = sysfs_create_group(&client->dev.kobj, &lm85_group_minctl);
+ if (err)
+ goto err_kfree;
+ err = sysfs_create_group(&client->dev.kobj,
+ &lm85_group_temp_off);
+ if (err)
+ goto err_kfree;
+ }
+
/* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used
as a sixth digital VID input rather than an analog input. */
- data->vid = lm85_read_value(client, LM85_REG_VID);
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80)))
+ if (data->type == adt7463 || data->type == adt7468) {
+ u8 vid = lm85_read_value(client, LM85_REG_VID);
+ if (vid & 0x80)
+ data->has_vid5 = true;
+ }
+
+ if (!data->has_vid5)
if ((err = sysfs_create_group(&client->dev.kobj,
&lm85_group_in4)))
goto err_remove_files;
@@ -1344,10 +1365,7 @@ static int lm85_probe(struct i2c_client *client,
/* Error out and cleanup code */
err_remove_files:
- sysfs_remove_group(&client->dev.kobj, &lm85_group);
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
- if (data->type == emc6d100)
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+ lm85_remove_files(client, data);
err_kfree:
kfree(data);
return err;
@@ -1357,10 +1375,7 @@ static int lm85_remove(struct i2c_client *client)
{
struct lm85_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm85_group);
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
- if (data->type == emc6d100)
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+ lm85_remove_files(client, data);
kfree(data);
return 0;
}
@@ -1457,11 +1472,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
lm85_read_value(client, LM85_REG_FAN(i));
}
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80))) {
- data->in[4] = lm85_read_value(client,
- LM85_REG_IN(4));
- }
+ if (!data->has_vid5)
+ data->in[4] = lm85_read_value(client, LM85_REG_IN(4));
if (data->type == adt7468)
data->cfg5 = lm85_read_value(client, ADT7468_REG_CFG5);
@@ -1487,7 +1499,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
/* More alarm bits */
data->alarms |= lm85_read_value(client,
EMC6D100_REG_ALARM3) << 16;
- } else if (data->type == emc6d102 || data->type == emc6d103) {
+ } else if (data->type == emc6d102 || data->type == emc6d103 ||
+ data->type == emc6d103s) {
/* Have to read LSB bits after the MSB ones because
the reading of the MSB bits has frozen the
LSBs (backward from the ADM1027).
@@ -1528,8 +1541,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
lm85_read_value(client, LM85_REG_FAN_MIN(i));
}
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80))) {
+ if (!data->has_vid5) {
data->in_min[4] = lm85_read_value(client,
LM85_REG_IN_MIN(4));
data->in_max[4] = lm85_read_value(client,
@@ -1573,17 +1585,19 @@ static struct lm85_data *lm85_update_device(struct device *dev)
}
}
- i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
- data->autofan[0].min_off = (i & 0x20) != 0;
- data->autofan[1].min_off = (i & 0x40) != 0;
- data->autofan[2].min_off = (i & 0x80) != 0;
+ if (data->type != emc6d103s) {
+ i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
+ data->autofan[0].min_off = (i & 0x20) != 0;
+ data->autofan[1].min_off = (i & 0x40) != 0;
+ data->autofan[2].min_off = (i & 0x80) != 0;
- i = lm85_read_value(client, LM85_REG_AFAN_HYST1);
- data->zone[0].hyst = i >> 4;
- data->zone[1].hyst = i & 0x0f;
+ i = lm85_read_value(client, LM85_REG_AFAN_HYST1);
+ data->zone[0].hyst = i >> 4;
+ data->zone[1].hyst = i & 0x0f;
- i = lm85_read_value(client, LM85_REG_AFAN_HYST2);
- data->zone[2].hyst = i >> 4;
+ i = lm85_read_value(client, LM85_REG_AFAN_HYST2);
+ data->zone[2].hyst = i >> 4;
+ }
data->last_config = jiffies;
} /* last_config */
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
new file mode 100644
index 000000000000..4ac06b75aa60
--- /dev/null
+++ b/drivers/hwmon/ltc4151.c
@@ -0,0 +1,256 @@
+/*
+ * Driver for Linear Technology LTC4151 High Voltage I2C Current
+ * and Voltage Monitor
+ *
+ * Copyright (C) 2011 AppearTV AS
+ *
+ * Derived from:
+ *
+ * Driver for Linear Technology LTC4261 I2C Negative Voltage Hot
+ * Swap Controller
+ * Copyright (C) 2010 Ericsson AB.
+ *
+ * Datasheet: http://www.linear.com/docs/Datasheet/4151fc.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/* chip registers */
+#define LTC4151_SENSE_H 0x00
+#define LTC4151_SENSE_L 0x01
+#define LTC4151_VIN_H 0x02
+#define LTC4151_VIN_L 0x03
+#define LTC4151_ADIN_H 0x04
+#define LTC4151_ADIN_L 0x05
+
+struct ltc4151_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ /* Registers */
+ u8 regs[6];
+};
+
+static struct ltc4151_data *ltc4151_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ltc4151_data *data = i2c_get_clientdata(client);
+ struct ltc4151_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ /*
+ * The chip's A/D updates 6 times per second
+ * (Conversion Rate 6 - 9 Hz)
+ */
+ if (time_after(jiffies, data->last_updated + HZ / 6) || !data->valid) {
+ int i;
+
+ dev_dbg(&client->dev, "Starting ltc4151 update\n");
+
+ /* Read all registers */
+ for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, i);
+ if (unlikely(val < 0)) {
+ dev_dbg(dev,
+ "Failed to read ADC value: error %d\n",
+ val);
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->regs[i] = val;
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+/* Return the voltage (mV) or, for the sense register, the current (mA) */
+static int ltc4151_get_value(struct ltc4151_data *data, u8 reg)
+{
+ u32 val;
+
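+ /* Assemble the 12-bit ADC result: 8 MSBs in regs[reg], 4 LSBs in the upper nibble of regs[reg + 1] */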
+ val = (data->regs[reg] << 4) + (data->regs[reg + 1] >> 4);
+
+ switch (reg) {
+ case LTC4151_ADIN_H:
+ /* 500uV resolution. Convert to mV. */
+ val = val * 500 / 1000;
+ break;
+ case LTC4151_SENSE_H:
+ /*
+ * 20uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA.
+ */
+ val = val * 20;
+ break;
+ case LTC4151_VIN_H:
+ /* 25 mV per increment */
+ val = val * 25;
+ break;
+ default:
+ /* If we get here, the developer messed up */
+ WARN_ON_ONCE(1);
+ val = 0;
+ break;
+ }
+
+ return val;
+}
+
+static ssize_t ltc4151_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ltc4151_data *data = ltc4151_update_device(dev);
+ int value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = ltc4151_get_value(data, attr->index);
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+/*
+ * Input voltages.
+ */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO,
+ ltc4151_show_value, NULL, LTC4151_VIN_H);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO,
+ ltc4151_show_value, NULL, LTC4151_ADIN_H);
+
+/* Currents (via sense resistor) */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO,
+ ltc4151_show_value, NULL, LTC4151_SENSE_H);
+
+/* Finally, construct an array of pointers to members of the above objects,
+ * as required for sysfs_create_group()
+ */
+static struct attribute *ltc4151_attributes[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group ltc4151_group = {
+ .attrs = ltc4151_attributes,
+};
+
+static int ltc4151_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct ltc4151_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_kzalloc;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &ltc4151_group);
+ if (ret)
+ goto out_sysfs_create_group;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_hwmon_device_register;
+ }
+
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
+out_sysfs_create_group:
+ kfree(data);
+out_kzalloc:
+ return ret;
+}
+
+static int ltc4151_remove(struct i2c_client *client)
+{
+ struct ltc4151_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct i2c_device_id ltc4151_id[] = {
+ { "ltc4151", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltc4151_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ltc4151_driver = {
+ .driver = {
+ .name = "ltc4151",
+ },
+ .probe = ltc4151_probe,
+ .remove = ltc4151_remove,
+ .id_table = ltc4151_id,
+};
+
+static int __init ltc4151_init(void)
+{
+ return i2c_add_driver(&ltc4151_driver);
+}
+
+static void __exit ltc4151_exit(void)
+{
+ i2c_del_driver(&ltc4151_driver);
+}
+
+MODULE_AUTHOR("Per Dalen <per.dalen@appeartv.com>");
+MODULE_DESCRIPTION("LTC4151 driver");
+MODULE_LICENSE("GPL");
+
+module_init(ltc4151_init);
+module_exit(ltc4151_exit);
diff --git a/drivers/hwmon/max16064.c b/drivers/hwmon/max16064.c
new file mode 100644
index 000000000000..1d6d717060d3
--- /dev/null
+++ b/drivers/hwmon/max16064.c
@@ -0,0 +1,91 @@
+/*
+ * Hardware monitoring driver for Maxim MAX16064
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info max16064_info = {
+ .pages = 4,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .m[PSC_VOLTAGE_IN] = 19995,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 19995,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_TEMPERATURE] = -7612,
+ .b[PSC_TEMPERATURE] = 335,
+ .R[PSC_TEMPERATURE] = -3,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_TEMP,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+};
+
+static int max16064_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max16064_info);
+}
+
+static int max16064_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max16064_id[] = {
+ {"max16064", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max16064_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max16064_driver = {
+ .driver = {
+ .name = "max16064",
+ },
+ .probe = max16064_probe,
+ .remove = max16064_remove,
+ .id_table = max16064_id,
+};
+
+static int __init max16064_init(void)
+{
+ return i2c_add_driver(&max16064_driver);
+}
+
+static void __exit max16064_exit(void)
+{
+ i2c_del_driver(&max16064_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX16064");
+MODULE_LICENSE("GPL");
+module_init(max16064_init);
+module_exit(max16064_exit);
diff --git a/drivers/hwmon/max34440.c b/drivers/hwmon/max34440.c
new file mode 100644
index 000000000000..992b701b4c5e
--- /dev/null
+++ b/drivers/hwmon/max34440.c
@@ -0,0 +1,199 @@
+/*
+ * Hardware monitoring driver for Maxim MAX34440/MAX34441
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum chips { max34440, max34441 };
+
+#define MAX34440_STATUS_OC_WARN (1 << 0)
+#define MAX34440_STATUS_OC_FAULT (1 << 1)
+#define MAX34440_STATUS_OT_FAULT (1 << 5)
+#define MAX34440_STATUS_OT_WARN (1 << 6)
+
+static int max34440_get_status(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+ int mfg_status;
+
+ ret = pmbus_set_page(client, page);
+ if (ret < 0)
+ return ret;
+
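+ /*
+ * Translate the chip's manufacturer-specific status bits into the
+ * generic PMBus status bits expected for the requested register.
+ */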
+ switch (reg) {
+ case PMBUS_STATUS_IOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX34440_STATUS_OC_WARN)
+ ret |= PB_IOUT_OC_WARNING;
+ if (mfg_status & MAX34440_STATUS_OC_FAULT)
+ ret |= PB_IOUT_OC_FAULT;
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ mfg_status = pmbus_read_word_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX34440_STATUS_OT_WARN)
+ ret |= PB_TEMP_OT_WARNING;
+ if (mfg_status & MAX34440_STATUS_OT_FAULT)
+ ret |= PB_TEMP_OT_FAULT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static struct pmbus_driver_info max34440_info[] = {
+ [max34440] = {
+ .pages = 14,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3, /* R = 0 in datasheet reflects mV */
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 3, /* R = 0 in datasheet reflects mA */
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max34440_get_status,
+ },
+ [max34441] = {
+ .pages = 12,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .direct[PSC_FAN] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .m[PSC_FAN] = 1,
+ .b[PSC_FAN] = 0,
+ .R[PSC_FAN] = 0,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
+ .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max34440_get_status,
+ },
+};
+
+static int max34440_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max34440_info[id->driver_data]);
+}
+
+static int max34440_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max34440_id[] = {
+ {"max34440", max34440},
+ {"max34441", max34441},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max34440_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max34440_driver = {
+ .driver = {
+ .name = "max34440",
+ },
+ .probe = max34440_probe,
+ .remove = max34440_remove,
+ .id_table = max34440_id,
+};
+
+static int __init max34440_init(void)
+{
+ return i2c_add_driver(&max34440_driver);
+}
+
+static void __exit max34440_exit(void)
+{
+ i2c_del_driver(&max34440_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX34440/MAX34441");
+MODULE_LICENSE("GPL");
+module_init(max34440_init);
+module_exit(max34440_exit);
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
new file mode 100644
index 000000000000..f20d9978ee78
--- /dev/null
+++ b/drivers/hwmon/max6639.c
@@ -0,0 +1,653 @@
+/*
+ * max6639.c - Support for Maxim MAX6639
+ *
+ * 2-Channel Temperature Monitor with Dual PWM Fan-Speed Controller
+ *
+ * Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
+ *
+ * based on the initial MAX6639 support from semptian.net
+ * by He Changqing <hechangqing@semptian.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/i2c/max6639.h>
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
+
+/* The MAX6639 registers, valid channel numbers: 0, 1 */
+#define MAX6639_REG_TEMP(ch) (0x00 + (ch))
+#define MAX6639_REG_STATUS 0x02
+#define MAX6639_REG_OUTPUT_MASK 0x03
+#define MAX6639_REG_GCONFIG 0x04
+#define MAX6639_REG_TEMP_EXT(ch) (0x05 + (ch))
+#define MAX6639_REG_ALERT_LIMIT(ch) (0x08 + (ch))
+#define MAX6639_REG_OT_LIMIT(ch) (0x0A + (ch))
+#define MAX6639_REG_THERM_LIMIT(ch) (0x0C + (ch))
+#define MAX6639_REG_FAN_CONFIG1(ch) (0x10 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG2a(ch) (0x11 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG2b(ch) (0x12 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG3(ch) (0x13 + (ch) * 4)
+#define MAX6639_REG_FAN_CNT(ch) (0x20 + (ch))
+#define MAX6639_REG_TARGET_CNT(ch) (0x22 + (ch))
+#define MAX6639_REG_FAN_PPR(ch) (0x24 + (ch))
+#define MAX6639_REG_TARGTDUTY(ch) (0x26 + (ch))
+#define MAX6639_REG_FAN_START_TEMP(ch) (0x28 + (ch))
+#define MAX6639_REG_DEVID 0x3D
+#define MAX6639_REG_MANUID 0x3E
+#define MAX6639_REG_DEVREV 0x3F
+
+/* Register bits */
+#define MAX6639_GCONFIG_STANDBY 0x80
+#define MAX6639_GCONFIG_POR 0x40
+#define MAX6639_GCONFIG_DISABLE_TIMEOUT 0x20
+#define MAX6639_GCONFIG_CH2_LOCAL 0x10
+#define MAX6639_GCONFIG_PWM_FREQ_HI 0x08
+
+#define MAX6639_FAN_CONFIG1_PWM 0x80
+
+#define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40
+
+static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
+
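+ /*
+ * Convert a tach count to RPM: a count of 0 yields -1 and 255 yields 0
+ * (presumably no reading and a stopped fan, respectively); otherwise
+ * the RPM follows from the selected speed range and the
+ * pulses-per-revolution divisor.
+ */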
+#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \
+ (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val)))
+#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
+
+/*
+ * Client data (each client gets its own)
+ */
+struct max6639_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ /* Register values sampled regularly */
+ u16 temp[2]; /* Temperature, in 1/8 C, 0..255 C */
+ bool temp_fault[2]; /* Detected temperature diode failure */
+ u8 fan[2]; /* Register value: TACH count for fans >=30 */
+ u8 status; /* Detected channel alarms and fan failures */
+
+ /* Register values only written to */
+ u8 pwm[2]; /* Register value: Duty cycle 0..120 */
+ u8 temp_therm[2]; /* THERM Temperature, 0..255 C (->_max) */
+ u8 temp_alert[2]; /* ALERT Temperature, 0..255 C (->_crit) */
+ u8 temp_ot[2]; /* OT Temperature, 0..255 C (->_emergency) */
+
+ /* Register values initialized only once */
+ u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */
+ u8 rpm_range; /* Index in above rpm_ranges table */
+};
+
+static struct max6639_data *max6639_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_data *ret = data;
+ int i;
+ int status_reg;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
+ int res;
+
+ dev_dbg(&client->dev, "Starting max6639 update\n");
+
+ status_reg = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_STATUS);
+ if (status_reg < 0) {
+ ret = ERR_PTR(status_reg);
+ goto abort;
+ }
+
+ data->status = status_reg;
+
+ for (i = 0; i < 2; i++) {
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_FAN_CNT(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->fan[i] = res;
+
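+ /*
+ * TEMP_EXT carries the 1/8-degree fraction in bits 7:5 and a
+ * diode-fault flag in bit 0; the integer part from
+ * MAX6639_REG_TEMP is merged in below, so temp[] ends up in
+ * units of 1/8 degree C.
+ */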
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_TEMP_EXT(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->temp[i] = res >> 5;
+ data->temp_fault[i] = res & 0x01;
+
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_TEMP(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->temp[i] |= res << 3;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static ssize_t show_temp_input(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ long temp;
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
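+ /* temp[] is in 1/8 degree C steps; one step is 125 millidegrees, the unit sysfs expects */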
+ temp = data->temp[attr->index] * 125;
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_temp_fault(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp_fault[attr->index]);
+}
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000));
+}
+
+static ssize_t set_temp_max(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_therm[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_THERM_LIMIT(attr->index),
+ data->temp_therm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp_crit(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000));
+}
+
+static ssize_t set_temp_crit(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_alert[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_ALERT_LIMIT(attr->index),
+ data->temp_alert[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp_emergency(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000));
+}
+
+static ssize_t set_temp_emergency(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_ot[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_OT_LIMIT(attr->index),
+ data->temp_ot[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
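+ /* the duty-cycle register spans 0..120; rescale to the 0..255 range of the pwm sysfs attribute */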
+ return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120);
+}
+
+static ssize_t set_pwm(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ val = SENSORS_LIMIT(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ data->pwm[attr->index] = (u8)(val * 120 / 255);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_TARGTDUTY(attr->index),
+ data->pwm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_fan_input(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
+ data->ppr, data->rpm_range));
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", !!(data->status & (1 << attr->index)));
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
+ set_temp_max, 0);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
+ set_temp_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ set_temp_crit, 0);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ set_temp_crit, 1);
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO,
+ show_temp_emergency, set_temp_emergency, 0);
+static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO,
+ show_temp_emergency, set_temp_emergency, 1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4);
+
+
+static struct attribute *max6639_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_fault.dev_attr.attr,
+ &sensor_dev_attr_fan2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group max6639_group = {
+ .attrs = max6639_attributes,
+};
+
+/*
+ * Return the index of the requested fan speed range in the rpm_ranges
+ * table, or 1 (4000 RPM) if the requested range is not supported.
+ */
+static int rpm_range_to_reg(int range)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rpm_ranges); i++) {
+ if (rpm_ranges[i] == range)
+ return i;
+ }
+
+ return 1; /* default: 4000 RPM */
+}
+
+static int max6639_init_client(struct i2c_client *client)
+{
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_platform_data *max6639_info =
+ client->dev.platform_data;
+ int i = 0;
+ int rpm_range = 1; /* default: 4000 RPM */
+ int err = 0;
+
+ /* Reset chip to default values, see below for GCONFIG setup */
+ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_POR);
+ if (err)
+ goto exit;
+
+ /* Fans pulse per revolution is 2 by default */
+ if (max6639_info && max6639_info->ppr > 0 &&
+ max6639_info->ppr < 5)
+ data->ppr = max6639_info->ppr;
+ else
+ data->ppr = 2;
+ data->ppr -= 1;
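+ /* data->ppr holds pulses-per-revolution minus one (0..3), written into the high-order bits of the PPR register */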
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_PPR(i),
+ data->ppr << 5);
+ if (err)
+ goto exit;
+
+ if (max6639_info)
+ rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
+ data->rpm_range = rpm_range;
+
+ for (i = 0; i < 2; i++) {
+
+ /* Fans config PWM, RPM */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG1(i),
+ MAX6639_FAN_CONFIG1_PWM | rpm_range);
+ if (err)
+ goto exit;
+
+ /* Fans PWM polarity high by default */
+ if (max6639_info && max6639_info->pwm_polarity == 0)
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG2a(i), 0x00);
+ else
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ if (err)
+ goto exit;
+
+ /*
+ * /THERM full speed enable,
+ * PWM frequency 25kHz, see also GCONFIG below
+ */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG3(i),
+ MAX6639_FAN_CONFIG3_THERM_FULL_SPEED | 0x03);
+ if (err)
+ goto exit;
+
+ /* Max. temp. 80C/90C/100C */
+ data->temp_therm[i] = 80;
+ data->temp_alert[i] = 90;
+ data->temp_ot[i] = 100;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_THERM_LIMIT(i),
+ data->temp_therm[i]);
+ if (err)
+ goto exit;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_ALERT_LIMIT(i),
+ data->temp_alert[i]);
+ if (err)
+ goto exit;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_OT_LIMIT(i), data->temp_ot[i]);
+ if (err)
+ goto exit;
+
+ /* PWM 120/120 (i.e. 100%) */
+ data->pwm[i] = 120;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_TARGTDUTY(i), data->pwm[i]);
+ if (err)
+ goto exit;
+ }
+ /* Start monitoring */
+ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_DISABLE_TIMEOUT | MAX6639_GCONFIG_CH2_LOCAL |
+ MAX6639_GCONFIG_PWM_FREQ_HI);
+exit:
+ return err;
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max6639_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int dev_id, manu_id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ /* Actual detection via device and manufacturer ID */
+ dev_id = i2c_smbus_read_byte_data(client, MAX6639_REG_DEVID);
+ manu_id = i2c_smbus_read_byte_data(client, MAX6639_REG_MANUID);
+ if (dev_id != 0x58 || manu_id != 0x4D)
+ return -ENODEV;
+
+ strlcpy(info->type, "max6639", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int max6639_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max6639_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct max6639_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the max6639 chip */
+ err = max6639_init_client(client);
+ if (err < 0)
+ goto error_free;
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &max6639_group);
+ if (err)
+ goto error_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error_remove;
+ }
+
+ dev_info(&client->dev, "temperature sensor and fan control found\n");
+
+ return 0;
+
+error_remove:
+ sysfs_remove_group(&client->dev.kobj, &max6639_group);
+error_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int max6639_remove(struct i2c_client *client)
+{
+ struct max6639_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &max6639_group);
+
+ kfree(data);
+ return 0;
+}
+
+static int max6639_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (data < 0)
+ return data;
+
+ return i2c_smbus_write_byte_data(client,
+ MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY);
+}
+
+static int max6639_resume(struct i2c_client *client)
+{
+ int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (data < 0)
+ return data;
+
+ return i2c_smbus_write_byte_data(client,
+ MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY);
+}
+
+static const struct i2c_device_id max6639_id[] = {
+ {"max6639", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max6639_id);
+
+static struct i2c_driver max6639_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max6639",
+ },
+ .probe = max6639_probe,
+ .remove = max6639_remove,
+ .suspend = max6639_suspend,
+ .resume = max6639_resume,
+ .id_table = max6639_id,
+ .detect = max6639_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init max6639_init(void)
+{
+ return i2c_add_driver(&max6639_driver);
+}
+
+static void __exit max6639_exit(void)
+{
+ i2c_del_driver(&max6639_driver);
+}
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("max6639 driver");
+MODULE_LICENSE("GPL");
+
+module_init(max6639_init);
+module_exit(max6639_exit);
diff --git a/drivers/hwmon/max8688.c b/drivers/hwmon/max8688.c
new file mode 100644
index 000000000000..8ebfef2ecf26
--- /dev/null
+++ b/drivers/hwmon/max8688.c
@@ -0,0 +1,158 @@
+/*
+ * Hardware monitoring driver for Maxim MAX8688
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+#define MAX8688_MFG_STATUS 0xd8
+
+#define MAX8688_STATUS_OC_FAULT (1 << 4)
+#define MAX8688_STATUS_OV_FAULT (1 << 5)
+#define MAX8688_STATUS_OV_WARNING (1 << 8)
+#define MAX8688_STATUS_UV_FAULT (1 << 9)
+#define MAX8688_STATUS_UV_WARNING (1 << 10)
+#define MAX8688_STATUS_UC_FAULT (1 << 11)
+#define MAX8688_STATUS_OC_WARNING (1 << 12)
+#define MAX8688_STATUS_OT_FAULT (1 << 13)
+#define MAX8688_STATUS_OT_WARNING (1 << 14)
+
+static int max8688_get_status(struct i2c_client *client, int page, int reg)
+{
+ int ret = 0;
+ int mfg_status;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_STATUS_VOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_UV_WARNING)
+ ret |= PB_VOLTAGE_UV_WARNING;
+ if (mfg_status & MAX8688_STATUS_UV_FAULT)
+ ret |= PB_VOLTAGE_UV_FAULT;
+ if (mfg_status & MAX8688_STATUS_OV_WARNING)
+ ret |= PB_VOLTAGE_OV_WARNING;
+ if (mfg_status & MAX8688_STATUS_OV_FAULT)
+ ret |= PB_VOLTAGE_OV_FAULT;
+ break;
+ case PMBUS_STATUS_IOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_UC_FAULT)
+ ret |= PB_IOUT_UC_FAULT;
+ if (mfg_status & MAX8688_STATUS_OC_WARNING)
+ ret |= PB_IOUT_OC_WARNING;
+ if (mfg_status & MAX8688_STATUS_OC_FAULT)
+ ret |= PB_IOUT_OC_FAULT;
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_OT_WARNING)
+ ret |= PB_TEMP_OT_WARNING;
+ if (mfg_status & MAX8688_STATUS_OT_FAULT)
+ ret |= PB_TEMP_OT_FAULT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static struct pmbus_driver_info max8688_info = {
+ .pages = 1,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 19995,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 19995,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_CURRENT_OUT] = 23109,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = -2,
+ .m[PSC_TEMPERATURE] = -7612,
+ .b[PSC_TEMPERATURE] = 335,
+ .R[PSC_TEMPERATURE] = -3,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max8688_get_status,
+};
+
+static int max8688_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max8688_info);
+}
+
+static int max8688_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max8688_id[] = {
+ {"max8688", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max8688_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max8688_driver = {
+ .driver = {
+ .name = "max8688",
+ },
+ .probe = max8688_probe,
+ .remove = max8688_remove,
+ .id_table = max8688_id,
+};
+
+static int __init max8688_init(void)
+{
+ return i2c_add_driver(&max8688_driver);
+}
+
+static void __exit max8688_exit(void)
+{
+ i2c_del_driver(&max8688_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX8688");
+MODULE_LICENSE("GPL");
+module_init(max8688_init);
+module_exit(max8688_exit);
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus.c
new file mode 100644
index 000000000000..98e2e28899e2
--- /dev/null
+++ b/drivers/hwmon/pmbus.c
@@ -0,0 +1,203 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+/*
+ * Find sensor groups and status registers on each page.
+ */
+static void pmbus_find_sensor_groups(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int page;
+
+ /* Sensors detected on page 0 only */
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_VIN))
+ info->func[0] |= PMBUS_HAVE_VIN;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_VCAP))
+ info->func[0] |= PMBUS_HAVE_VCAP;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_IIN))
+ info->func[0] |= PMBUS_HAVE_IIN;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_PIN))
+ info->func[0] |= PMBUS_HAVE_PIN;
+ if (info->func[0]
+ && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
+ info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+ info->func[0] |= PMBUS_HAVE_FAN12;
+ if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
+ info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
+ }
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+ info->func[0] |= PMBUS_HAVE_FAN34;
+ if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
+ info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
+ }
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) {
+ info->func[0] |= PMBUS_HAVE_TEMP;
+ if (pmbus_check_byte_register(client, 0,
+ PMBUS_STATUS_TEMPERATURE))
+ info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
+ }
+
+ /* Sensors detected on all pages */
+ for (page = 0; page < info->pages; page++) {
+ if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) {
+ info->func[page] |= PMBUS_HAVE_VOUT;
+ if (pmbus_check_byte_register(client, page,
+ PMBUS_STATUS_VOUT))
+ info->func[page] |= PMBUS_HAVE_STATUS_VOUT;
+ }
+ if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) {
+ info->func[page] |= PMBUS_HAVE_IOUT;
+ if (pmbus_check_byte_register(client, 0,
+ PMBUS_STATUS_IOUT))
+ info->func[page] |= PMBUS_HAVE_STATUS_IOUT;
+ }
+ if (pmbus_check_word_register(client, page, PMBUS_READ_POUT))
+ info->func[page] |= PMBUS_HAVE_POUT;
+ }
+}
+
+/*
+ * Identify chip parameters.
+ */
+static int pmbus_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ if (!info->pages) {
+ /*
+ * Check if the PAGE command is supported. If it is,
+ * keep setting the page number until it fails or until the
+ * maximum number of pages has been reached. Assume that
+ * this is the number of pages supported by the chip.
+ */
+ if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
+ int page;
+
+ for (page = 1; page < PMBUS_PAGES; page++) {
+ if (pmbus_set_page(client, page) < 0)
+ break;
+ }
+ pmbus_set_page(client, 0);
+ info->pages = page;
+ } else {
+ info->pages = 1;
+ }
+ }
+
+ /*
+ * We should check if the COEFFICIENTS register is supported.
+ * If it is, and the chip is configured for direct mode, we can read
+ * the coefficients from the chip, one set per group of sensor
+ * registers.
+ *
+ * To do this, we will need access to a chip which actually supports the
+ * COEFFICIENTS command, since the command is too complex to implement
+ * without testing it.
+ */
+
+ /* Try to find sensor groups */
+ pmbus_find_sensor_groups(client, info);
+
+ return 0;
+}
+
+static int pmbus_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pmbus_driver_info *info;
+ int ret;
+
+ info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pages = id->driver_data;
+ info->identify = pmbus_identify;
+
+ ret = pmbus_do_probe(client, id, info);
+ if (ret < 0)
+ goto out;
+ return 0;
+
+out:
+ kfree(info);
+ return ret;
+}
+
+static int pmbus_remove(struct i2c_client *client)
+{
+ int ret;
+ const struct pmbus_driver_info *info;
+
+ info = pmbus_get_driver_info(client);
+ ret = pmbus_do_remove(client);
+ kfree(info);
+ return ret;
+}
+
+/*
+ * Use driver_data to set the number of pages supported by the chip.
+ */
+static const struct i2c_device_id pmbus_id[] = {
+ {"bmr450", 1},
+ {"bmr451", 1},
+ {"bmr453", 1},
+ {"bmr454", 1},
+ {"ltc2978", 8},
+ {"pmbus", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pmbus_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver pmbus_driver = {
+ .driver = {
+ .name = "pmbus",
+ },
+ .probe = pmbus_probe,
+ .remove = pmbus_remove,
+ .id_table = pmbus_id,
+};
+
+static int __init pmbus_init(void)
+{
+ return i2c_add_driver(&pmbus_driver);
+}
+
+static void __exit pmbus_exit(void)
+{
+ i2c_del_driver(&pmbus_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("Generic PMBus driver");
+MODULE_LICENSE("GPL");
+module_init(pmbus_init);
+module_exit(pmbus_exit);
diff --git a/drivers/hwmon/pmbus.h b/drivers/hwmon/pmbus.h
new file mode 100644
index 000000000000..a81f7f228762
--- /dev/null
+++ b/drivers/hwmon/pmbus.h
@@ -0,0 +1,313 @@
+/*
+ * pmbus.h - Common defines and structures for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef PMBUS_H
+#define PMBUS_H
+
+/*
+ * Registers
+ */
+#define PMBUS_PAGE 0x00
+#define PMBUS_OPERATION 0x01
+#define PMBUS_ON_OFF_CONFIG 0x02
+#define PMBUS_CLEAR_FAULTS 0x03
+#define PMBUS_PHASE 0x04
+
+#define PMBUS_CAPABILITY 0x19
+#define PMBUS_QUERY 0x1A
+
+#define PMBUS_VOUT_MODE 0x20
+#define PMBUS_VOUT_COMMAND 0x21
+#define PMBUS_VOUT_TRIM 0x22
+#define PMBUS_VOUT_CAL_OFFSET 0x23
+#define PMBUS_VOUT_MAX 0x24
+#define PMBUS_VOUT_MARGIN_HIGH 0x25
+#define PMBUS_VOUT_MARGIN_LOW 0x26
+#define PMBUS_VOUT_TRANSITION_RATE 0x27
+#define PMBUS_VOUT_DROOP 0x28
+#define PMBUS_VOUT_SCALE_LOOP 0x29
+#define PMBUS_VOUT_SCALE_MONITOR 0x2A
+
+#define PMBUS_COEFFICIENTS 0x30
+#define PMBUS_POUT_MAX 0x31
+
+#define PMBUS_FAN_CONFIG_12 0x3A
+#define PMBUS_FAN_COMMAND_1 0x3B
+#define PMBUS_FAN_COMMAND_2 0x3C
+#define PMBUS_FAN_CONFIG_34 0x3D
+#define PMBUS_FAN_COMMAND_3 0x3E
+#define PMBUS_FAN_COMMAND_4 0x3F
+
+#define PMBUS_VOUT_OV_FAULT_LIMIT 0x40
+#define PMBUS_VOUT_OV_FAULT_RESPONSE 0x41
+#define PMBUS_VOUT_OV_WARN_LIMIT 0x42
+#define PMBUS_VOUT_UV_WARN_LIMIT 0x43
+#define PMBUS_VOUT_UV_FAULT_LIMIT 0x44
+#define PMBUS_VOUT_UV_FAULT_RESPONSE 0x45
+#define PMBUS_IOUT_OC_FAULT_LIMIT 0x46
+#define PMBUS_IOUT_OC_FAULT_RESPONSE 0x47
+#define PMBUS_IOUT_OC_LV_FAULT_LIMIT 0x48
+#define PMBUS_IOUT_OC_LV_FAULT_RESPONSE 0x49
+#define PMBUS_IOUT_OC_WARN_LIMIT 0x4A
+#define PMBUS_IOUT_UC_FAULT_LIMIT 0x4B
+#define PMBUS_IOUT_UC_FAULT_RESPONSE 0x4C
+
+#define PMBUS_OT_FAULT_LIMIT 0x4F
+#define PMBUS_OT_FAULT_RESPONSE 0x50
+#define PMBUS_OT_WARN_LIMIT 0x51
+#define PMBUS_UT_WARN_LIMIT 0x52
+#define PMBUS_UT_FAULT_LIMIT 0x53
+#define PMBUS_UT_FAULT_RESPONSE 0x54
+#define PMBUS_VIN_OV_FAULT_LIMIT 0x55
+#define PMBUS_VIN_OV_FAULT_RESPONSE 0x56
+#define PMBUS_VIN_OV_WARN_LIMIT 0x57
+#define PMBUS_VIN_UV_WARN_LIMIT 0x58
+#define PMBUS_VIN_UV_FAULT_LIMIT 0x59
+
+#define PMBUS_IIN_OC_FAULT_LIMIT 0x5B
+#define PMBUS_IIN_OC_WARN_LIMIT 0x5D
+
+#define PMBUS_POUT_OP_FAULT_LIMIT 0x68
+#define PMBUS_POUT_OP_WARN_LIMIT 0x6A
+#define PMBUS_PIN_OP_WARN_LIMIT 0x6B
+
+#define PMBUS_STATUS_BYTE 0x78
+#define PMBUS_STATUS_WORD 0x79
+#define PMBUS_STATUS_VOUT 0x7A
+#define PMBUS_STATUS_IOUT 0x7B
+#define PMBUS_STATUS_INPUT 0x7C
+#define PMBUS_STATUS_TEMPERATURE 0x7D
+#define PMBUS_STATUS_CML 0x7E
+#define PMBUS_STATUS_OTHER 0x7F
+#define PMBUS_STATUS_MFR_SPECIFIC 0x80
+#define PMBUS_STATUS_FAN_12 0x81
+#define PMBUS_STATUS_FAN_34 0x82
+
+#define PMBUS_READ_VIN 0x88
+#define PMBUS_READ_IIN 0x89
+#define PMBUS_READ_VCAP 0x8A
+#define PMBUS_READ_VOUT 0x8B
+#define PMBUS_READ_IOUT 0x8C
+#define PMBUS_READ_TEMPERATURE_1 0x8D
+#define PMBUS_READ_TEMPERATURE_2 0x8E
+#define PMBUS_READ_TEMPERATURE_3 0x8F
+#define PMBUS_READ_FAN_SPEED_1 0x90
+#define PMBUS_READ_FAN_SPEED_2 0x91
+#define PMBUS_READ_FAN_SPEED_3 0x92
+#define PMBUS_READ_FAN_SPEED_4 0x93
+#define PMBUS_READ_DUTY_CYCLE 0x94
+#define PMBUS_READ_FREQUENCY 0x95
+#define PMBUS_READ_POUT 0x96
+#define PMBUS_READ_PIN 0x97
+
+#define PMBUS_REVISION 0x98
+#define PMBUS_MFR_ID 0x99
+#define PMBUS_MFR_MODEL 0x9A
+#define PMBUS_MFR_REVISION 0x9B
+#define PMBUS_MFR_LOCATION 0x9C
+#define PMBUS_MFR_DATE 0x9D
+#define PMBUS_MFR_SERIAL 0x9E
+
+/*
+ * CAPABILITY
+ */
+#define PB_CAPABILITY_SMBALERT (1<<4)
+#define PB_CAPABILITY_ERROR_CHECK (1<<7)
+
+/*
+ * VOUT_MODE
+ */
+#define PB_VOUT_MODE_MODE_MASK 0xe0
+#define PB_VOUT_MODE_PARAM_MASK 0x1f
+
+#define PB_VOUT_MODE_LINEAR 0x00
+#define PB_VOUT_MODE_VID 0x20
+#define PB_VOUT_MODE_DIRECT 0x40
+
+/*
+ * Fan configuration
+ */
+#define PB_FAN_2_PULSE_MASK ((1 << 0) | (1 << 1))
+#define PB_FAN_2_RPM (1 << 2)
+#define PB_FAN_2_INSTALLED (1 << 3)
+#define PB_FAN_1_PULSE_MASK ((1 << 4) | (1 << 5))
+#define PB_FAN_1_RPM (1 << 6)
+#define PB_FAN_1_INSTALLED (1 << 7)
+
+/*
+ * STATUS_BYTE, STATUS_WORD (lower)
+ */
+#define PB_STATUS_NONE_ABOVE (1<<0)
+#define PB_STATUS_CML (1<<1)
+#define PB_STATUS_TEMPERATURE (1<<2)
+#define PB_STATUS_VIN_UV (1<<3)
+#define PB_STATUS_IOUT_OC (1<<4)
+#define PB_STATUS_VOUT_OV (1<<5)
+#define PB_STATUS_OFF (1<<6)
+#define PB_STATUS_BUSY (1<<7)
+
+/*
+ * STATUS_WORD (upper)
+ */
+#define PB_STATUS_UNKNOWN (1<<8)
+#define PB_STATUS_OTHER (1<<9)
+#define PB_STATUS_FANS (1<<10)
+#define PB_STATUS_POWER_GOOD_N (1<<11)
+#define PB_STATUS_WORD_MFR (1<<12)
+#define PB_STATUS_INPUT (1<<13)
+#define PB_STATUS_IOUT_POUT (1<<14)
+#define PB_STATUS_VOUT (1<<15)
+
+/*
+ * STATUS_IOUT
+ */
+#define PB_POUT_OP_WARNING (1<<0)
+#define PB_POUT_OP_FAULT (1<<1)
+#define PB_POWER_LIMITING (1<<2)
+#define PB_CURRENT_SHARE_FAULT (1<<3)
+#define PB_IOUT_UC_FAULT (1<<4)
+#define PB_IOUT_OC_WARNING (1<<5)
+#define PB_IOUT_OC_LV_FAULT (1<<6)
+#define PB_IOUT_OC_FAULT (1<<7)
+
+/*
+ * STATUS_VOUT, STATUS_INPUT
+ */
+#define PB_VOLTAGE_UV_FAULT (1<<4)
+#define PB_VOLTAGE_UV_WARNING (1<<5)
+#define PB_VOLTAGE_OV_WARNING (1<<6)
+#define PB_VOLTAGE_OV_FAULT (1<<7)
+
+/*
+ * STATUS_INPUT
+ */
+#define PB_PIN_OP_WARNING (1<<0)
+#define PB_IIN_OC_WARNING (1<<1)
+#define PB_IIN_OC_FAULT (1<<2)
+
+/*
+ * STATUS_TEMPERATURE
+ */
+#define PB_TEMP_UT_FAULT (1<<4)
+#define PB_TEMP_UT_WARNING (1<<5)
+#define PB_TEMP_OT_WARNING (1<<6)
+#define PB_TEMP_OT_FAULT (1<<7)
+
+/*
+ * STATUS_FAN
+ */
+#define PB_FAN_AIRFLOW_WARNING (1<<0)
+#define PB_FAN_AIRFLOW_FAULT (1<<1)
+#define PB_FAN_FAN2_SPEED_OVERRIDE (1<<2)
+#define PB_FAN_FAN1_SPEED_OVERRIDE (1<<3)
+#define PB_FAN_FAN2_WARNING (1<<4)
+#define PB_FAN_FAN1_WARNING (1<<5)
+#define PB_FAN_FAN2_FAULT (1<<6)
+#define PB_FAN_FAN1_FAULT (1<<7)
+
+/*
+ * CML_FAULT_STATUS
+ */
+#define PB_CML_FAULT_OTHER_MEM_LOGIC (1<<0)
+#define PB_CML_FAULT_OTHER_COMM (1<<1)
+#define PB_CML_FAULT_PROCESSOR (1<<3)
+#define PB_CML_FAULT_MEMORY (1<<4)
+#define PB_CML_FAULT_PACKET_ERROR (1<<5)
+#define PB_CML_FAULT_INVALID_DATA (1<<6)
+#define PB_CML_FAULT_INVALID_COMMAND (1<<7)
+
+enum pmbus_sensor_classes {
+ PSC_VOLTAGE_IN = 0,
+ PSC_VOLTAGE_OUT,
+ PSC_CURRENT_IN,
+ PSC_CURRENT_OUT,
+ PSC_POWER,
+ PSC_TEMPERATURE,
+ PSC_FAN,
+ PSC_NUM_CLASSES /* Number of power sensor classes */
+};
+
+#define PMBUS_PAGES 32 /* Per PMBus specification */
+
+/* Functionality bit mask */
+#define PMBUS_HAVE_VIN (1 << 0)
+#define PMBUS_HAVE_VCAP (1 << 1)
+#define PMBUS_HAVE_VOUT (1 << 2)
+#define PMBUS_HAVE_IIN (1 << 3)
+#define PMBUS_HAVE_IOUT (1 << 4)
+#define PMBUS_HAVE_PIN (1 << 5)
+#define PMBUS_HAVE_POUT (1 << 6)
+#define PMBUS_HAVE_FAN12 (1 << 7)
+#define PMBUS_HAVE_FAN34 (1 << 8)
+#define PMBUS_HAVE_TEMP (1 << 9)
+#define PMBUS_HAVE_TEMP2 (1 << 10)
+#define PMBUS_HAVE_TEMP3 (1 << 11)
+#define PMBUS_HAVE_STATUS_VOUT (1 << 12)
+#define PMBUS_HAVE_STATUS_IOUT (1 << 13)
+#define PMBUS_HAVE_STATUS_INPUT (1 << 14)
+#define PMBUS_HAVE_STATUS_TEMP (1 << 15)
+#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
+#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
+
+struct pmbus_driver_info {
+ int pages; /* Total number of pages */
+ bool direct[PSC_NUM_CLASSES];
+ /* true if device uses direct data format
+ for the given sensor class */
+ /*
+ * Support one set of coefficients for each sensor type
+ * Used for chips providing data in direct mode.
+ */
+ int m[PSC_NUM_CLASSES]; /* mantissa for direct data format */
+ int b[PSC_NUM_CLASSES]; /* offset */
+ int R[PSC_NUM_CLASSES]; /* exponent */
+
+ u32 func[PMBUS_PAGES]; /* Functionality, per page */
+ /*
+ * The get_status function maps manufacturing specific status values
+ * into PMBus standard status values.
+ * This function is optional and only necessary if chip specific status
+ * register values have to be mapped into standard PMBus status register
+ * values.
+ */
+ int (*get_status)(struct i2c_client *client, int page, int reg);
+ /*
+ * The identify function determines supported PMBus functionality.
+ * This function is only necessary if a chip driver supports multiple
+ * chips, and the chip functionality is not pre-determined.
+ */
+ int (*identify)(struct i2c_client *client,
+ struct pmbus_driver_info *info);
+};
+
+/* Function declarations */
+
+int pmbus_set_page(struct i2c_client *client, u8 page);
+int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
+void pmbus_clear_faults(struct i2c_client *client);
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
+bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info);
+int pmbus_do_remove(struct i2c_client *client);
+const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
+ *client);
+
+#endif /* PMBUS_H */
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
new file mode 100644
index 000000000000..6474512f49b0
--- /dev/null
+++ b/drivers/hwmon/pmbus_core.c
@@ -0,0 +1,1658 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/delay.h>
+#include <linux/i2c/pmbus.h>
+#include "pmbus.h"
+
+/*
+ * Constants needed to determine number of sensors, booleans, and labels.
+ */
+#define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */
+#define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit,
+ crit */
+#define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */
+#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */
+#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
+#define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit,
+ crit */
+
+#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm;
+ c: alarm, crit_alarm;
+ p: crit_alarm */
+#define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm */
+#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
+ crit_alarm */
+#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */
+#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
+#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm */
+
+#define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */
+
+/*
+ * status, status_vout, status_iout, status_fans, status_fan34, and status_temp
+ * are paged. status_input is unpaged.
+ */
+#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1)
+
+/*
+ * Index into status register array, per status register group
+ */
+#define PB_STATUS_BASE 0
+#define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES)
+#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
+#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
+#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES)
+#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
+#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
+
+struct pmbus_sensor {
+ char name[I2C_NAME_SIZE]; /* sysfs sensor name */
+ struct sensor_device_attribute attribute;
+ u8 page; /* page number */
+ u8 reg; /* register */
+ enum pmbus_sensor_classes class; /* sensor class */
+ bool update; /* runtime sensor update needed */
+ int data; /* Sensor data.
+ Negative if there was a read error */
+};
+
+struct pmbus_boolean {
+ char name[I2C_NAME_SIZE]; /* sysfs boolean name */
+ struct sensor_device_attribute attribute;
+};
+
+struct pmbus_label {
+ char name[I2C_NAME_SIZE]; /* sysfs label name */
+ struct sensor_device_attribute attribute;
+ char label[I2C_NAME_SIZE]; /* label */
+};
+
+struct pmbus_data {
+ struct device *hwmon_dev;
+
+ u32 flags; /* from platform data */
+
+ int exponent; /* linear mode: exponent for output voltages */
+
+ const struct pmbus_driver_info *info;
+
+ int max_attributes;
+ int num_attributes;
+ struct attribute **attributes;
+ struct attribute_group group;
+
+ /*
+ * Sensors cover both sensor and limit registers.
+ */
+ int max_sensors;
+ int num_sensors;
+ struct pmbus_sensor *sensors;
+ /*
+ * Booleans are used for alarms.
+ * Values are determined from status registers.
+ */
+ int max_booleans;
+ int num_booleans;
+ struct pmbus_boolean *booleans;
+ /*
+ * Labels are used to map generic names (e.g., "in1")
+ * to PMBus specific names (e.g., "vin" or "vout1").
+ */
+ int max_labels;
+ int num_labels;
+ struct pmbus_label *labels;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ /*
+ * A single status register covers multiple attributes,
+ * so we keep them all together.
+ */
+ u8 status_bits;
+ u8 status[PB_NUM_STATUS_REG];
+
+ u8 currpage;
+};
+
+int pmbus_set_page(struct i2c_client *client, u8 page)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ int rv = 0;
+ int newpage;
+
+ if (page != data->currpage) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ if (newpage != page)
+ rv = -EINVAL;
+ else
+ data->currpage = page;
+ }
+ return rv;
+}
+EXPORT_SYMBOL_GPL(pmbus_set_page);
+
+static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_write_byte(client, value);
+}
+
+static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
+ u16 word)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_write_word_data(client, reg, word);
+}
+
+int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_read_word_data(client, reg);
+}
+EXPORT_SYMBOL_GPL(pmbus_read_word_data);
+
+static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static void pmbus_clear_fault_page(struct i2c_client *client, int page)
+{
+ pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
+}
+
+void pmbus_clear_faults(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < data->info->pages; i++)
+ pmbus_clear_fault_page(client, i);
+}
+EXPORT_SYMBOL_GPL(pmbus_clear_faults);
+
+static int pmbus_check_status_cml(struct i2c_client *client, int page)
+{
+ int status, status2;
+
+ status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
+ if (status < 0 || (status & PB_STATUS_CML)) {
+ status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML);
+ if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ rv = pmbus_read_byte_data(client, page, reg);
+ if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
+ rv = pmbus_check_status_cml(client, page);
+ pmbus_clear_fault_page(client, page);
+ return rv >= 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
+
+bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ rv = pmbus_read_word_data(client, page, reg);
+ if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
+ rv = pmbus_check_status_cml(client, page);
+ pmbus_clear_fault_page(client, page);
+ return rv >= 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_check_word_register);
+
+const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ return data->info;
+}
+EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
+
+static int pmbus_get_status(struct i2c_client *client, int page, int reg)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->get_status) {
+ status = info->get_status(client, page, reg);
+ if (status != -ENODATA)
+ return status;
+ }
+ return pmbus_read_byte_data(client, page, reg);
+}
+
+static struct pmbus_data *pmbus_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+
+ mutex_lock(&data->update_lock);
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ int i;
+
+ for (i = 0; i < info->pages; i++)
+ data->status[PB_STATUS_BASE + i]
+ = pmbus_read_byte_data(client, i,
+ PMBUS_STATUS_BYTE);
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT))
+ continue;
+ data->status[PB_STATUS_VOUT_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_VOUT);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT))
+ continue;
+ data->status[PB_STATUS_IOUT_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_IOUT);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP))
+ continue;
+ data->status[PB_STATUS_TEMP_BASE + i]
+ = pmbus_get_status(client, i,
+ PMBUS_STATUS_TEMPERATURE);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12))
+ continue;
+ data->status[PB_STATUS_FAN_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_FAN_12);
+ }
+
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34))
+ continue;
+ data->status[PB_STATUS_FAN34_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_FAN_34);
+ }
+
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ data->status[PB_STATUS_INPUT_BASE]
+ = pmbus_get_status(client, 0, PMBUS_STATUS_INPUT);
+
+ for (i = 0; i < data->num_sensors; i++) {
+ struct pmbus_sensor *sensor = &data->sensors[i];
+
+ if (!data->valid || sensor->update)
+ sensor->data
+ = pmbus_read_word_data(client, sensor->page,
+ sensor->reg);
+ }
+ pmbus_clear_faults(client);
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+/*
+ * Convert linear sensor values to milli- or micro-units
+ * depending on sensor type.
+ */
+static int pmbus_reg2data_linear(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ s16 exponent;
+ s32 mantissa;
+ long val;
+
+ if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
+ exponent = data->exponent;
+ mantissa = (u16) sensor->data;
+ } else { /* LINEAR11 */
+ exponent = (sensor->data >> 11) & 0x001f;
+ mantissa = sensor->data & 0x07ff;
+
+ if (exponent > 0x0f)
+ exponent |= 0xffe0; /* sign extend exponent */
+ if (mantissa > 0x03ff)
+ mantissa |= 0xfffff800; /* sign extend mantissa */
+ }
+
+ val = mantissa;
+
+ /* scale result to milli-units for all sensors except fans */
+ if (sensor->class != PSC_FAN)
+ val = val * 1000L;
+
+ /* scale result to micro-units for power sensors */
+ if (sensor->class == PSC_POWER)
+ val = val * 1000L;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return (int)val;
+}
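A worked example of the LINEAR11 branch above (a stand-alone userspace sketch, not part of the patch): the word 0xd3c0 carries a 5-bit exponent of -6 and an 11-bit mantissa of 960, so the driver reports 960 * 2^-6 = 15 V, i.e. 15000 milli-units.

#include <stdio.h>
#include <stdint.h>

/* Decode a LINEAR11 word to milli-units, mirroring the branch above. */
static long linear11_to_milli(uint16_t word)
{
	int exponent = (word >> 11) & 0x1f;
	int32_t mantissa = word & 0x7ff;
	long val;

	if (exponent > 0x0f)
		exponent |= ~0x1f;		/* sign extend exponent */
	if (mantissa > 0x3ff)
		mantissa |= ~0x7ff;		/* sign extend mantissa */

	val = (long)mantissa * 1000;		/* scale to milli-units */
	return exponent >= 0 ? val << exponent : val >> -exponent;
}

int main(void)
{
	printf("%ld\n", linear11_to_milli(0xd3c0));	/* prints 15000 */
	return 0;
}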
+
+/*
+ * Convert direct sensor values to milli- or micro-units
+ * depending on sensor type.
+ */
+static int pmbus_reg2data_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ long val = (s16) sensor->data;
+ long m, b, R;
+
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+ R = data->info->R[sensor->class];
+
+ if (m == 0)
+ return 0;
+
+ /* X = 1/m * (Y * 10^-R - b) */
+ R = -R;
+ /* scale result to milli-units for everything but fans */
+ if (sensor->class != PSC_FAN) {
+ R += 3;
+ b *= 1000;
+ }
+
+ /* scale result to micro-units for power sensors */
+ if (sensor->class == PSC_POWER) {
+ R += 3;
+ b *= 1000;
+ }
+
+ while (R > 0) {
+ val *= 10;
+ R--;
+ }
+ while (R < 0) {
+ val = DIV_ROUND_CLOSEST(val, 10);
+ R++;
+ }
+
+ return (int)((val - b) / m);
+}
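For the direct format, the same conversion can be walked through with the MAX8688 output-voltage coefficients declared earlier in this patch (m = 19995, b = 0, R = -1); the reading of 24000 used below is just an assumed register value. Sketch only:

#include <stdio.h>

/* Direct-format decode to milli-units: X = (Y * 10^-R - b) / m */
static int direct_to_milli(int word, long m, long b, long R)
{
	long val = (short)word;

	R = -R + 3;		/* 10^-R, plus 3 for the milli-unit scaling */
	b *= 1000;
	while (R > 0) {
		val *= 10;
		R--;
	}
	while (R < 0) {
		val = (val + 5) / 10;	/* nearest, for positive values */
		R++;
	}
	return (int)((val - b) / m);
}

int main(void)
{
	/* READ_VOUT of 24000 with m=19995, b=0, R=-1 -> about 12003 mV */
	printf("%d\n", direct_to_milli(24000, 19995, 0, -1));
	return 0;
}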
+
+static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+{
+ int val;
+
+ if (data->info->direct[sensor->class])
+ val = pmbus_reg2data_direct(data, sensor);
+ else
+ val = pmbus_reg2data_linear(data, sensor);
+
+ return val;
+}
+
+#define MAX_MANTISSA (1023 * 1000)
+#define MIN_MANTISSA (511 * 1000)
+
+static u16 pmbus_data2reg_linear(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ s16 exponent = 0, mantissa;
+ bool negative = false;
+
+ /* simple case */
+ if (val == 0)
+ return 0;
+
+ if (class == PSC_VOLTAGE_OUT) {
+ /* LINEAR16 does not support negative voltages */
+ if (val < 0)
+ return 0;
+
+ /*
+ * For a static exponent, we have no choice
+ * but to adjust the value to it.
+ */
+ if (data->exponent < 0)
+ val <<= -data->exponent;
+ else
+ val >>= data->exponent;
+ val = DIV_ROUND_CLOSEST(val, 1000);
+ return val & 0xffff;
+ }
+
+ if (val < 0) {
+ negative = true;
+ val = -val;
+ }
+
+ /* Power is in uW. Convert to mW before converting. */
+ if (class == PSC_POWER)
+ val = DIV_ROUND_CLOSEST(val, 1000L);
+
+ /*
+ * For simplicity, convert fan data to milli-units
+ * before calculating the exponent.
+ */
+ if (class == PSC_FAN)
+ val = val * 1000;
+
+ /* Reduce large mantissa until it fits into 10 bit */
+ while (val >= MAX_MANTISSA && exponent < 15) {
+ exponent++;
+ val >>= 1;
+ }
+ /* Increase small mantissa to improve precision */
+ while (val < MIN_MANTISSA && exponent > -15) {
+ exponent--;
+ val <<= 1;
+ }
+
+ /* Convert mantissa from milli-units to units */
+ mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+ /* Ensure that resulting number is within range */
+ if (mantissa > 0x3ff)
+ mantissa = 0x3ff;
+
+ /* restore sign */
+ if (negative)
+ mantissa = -mantissa;
+
+ /* Convert to 5 bit exponent, 11 bit mantissa */
+ return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
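Going the other way, a write through the LINEAR11 encoder above proceeds as follows (illustrative value, not from the patch): a 12000 milli-unit limit is shifted up to 768000 with exponent -6, the mantissa is rounded to 768, and the packed word is 0xd300. Masking the five-bit exponent before shifting, as done below, yields the same bits as the driver's mask with 0xf800 after the shift.

#include <assert.h>

int main(void)
{
	/* 12000 -> mantissa 768, exponent -6; (-6 & 0x1f) == 0x1a */
	unsigned short reg = (768 & 0x7ff) | ((-6 & 0x1f) << 11);

	assert(reg == 0xd300);	/* reads back as 768 * 2^-6 = 12.000 */
	return 0;
}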
+
+static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ long m, b, R;
+
+ m = data->info->m[class];
+ b = data->info->b[class];
+ R = data->info->R[class];
+
+ /* Power is in uW. Adjust R and b. */
+ if (class == PSC_POWER) {
+ R -= 3;
+ b *= 1000;
+ }
+
+ /* Calculate Y = (m * X + b) * 10^R */
+ if (class != PSC_FAN) {
+ R -= 3; /* Adjust R and b for data in milli-units */
+ b *= 1000;
+ }
+ val = val * m + b;
+
+ while (R > 0) {
+ val *= 10;
+ R--;
+ }
+ while (R < 0) {
+ val = DIV_ROUND_CLOSEST(val, 10);
+ R++;
+ }
+
+ return val;
+}
+
+static u16 pmbus_data2reg(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ u16 regval;
+
+ if (data->info->direct[class])
+ regval = pmbus_data2reg_direct(data, class, val);
+ else
+ regval = pmbus_data2reg_linear(data, class, val);
+
+ return regval;
+}
+
+/*
+ * Return boolean calculated from converted data.
+ * <index> defines a status register index and mask, and optionally
+ * two sensor indexes.
+ * The upper half-word references the two optional sensors,
+ * the lower half word references status register and mask.
+ *
+ * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of
+ * index are set. s1 and s2 (the sensor index values) are zero in this case.
+ * The function returns true if (status[reg] & mask) is true.
+ *
+ * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against
+ * a specified limit has to be performed to determine the boolean result.
+ * In this case, the function returns true if v1 >= v2 (where v1 and v2 are
+ * sensor values referenced by sensor indices s1 and s2).
+ *
+ * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
+ * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
+ *
+ * If a negative value is stored in any of the referenced registers, this value
+ * reflects an error code which will be returned.
+ */
+static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
+{
+ u8 s1 = (index >> 24) & 0xff;
+ u8 s2 = (index >> 16) & 0xff;
+ u8 reg = (index >> 8) & 0xff;
+ u8 mask = index & 0xff;
+ int status;
+ u8 regval;
+
+ status = data->status[reg];
+ if (status < 0)
+ return status;
+
+ regval = status & mask;
+ if (!s1 && !s2)
+ *val = !!regval;
+ else {
+ int v1, v2;
+ struct pmbus_sensor *sensor1, *sensor2;
+
+ sensor1 = &data->sensors[s1];
+ if (sensor1->data < 0)
+ return sensor1->data;
+ sensor2 = &data->sensors[s2];
+ if (sensor2->data < 0)
+ return sensor2->data;
+
+ v1 = pmbus_reg2data(data, sensor1);
+ v2 = pmbus_reg2data(data, sensor2);
+ *val = !!(regval && v1 >= v2);
+ }
+ return 0;
+}
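The <index> layout consumed above can be sanity-checked in isolation; the sensor numbers and mask below are made up, only the packing scheme used by pmbus_add_boolean_cmp() further down is taken from the patch.

#include <assert.h>

#define PACK_CMP(s1, s2, reg, mask) \
	(((s1) << 24) | ((s2) << 16) | ((reg) << 8) | (mask))

int main(void)
{
	/* hypothetical: sensors 5 (value) and 6 (limit), status slot 161 */
	int index = PACK_CMP(5, 6, 161, 0x40);

	assert(((index >> 24) & 0xff) == 5);	/* s1: measured value   */
	assert(((index >> 16) & 0xff) == 6);	/* s2: limit sensor     */
	assert(((index >> 8) & 0xff) == 161);	/* status register slot */
	assert((index & 0xff) == 0x40);		/* status bit mask      */
	return 0;
}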
+
+static ssize_t pmbus_show_boolean(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_data *data = pmbus_update_device(dev);
+ int val;
+ int err;
+
+ err = pmbus_get_boolean(data, attr->index, &val);
+ if (err)
+ return err;
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t pmbus_show_sensor(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_data *data = pmbus_update_device(dev);
+ struct pmbus_sensor *sensor;
+
+ sensor = &data->sensors[attr->index];
+ if (sensor->data < 0)
+ return sensor->data;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+}
+
+static ssize_t pmbus_set_sensor(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct pmbus_sensor *sensor = &data->sensors[attr->index];
+ ssize_t rv = count;
+ long val = 0;
+ int ret;
+ u16 regval;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ regval = pmbus_data2reg(data, sensor->class, val);
+ ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
+ if (ret < 0)
+ rv = ret;
+ else
+ data->sensors[attr->index].data = regval;
+ mutex_unlock(&data->update_lock);
+ return rv;
+}
+
+static ssize_t pmbus_show_label(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ data->labels[attr->index].label);
+}
+
+#define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
+do { \
+ struct sensor_device_attribute *a \
+ = &data->_type##s[data->num_##_type##s].attribute; \
+ BUG_ON(data->num_attributes >= data->max_attributes); \
+ a->dev_attr.attr.name = _name; \
+ a->dev_attr.attr.mode = _mode; \
+ a->dev_attr.show = _show; \
+ a->dev_attr.store = _set; \
+ a->index = _idx; \
+ data->attributes[data->num_attributes] = &a->dev_attr.attr; \
+ data->num_attributes++; \
+} while (0)
+
+#define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \
+ PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \
+ pmbus_show_##_type, NULL)
+
+#define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \
+ PMBUS_ADD_ATTR(data, _name, _idx, S_IWUSR | S_IRUGO, _type, \
+ pmbus_show_##_type, pmbus_set_##_type)
+
+static void pmbus_add_boolean(struct pmbus_data *data,
+ const char *name, const char *type, int seq,
+ int idx)
+{
+ struct pmbus_boolean *boolean;
+
+ BUG_ON(data->num_booleans >= data->max_booleans);
+
+ boolean = &data->booleans[data->num_booleans];
+
+ snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
+ name, seq, type);
+ PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx);
+ data->num_booleans++;
+}
+
+static void pmbus_add_boolean_reg(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int reg, int bit)
+{
+ pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit);
+}
+
+static void pmbus_add_boolean_cmp(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int i1, int i2, int reg, int mask)
+{
+ pmbus_add_boolean(data, name, type, seq,
+ (i1 << 24) | (i2 << 16) | (reg << 8) | mask);
+}
+
+static void pmbus_add_sensor(struct pmbus_data *data,
+ const char *name, const char *type, int seq,
+ int page, int reg, enum pmbus_sensor_classes class,
+ bool update)
+{
+ struct pmbus_sensor *sensor;
+
+ BUG_ON(data->num_sensors >= data->max_sensors);
+
+ sensor = &data->sensors[data->num_sensors];
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
+ name, seq, type);
+ sensor->page = page;
+ sensor->reg = reg;
+ sensor->class = class;
+ sensor->update = update;
+ if (update)
+ PMBUS_ADD_GET_ATTR(data, sensor->name, sensor,
+ data->num_sensors);
+ else
+ PMBUS_ADD_SET_ATTR(data, sensor->name, sensor,
+ data->num_sensors);
+ data->num_sensors++;
+}
+
+static void pmbus_add_label(struct pmbus_data *data,
+ const char *name, int seq,
+ const char *lstring, int index)
+{
+ struct pmbus_label *label;
+
+ BUG_ON(data->num_labels >= data->max_labels);
+
+ label = &data->labels[data->num_labels];
+ snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
+ if (!index)
+ strncpy(label->label, lstring, sizeof(label->label) - 1);
+ else
+ snprintf(label->label, sizeof(label->label), "%s%d", lstring,
+ index);
+
+ PMBUS_ADD_GET_ATTR(data, label->name, label, data->num_labels);
+ data->num_labels++;
+}
+
+static const int pmbus_temp_registers[] = {
+ PMBUS_READ_TEMPERATURE_1,
+ PMBUS_READ_TEMPERATURE_2,
+ PMBUS_READ_TEMPERATURE_3
+};
+
+static const int pmbus_temp_flags[] = {
+ PMBUS_HAVE_TEMP,
+ PMBUS_HAVE_TEMP2,
+ PMBUS_HAVE_TEMP3
+};
+
+static const int pmbus_fan_registers[] = {
+ PMBUS_READ_FAN_SPEED_1,
+ PMBUS_READ_FAN_SPEED_2,
+ PMBUS_READ_FAN_SPEED_3,
+ PMBUS_READ_FAN_SPEED_4
+};
+
+static const int pmbus_fan_config_registers[] = {
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_34,
+ PMBUS_FAN_CONFIG_34
+};
+
+static const int pmbus_fan_status_registers[] = {
+ PMBUS_STATUS_FAN_12,
+ PMBUS_STATUS_FAN_12,
+ PMBUS_STATUS_FAN_34,
+ PMBUS_STATUS_FAN_34
+};
+
+static const u32 pmbus_fan_flags[] = {
+ PMBUS_HAVE_FAN12,
+ PMBUS_HAVE_FAN12,
+ PMBUS_HAVE_FAN34,
+ PMBUS_HAVE_FAN34
+};
+
+static const u32 pmbus_fan_status_flags[] = {
+ PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN34,
+ PMBUS_HAVE_STATUS_FAN34
+};
+
+/*
+ * Determine maximum number of sensors, booleans, and labels.
+ * To keep things simple, only make a rough high estimate.
+ */
+static void pmbus_find_max_attr(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int page, max_sensors, max_booleans, max_labels;
+
+ max_sensors = PMBUS_MAX_INPUT_SENSORS;
+ max_booleans = PMBUS_MAX_INPUT_BOOLEANS;
+ max_labels = PMBUS_MAX_INPUT_LABELS;
+
+ for (page = 0; page < info->pages; page++) {
+ if (info->func[page] & PMBUS_HAVE_VOUT) {
+ max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_IOUT) {
+ max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_POUT) {
+ max_sensors += PMBUS_POUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_FAN12) {
+ max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
+ max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
+ }
+ if (info->func[page] & PMBUS_HAVE_FAN34) {
+ max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
+ max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP) {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP2) {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP3) {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ }
+ data->max_sensors = max_sensors;
+ data->max_booleans = max_booleans;
+ data->max_labels = max_labels;
+ data->max_attributes = max_sensors + max_booleans + max_labels;
+}
+
+/*
+ * Search for attributes. Allocate sensors, booleans, and labels as needed.
+ */
+static void pmbus_find_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int page, i0, i1, in_index;
+
+ /*
+ * Input voltage sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_VIN) {
+ bool have_alarm = false;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "in", in_index, "vin", 0);
+ pmbus_add_sensor(data, "in", "input", in_index,
+ 0, PMBUS_READ_VIN, PSC_VOLTAGE_IN, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_UV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "min", in_index,
+ 0, PMBUS_VIN_UV_WARN_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "min_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_UV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_UV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "lcrit", in_index,
+ 0, PMBUS_VIN_UV_FAULT_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_UV_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_OV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "max", in_index,
+ 0, PMBUS_VIN_OV_WARN_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "max_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_OV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_OV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "crit", in_index,
+ 0, PMBUS_VIN_OV_FAULT_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "crit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_OV_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "in", "alarm",
+ in_index,
+ PB_STATUS_BASE,
+ PB_STATUS_VIN_UV);
+ in_index++;
+ }
+ if (info->func[0] & PMBUS_HAVE_VCAP) {
+ pmbus_add_label(data, "in", in_index, "vcap", 0);
+ pmbus_add_sensor(data, "in", "input", in_index, 0,
+ PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true);
+ in_index++;
+ }
+
+ /*
+ * Output voltage sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool have_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_VOUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "in", in_index, "vout", page + 1);
+ pmbus_add_sensor(data, "in", "input", in_index, page,
+ PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true);
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_UV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "min", in_index, page,
+ PMBUS_VOUT_UV_WARN_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "min_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_UV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_UV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "lcrit", in_index, page,
+ PMBUS_VOUT_UV_FAULT_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_UV_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_OV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "max", in_index, page,
+ PMBUS_VOUT_OV_WARN_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "max_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_OV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_OV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "crit", in_index, page,
+ PMBUS_VOUT_OV_FAULT_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "crit_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_OV_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "in", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_VOUT_OV);
+ in_index++;
+ }
+
+ /*
+ * Current sensors
+ */
+
+ /*
+ * Input current sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_IIN) {
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "curr", in_index, "iin", 0);
+ pmbus_add_sensor(data, "curr", "input", in_index,
+ 0, PMBUS_READ_IIN, PSC_CURRENT_IN, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_IIN_OC_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "max", in_index,
+ 0, PMBUS_IIN_OC_WARN_LIMIT,
+ PSC_CURRENT_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "curr", "max_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_IIN_OC_WARNING);
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_IIN_OC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "crit", in_index,
+ 0, PMBUS_IIN_OC_FAULT_LIMIT,
+ PSC_CURRENT_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ pmbus_add_boolean_reg(data, "curr",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_IIN_OC_FAULT);
+ }
+ in_index++;
+ }
+
+ /*
+ * Output current sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool have_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_IOUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "curr", in_index, "iout", page + 1);
+ pmbus_add_sensor(data, "curr", "input", in_index, page,
+ PMBUS_READ_IOUT, PSC_CURRENT_OUT, true);
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_OC_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "max", in_index, page,
+ PMBUS_IOUT_OC_WARN_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr", "max_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_OC_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_UC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "lcrit", in_index, page,
+ PMBUS_IOUT_UC_FAULT_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr",
+ "lcrit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_UC_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_OC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "crit", in_index, page,
+ PMBUS_IOUT_OC_FAULT_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_OC_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "curr", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_IOUT_OC);
+ in_index++;
+ }
+
+ /*
+ * Power sensors
+ */
+ /*
+ * Input Power sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_PIN) {
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "power", in_index, "pin", 0);
+ pmbus_add_sensor(data, "power", "input", in_index,
+ 0, PMBUS_READ_PIN, PSC_POWER, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_PIN_OP_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "max", in_index,
+ 0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER,
+ false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ pmbus_add_boolean_reg(data, "power",
+ "alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_PIN_OP_WARNING);
+ }
+ in_index++;
+ }
+
+ /*
+ * Output Power sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool need_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_POUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "power", in_index, "pout", page + 1);
+ pmbus_add_sensor(data, "power", "input", in_index, page,
+ PMBUS_READ_POUT, PSC_POWER, true);
+ /*
+ * Per hwmon sysfs API, power_cap is to be used to limit output
+ * power.
+ * We have two registers related to maximum output power,
+ * PMBUS_POUT_MAX and PMBUS_POUT_OP_WARN_LIMIT.
+ * PMBUS_POUT_MAX matches the powerX_cap attribute definition.
+ * There is no attribute in the API to match
+ * PMBUS_POUT_OP_WARN_LIMIT. We use powerX_max for now.
+ */
+ if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "cap", in_index, page,
+ PMBUS_POUT_MAX, PSC_POWER, false);
+ need_alarm = true;
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_POUT_OP_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "max", in_index, page,
+ PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER,
+ false);
+ need_alarm = true;
+ }
+ if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT))
+ pmbus_add_boolean_reg(data, "power", "alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE + page,
+ PB_POUT_OP_WARNING
+ | PB_POWER_LIMITING);
+
+ if (pmbus_check_word_register(client, page,
+ PMBUS_POUT_OP_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "crit", in_index, page,
+ PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER,
+ false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT)
+ pmbus_add_boolean_reg(data, "power",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE
+ + page,
+ PB_POUT_OP_FAULT);
+ }
+ in_index++;
+ }
+
+ /*
+ * Temperature sensors
+ */
+ in_index = 1;
+ for (page = 0; page < info->pages; page++) {
+ int t;
+
+ for (t = 0; t < ARRAY_SIZE(pmbus_temp_registers); t++) {
+ bool have_alarm = false;
+
+ /*
+ * A PMBus chip may support any combination of
+ * temperature registers on any page. So we can not
+ * abort after a failure to detect a register, but have
+ * to continue checking for all registers on all pages.
+ */
+ if (!(info->func[page] & pmbus_temp_flags[t]))
+ continue;
+
+ if (!pmbus_check_word_register
+ (client, page, pmbus_temp_registers[t]))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "input", in_index, page,
+ pmbus_temp_registers[t],
+ PSC_TEMPERATURE, true);
+
+ /*
+ * PMBus provides only one status register for TEMP1-3.
+ * Thus, we can not use the status register to determine
+ * which of the three sensors actually caused an alarm.
+ * Always compare current temperature against the limit
+ * registers to determine alarm conditions for a
+ * specific sensor.
+ *
+ * Since there is only one set of limit registers for
+ * up to three temperature sensors, we need to update
+ * all limit registers after the limit was changed for
+ * one of the sensors. This ensures that correct limits
+ * are reported for all temperature sensors.
+ */
+ if (pmbus_check_word_register
+ (client, page, PMBUS_UT_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "min", in_index,
+ page, PMBUS_UT_WARN_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "min_alarm", in_index, i1, i0,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_UT_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_UT_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "lcrit",
+ in_index, page,
+ PMBUS_UT_FAULT_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "lcrit_alarm", in_index, i1, i0,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_UT_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register
+ (client, page, PMBUS_OT_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "max", in_index,
+ page, PMBUS_OT_WARN_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "max_alarm", in_index, i0, i1,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_OT_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_OT_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "crit", in_index,
+ page, PMBUS_OT_FAULT_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "crit_alarm", in_index, i0, i1,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_OT_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Last resort - we were not able to create any alarm
+ * registers. Report alarm for all sensors using the
+ * status register temperature alarm bit.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "temp", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_TEMPERATURE);
+ in_index++;
+ }
+ }
+
+ /*
+ * Fans
+ */
+ in_index = 1;
+ for (page = 0; page < info->pages; page++) {
+ int f;
+
+ for (f = 0; f < ARRAY_SIZE(pmbus_fan_registers); f++) {
+ int regval;
+
+ if (!(info->func[page] & pmbus_fan_flags[f]))
+ break;
+
+ if (!pmbus_check_word_register(client, page,
+ pmbus_fan_registers[f])
+ || !pmbus_check_byte_register(client, page,
+ pmbus_fan_config_registers[f]))
+ break;
+
+ /*
+ * Skip fan if not installed.
+ * Each fan configuration register covers multiple fans,
+ * so we have to do some magic.
+ */
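+ /*
+ * The configuration register covers a pair of fans: the bit for
+ * the second fan of the pair (f odd) sits four bits below the one
+ * for the first fan, hence the shift by (f & 1) * 4 below.
+ */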
+ regval = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[f]);
+ if (regval < 0 ||
+ (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4)))))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_sensor(data, "fan", "input", in_index, page,
+ pmbus_fan_registers[f], PSC_FAN, true);
+
+ /*
+ * Each fan status register covers multiple fans,
+ * so we have to do some magic.
+ */
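+ /*
+ * Likewise, the warning and fault bits for the second fan of
+ * each pair sit one bit below those of the first fan, hence
+ * the shift by (f & 1).
+ */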
+ if ((info->func[page] & pmbus_fan_status_flags[f]) &&
+ pmbus_check_byte_register(client,
+ page, pmbus_fan_status_registers[f])) {
+ int base;
+
+ if (f > 1) /* fan 3, 4 */
+ base = PB_STATUS_FAN34_BASE + page;
+ else
+ base = PB_STATUS_FAN_BASE + page;
+ pmbus_add_boolean_reg(data, "fan", "alarm",
+ in_index, base,
+ PB_FAN_FAN1_WARNING >> (f & 1));
+ pmbus_add_boolean_reg(data, "fan", "fault",
+ in_index, base,
+ PB_FAN_FAN1_FAULT >> (f & 1));
+ }
+ in_index++;
+ }
+ }
+}
+
+/*
+ * Identify chip parameters.
+ * This function is called for all chips.
+ */
+static int pmbus_identify_common(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ int vout_mode = -1, exponent;
+
+ if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
+ vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (vout_mode >= 0 && vout_mode != 0xff) {
+ /*
+ * Not all chips support the VOUT_MODE command,
+ * so a failure to read it is not an error.
+ */
+ switch (vout_mode >> 5) {
+ case 0: /* linear mode */
+ if (data->info->direct[PSC_VOLTAGE_OUT])
+ return -ENODEV;
+
+ exponent = vout_mode & 0x1f;
+ /* and sign-extend it */
+ if (exponent & 0x10)
+ exponent |= ~0x1f;
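+ /*
+ * For example, a 5-bit value of 0x1c sign-extends to -4,
+ * i.e. linear-format output voltages are scaled by 2^-4.
+ */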
+ data->exponent = exponent;
+ break;
+ case 2: /* direct mode */
+ if (!data->info->direct[PSC_VOLTAGE_OUT])
+ return -ENODEV;
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+
+ /* Determine maximum number of sensors, booleans, and labels */
+ pmbus_find_max_attr(client, data);
+ pmbus_clear_fault_page(client, 0);
+ return 0;
+}
+
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info)
+{
+ const struct pmbus_platform_data *pdata = client->dev.platform_data;
+ struct pmbus_data *data;
+ int ret;
+
+ if (!info) {
+ dev_err(&client->dev, "Missing chip information");
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+ | I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&client->dev, "No memory to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * Bail out if status register or PMBus revision register
+ * does not exist.
+ */
+ if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
+ || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
+ dev_err(&client->dev,
+ "Status or revision register not found\n");
+ ret = -ENODEV;
+ goto out_data;
+ }
+
+ if (pdata)
+ data->flags = pdata->flags;
+ data->info = info;
+
+ pmbus_clear_faults(client);
+
+ if (info->identify) {
+ ret = (*info->identify)(client, info);
+ if (ret < 0) {
+ dev_err(&client->dev, "Chip identification failed\n");
+ goto out_data;
+ }
+ }
+
+ if (info->pages <= 0 || info->pages > PMBUS_PAGES) {
+ dev_err(&client->dev, "Bad number of PMBus pages: %d\n",
+ info->pages);
+ ret = -EINVAL;
+ goto out_data;
+ }
+ /*
+ * Bail out if more than one page was configured, but we can not
+ * select the highest page. This is an indication that the wrong
+ * chip type was selected. Better bail out now than keep
+ * returning errors later on.
+ */
+ if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) {
+ dev_err(&client->dev, "Failed to select page %d\n",
+ info->pages - 1);
+ ret = -EINVAL;
+ goto out_data;
+ }
+
+ ret = pmbus_identify_common(client, data);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to identify chip capabilities\n");
+ goto out_data;
+ }
+
+ ret = -ENOMEM;
+ data->sensors = kzalloc(sizeof(struct pmbus_sensor) * data->max_sensors,
+ GFP_KERNEL);
+ if (!data->sensors) {
+ dev_err(&client->dev, "No memory to allocate sensor data\n");
+ goto out_data;
+ }
+
+ data->booleans = kzalloc(sizeof(struct pmbus_boolean)
+ * data->max_booleans, GFP_KERNEL);
+ if (!data->booleans) {
+ dev_err(&client->dev, "No memory to allocate boolean data\n");
+ goto out_sensors;
+ }
+
+ data->labels = kzalloc(sizeof(struct pmbus_label) * data->max_labels,
+ GFP_KERNEL);
+ if (!data->labels) {
+ dev_err(&client->dev, "No memory to allocate label data\n");
+ goto out_booleans;
+ }
+
+ data->attributes = kzalloc(sizeof(struct attribute *)
+ * data->max_attributes, GFP_KERNEL);
+ if (!data->attributes) {
+ dev_err(&client->dev, "No memory to allocate attribute data\n");
+ goto out_labels;
+ }
+
+ pmbus_find_attributes(client, data);
+
+ /*
+ * If there are no attributes, something is wrong.
+ * Bail out instead of trying to register nothing.
+ */
+ if (!data->num_attributes) {
+ dev_err(&client->dev, "No attributes found\n");
+ ret = -ENODEV;
+ goto out_attributes;
+ }
+
+ /* Register sysfs hooks */
+ data->group.attrs = data->attributes;
+ ret = sysfs_create_group(&client->dev.kobj, &data->group);
+ if (ret) {
+ dev_err(&client->dev, "Failed to create sysfs entries\n");
+ goto out_attributes;
+ }
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ dev_err(&client->dev, "Failed to register hwmon device\n");
+ goto out_hwmon_device_register;
+ }
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &data->group);
+out_attributes:
+ kfree(data->attributes);
+out_labels:
+ kfree(data->labels);
+out_booleans:
+ kfree(data->booleans);
+out_sensors:
+ kfree(data->sensors);
+out_data:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pmbus_do_probe);
+
+int pmbus_do_remove(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &data->group);
+ kfree(data->attributes);
+ kfree(data->labels);
+ kfree(data->booleans);
+ kfree(data->sensors);
+ kfree(data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_do_remove);
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
new file mode 100644
index 000000000000..9a51dcca9b0d
--- /dev/null
+++ b/drivers/hwmon/sch5627.c
@@ -0,0 +1,858 @@
+/***************************************************************************
+ * Copyright (C) 2010-2011 Hans de Goede <hdegoede@redhat.com> *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ ***************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+
+#define DRVNAME "sch5627"
+#define DEVNAME DRVNAME /* We only support one model */
+
+#define SIO_SCH5627_EM_LD 0x0C /* Embedded Microcontroller LD */
+#define SIO_UNLOCK_KEY 0x55 /* Key to enable Super-I/O */
+#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
+
+#define SIO_REG_LDSEL 0x07 /* Logical device select */
+#define SIO_REG_DEVID 0x20 /* Device ID */
+#define SIO_REG_ENABLE 0x30 /* Logical device enable */
+#define SIO_REG_ADDR 0x66 /* Logical device address (2 bytes) */
+
+#define SIO_SCH5627_ID 0xC6 /* Chipset ID */
+
+#define REGION_LENGTH 9
+
+#define SCH5627_HWMON_ID 0xa5
+#define SCH5627_COMPANY_ID 0x5c
+#define SCH5627_PRIMARY_ID 0xa0
+
+#define SCH5627_REG_BUILD_CODE 0x39
+#define SCH5627_REG_BUILD_ID 0x3a
+#define SCH5627_REG_HWMON_ID 0x3c
+#define SCH5627_REG_HWMON_REV 0x3d
+#define SCH5627_REG_COMPANY_ID 0x3e
+#define SCH5627_REG_PRIMARY_ID 0x3f
+#define SCH5627_REG_CTRL 0x40
+
+#define SCH5627_NO_TEMPS 8
+#define SCH5627_NO_FANS 4
+#define SCH5627_NO_IN 5
+
+static const u16 SCH5627_REG_TEMP_MSB[SCH5627_NO_TEMPS] = {
+ 0x2B, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x180, 0x181 };
+static const u16 SCH5627_REG_TEMP_LSN[SCH5627_NO_TEMPS] = {
+ 0xE2, 0xE1, 0xE1, 0xE5, 0xE5, 0xE6, 0x182, 0x182 };
+static const u16 SCH5627_REG_TEMP_HIGH_NIBBLE[SCH5627_NO_TEMPS] = {
+ 0, 0, 1, 1, 0, 0, 0, 1 };
+static const u16 SCH5627_REG_TEMP_HIGH[SCH5627_NO_TEMPS] = {
+ 0x61, 0x57, 0x59, 0x5B, 0x5D, 0x5F, 0x184, 0x186 };
+static const u16 SCH5627_REG_TEMP_ABS[SCH5627_NO_TEMPS] = {
+ 0x9B, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x1A8, 0x1A9 };
+
+static const u16 SCH5627_REG_FAN[SCH5627_NO_FANS] = {
+ 0x2C, 0x2E, 0x30, 0x32 };
+static const u16 SCH5627_REG_FAN_MIN[SCH5627_NO_FANS] = {
+ 0x62, 0x64, 0x66, 0x68 };
+
+static const u16 SCH5627_REG_IN_MSB[SCH5627_NO_IN] = {
+ 0x22, 0x23, 0x24, 0x25, 0x189 };
+static const u16 SCH5627_REG_IN_LSN[SCH5627_NO_IN] = {
+ 0xE4, 0xE4, 0xE3, 0xE3, 0x18A };
+static const u16 SCH5627_REG_IN_HIGH_NIBBLE[SCH5627_NO_IN] = {
+ 1, 0, 1, 0, 1 };
+static const u16 SCH5627_REG_IN_FACTOR[SCH5627_NO_IN] = {
+ 10745, 3660, 9765, 10745, 3660 };
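+/*
+ * The inX_input values reported by show_in() are
+ * raw_reading * factor / 10000, in millivolts; e.g. a full-scale 12-bit
+ * reading of 4095 with the VCC factor of 10745 reports roughly 4400 mV.
+ */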
+static const char * const SCH5627_IN_LABELS[SCH5627_NO_IN] = {
+ "VCC", "VTT", "VBAT", "VTR", "V_IN" };
+
+struct sch5627_data {
+ unsigned short addr;
+ struct device *hwmon_dev;
+ u8 temp_max[SCH5627_NO_TEMPS];
+ u8 temp_crit[SCH5627_NO_TEMPS];
+ u16 fan_min[SCH5627_NO_FANS];
+
+ struct mutex update_lock;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+ u16 temp[SCH5627_NO_TEMPS];
+ u16 fan[SCH5627_NO_FANS];
+ u16 in[SCH5627_NO_IN];
+};
+
+static struct platform_device *sch5627_pdev;
+
+/* Super I/O functions */
+static inline int superio_inb(int base, int reg)
+{
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static inline int superio_enter(int base)
+{
+ /* Don't step on other drivers' I/O space by accident */
+ if (!request_muxed_region(base, 2, DRVNAME)) {
+ pr_err("I/O address 0x%04x already in use\n", base);
+ return -EBUSY;
+ }
+
+ outb(SIO_UNLOCK_KEY, base);
+
+ return 0;
+}
+
+static inline void superio_select(int base, int ld)
+{
+ outb(SIO_REG_LDSEL, base);
+ outb(ld, base + 1);
+}
+
+static inline void superio_exit(int base)
+{
+ outb(SIO_LOCK_KEY, base);
+ release_region(base, 2);
+}
+
+static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg)
+{
+ u8 val;
+ int i;
+ /*
+ * According to SMSC for the commands we use the maximum time for
+ * the EM to respond is 15 ms, but testing shows in practice it
+ * responds within 15-32 reads, so we first busy poll, and if
+ * that fails sleep a bit and try again until we are way past
+ * the 15 ms maximum response time.
+ */
+ const int max_busy_polls = 64;
+ const int max_lazy_polls = 32;
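+ /* 64 busy polls plus up to 32 x 1 ms sleeps comfortably exceeds 15 ms */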
+
+ /* (Optional) Write-Clear the EC to Host Mailbox Register */
+ val = inb(data->addr + 1);
+ outb(val, data->addr + 1);
+
+ /* Set Mailbox Address Pointer to first location in Region 1 */
+ outb(0x00, data->addr + 2);
+ outb(0x80, data->addr + 3);
+
+ /* Write Request Packet Header */
+ outb(0x02, data->addr + 4); /* Access Type: VREG read */
+ outb(0x01, data->addr + 5); /* # of Entries: 1 Byte (8-bit) */
+ outb(0x04, data->addr + 2); /* Mailbox AP to first data entry loc. */
+
+ /* Write Address field */
+ outb(reg & 0xff, data->addr + 6);
+ outb(reg >> 8, data->addr + 7);
+
+ /* Execute the Random Access Command */
+ outb(0x01, data->addr); /* Write 01h to the Host-to-EC register */
+
+ /* EM Interface Polling "Algorithm" */
+ for (i = 0; i < max_busy_polls + max_lazy_polls; i++) {
+ if (i >= max_busy_polls)
+ msleep(1);
+ /* Read Interrupt source Register */
+ val = inb(data->addr + 8);
+ /* Write Clear the interrupt source bits */
+ if (val)
+ outb(val, data->addr + 8);
+ /* Command Completed ? */
+ if (val & 0x01)
+ break;
+ }
+ if (i == max_busy_polls + max_lazy_polls) {
+ pr_err("Max retries exceeded reading virtual "
+ "register 0x%04hx (%d)\n", reg, 1);
+ return -EIO;
+ }
+
+ /*
+ * According to SMSC we may need to retry this, but so far I've always
+ * seen this succeed in 1 try.
+ */
+ for (i = 0; i < max_busy_polls; i++) {
+ /* Read EC-to-Host Register */
+ val = inb(data->addr + 1);
+ /* Command Completed ? */
+ if (val == 0x01)
+ break;
+
+ if (i == 0)
+ pr_warn("EC reports: 0x%02x reading virtual register "
+ "0x%04hx\n", (unsigned int)val, reg);
+ }
+ if (i == max_busy_polls) {
+ pr_err("Max retries exceeded reading virtual "
+ "register 0x%04hx (%d)\n", reg, 2);
+ return -EIO;
+ }
+
+ /*
+ * According to the SMSC app note we should now do:
+ *
+ * Set Mailbox Address Pointer to first location in Region 1 *
+ * outb(0x00, data->addr + 2);
+ * outb(0x80, data->addr + 3);
+ *
+ * But if we do that things don't work, so let's not.
+ */
+
+ /* Read Data from Mailbox */
+ return inb(data->addr + 4);
+}
+
+static int sch5627_read_virtual_reg16(struct sch5627_data *data, u16 reg)
+{
+ int lsb, msb;
+
+ /* Read LSB first, this will cause the matching MSB to be latched */
+ lsb = sch5627_read_virtual_reg(data, reg);
+ if (lsb < 0)
+ return lsb;
+
+ msb = sch5627_read_virtual_reg(data, reg + 1);
+ if (msb < 0)
+ return msb;
+
+ return lsb | (msb << 8);
+}
+
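+/*
+ * Several 12-bit readings share a single LSN (least significant nibble)
+ * register: the MSB register holds the upper 8 bits, and high_nibble selects
+ * whether the remaining 4 bits live in the high or the low nibble of the
+ * shared register.
+ */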
+static int sch5627_read_virtual_reg12(struct sch5627_data *data, u16 msb_reg,
+ u16 lsn_reg, int high_nibble)
+{
+ int msb, lsn;
+
+ /* Read MSB first, this will cause the matching LSN to be latched */
+ msb = sch5627_read_virtual_reg(data, msb_reg);
+ if (msb < 0)
+ return msb;
+
+ lsn = sch5627_read_virtual_reg(data, lsn_reg);
+ if (lsn < 0)
+ return lsn;
+
+ if (high_nibble)
+ return (msb << 4) | (lsn >> 4);
+ else
+ return (msb << 4) | (lsn & 0x0f);
+}
+
+static struct sch5627_data *sch5627_update_device(struct device *dev)
+{
+ struct sch5627_data *data = dev_get_drvdata(dev);
+ struct sch5627_data *ret = data;
+ int i, val;
+
+ mutex_lock(&data->update_lock);
+
+ /* Cache the values for 1 second */
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ for (i = 0; i < SCH5627_NO_TEMPS; i++) {
+ val = sch5627_read_virtual_reg12(data,
+ SCH5627_REG_TEMP_MSB[i],
+ SCH5627_REG_TEMP_LSN[i],
+ SCH5627_REG_TEMP_HIGH_NIBBLE[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i] = val;
+ }
+
+ for (i = 0; i < SCH5627_NO_FANS; i++) {
+ val = sch5627_read_virtual_reg16(data,
+ SCH5627_REG_FAN[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->fan[i] = val;
+ }
+
+ for (i = 0; i < SCH5627_NO_IN; i++) {
+ val = sch5627_read_virtual_reg12(data,
+ SCH5627_REG_IN_MSB[i],
+ SCH5627_REG_IN_LSN[i],
+ SCH5627_REG_IN_HIGH_NIBBLE[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->in[i] = val;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static int __devinit sch5627_read_limits(struct sch5627_data *data)
+{
+ int i, val;
+
+ for (i = 0; i < SCH5627_NO_TEMPS; i++) {
+ /*
+ * Note: what SMSC calls ABS is what lm_sensors calls max
+ * (aka high), and HIGH is what lm_sensors calls crit.
+ */
+ val = sch5627_read_virtual_reg(data, SCH5627_REG_TEMP_ABS[i]);
+ if (val < 0)
+ return val;
+ data->temp_max[i] = val;
+
+ val = sch5627_read_virtual_reg(data, SCH5627_REG_TEMP_HIGH[i]);
+ if (val < 0)
+ return val;
+ data->temp_crit[i] = val;
+ }
+ for (i = 0; i < SCH5627_NO_FANS; i++) {
+ val = sch5627_read_virtual_reg16(data, SCH5627_REG_FAN_MIN[i]);
+ if (val < 0)
+ return val;
+ data->fan_min[i] = val;
+ }
+
+ return 0;
+}
+
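+/*
+ * Temperature readings are 12-bit values with an LSB of 62.5 milli-degrees C
+ * and an offset of -64 degrees C; e.g. a raw value of 0x400 (1024) is
+ * 0 degrees C. The 8-bit limit registers use a 1 degree C LSB with the
+ * same -64 degree C offset.
+ */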
+static int reg_to_temp(u16 reg)
+{
+ return (reg * 625) / 10 - 64000;
+}
+
+static int reg_to_temp_limit(u8 reg)
+{
+ return (reg - 64) * 1000;
+}
+
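+/*
+ * The fan registers hold a tach count: 0 is treated as a read failure,
+ * 0xffff as a fan fault (fan not spinning), and otherwise
+ * RPM = 5400540 / count, e.g. a count of 5400 is roughly 1000 RPM.
+ */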
+static int reg_to_rpm(u16 reg)
+{
+ if (reg == 0)
+ return -EIO;
+ if (reg == 0xffff)
+ return 0;
+
+ return 5400540 / reg;
+}
+
+static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DEVNAME);
+}
+
+static ssize_t show_temp(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5627_data *data = sch5627_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = reg_to_temp(data->temp[attr->index]);
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_temp_fault(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5627_data *data = sch5627_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", data->temp[attr->index] == 0);
+}
+
+static ssize_t show_temp_max(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5627_data *data = dev_get_drvdata(dev);
+ int val;
+
+ val = reg_to_temp_limit(data->temp_max[attr->index]);
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_temp_crit(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5627_data *data = dev_get_drvdata(dev);
+ int val;
+
+ val = reg_to_temp_limit(data->temp_crit[attr->index]);
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_fan(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5627_data *data = sch5627_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = reg_to_rpm(data->fan[attr->index]);
+ if (val < 0)
+ return val;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_fan_fault(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5627_data *data = sch5627_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ data->fan[attr->index] == 0xffff);
+}
+
+static ssize_t show_fan_min(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5627_data *data = dev_get_drvdata(dev);
+ int val = reg_to_rpm(data->fan_min[attr->index]);
+ if (val < 0)
+ return val;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_in(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5627_data *data = sch5627_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = DIV_ROUND_CLOSEST(
+ data->in[attr->index] * SCH5627_REG_IN_FACTOR[attr->index],
+ 10000);
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_in_label(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ SCH5627_IN_LABELS[attr->index]);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_temp_fault, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_temp_fault, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_temp_fault, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_temp_fault, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_max, S_IRUGO, show_temp_max, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_max, S_IRUGO, show_temp_max, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_max, S_IRUGO, show_temp_max, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp_crit, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, show_temp_crit, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp_crit, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_crit, S_IRUGO, show_temp_crit, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_crit, S_IRUGO, show_temp_crit, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_crit, S_IRUGO, show_temp_crit, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_crit, S_IRUGO, show_temp_crit, NULL, 7);
+
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_fan_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_fault, S_IRUGO, show_fan_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_fault, S_IRUGO, show_fan_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, show_fan_min, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO, show_fan_min, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_min, S_IRUGO, show_fan_min, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_min, S_IRUGO, show_fan_min, NULL, 3);
+
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 4);
+static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_in_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, show_in_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, show_in_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_in_label, NULL, 3);
+
+static struct attribute *sch5627_attributes[] = {
+ &dev_attr_name.attr,
+
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp6_input.dev_attr.attr,
+ &sensor_dev_attr_temp7_input.dev_attr.attr,
+ &sensor_dev_attr_temp8_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ &sensor_dev_attr_temp5_fault.dev_attr.attr,
+ &sensor_dev_attr_temp6_fault.dev_attr.attr,
+ &sensor_dev_attr_temp7_fault.dev_attr.attr,
+ &sensor_dev_attr_temp8_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp5_max.dev_attr.attr,
+ &sensor_dev_attr_temp6_max.dev_attr.attr,
+ &sensor_dev_attr_temp7_max.dev_attr.attr,
+ &sensor_dev_attr_temp8_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit.dev_attr.attr,
+ &sensor_dev_attr_temp6_crit.dev_attr.attr,
+ &sensor_dev_attr_temp7_crit.dev_attr.attr,
+ &sensor_dev_attr_temp8_crit.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_fault.dev_attr.attr,
+ &sensor_dev_attr_fan2_fault.dev_attr.attr,
+ &sensor_dev_attr_fan3_fault.dev_attr.attr,
+ &sensor_dev_attr_fan4_fault.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
+ &sensor_dev_attr_fan3_min.dev_attr.attr,
+ &sensor_dev_attr_fan4_min.dev_attr.attr,
+
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in0_label.dev_attr.attr,
+ &sensor_dev_attr_in1_label.dev_attr.attr,
+ &sensor_dev_attr_in2_label.dev_attr.attr,
+ &sensor_dev_attr_in3_label.dev_attr.attr,
+ /* No in4_label as in4 is a generic input pin */
+
+ NULL
+};
+
+static const struct attribute_group sch5627_group = {
+ .attrs = sch5627_attributes,
+};
+
+static int sch5627_remove(struct platform_device *pdev)
+{
+ struct sch5627_data *data = platform_get_drvdata(pdev);
+
+ if (data->hwmon_dev)
+ hwmon_device_unregister(data->hwmon_dev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &sch5627_group);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+
+ return 0;
+}
+
+static int __devinit sch5627_probe(struct platform_device *pdev)
+{
+ struct sch5627_data *data;
+ int err, build_code, build_id, hwmon_rev, val;
+
+ data = kzalloc(sizeof(struct sch5627_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+ mutex_init(&data->update_lock);
+ platform_set_drvdata(pdev, data);
+
+ val = sch5627_read_virtual_reg(data, SCH5627_REG_HWMON_ID);
+ if (val < 0) {
+ err = val;
+ goto error;
+ }
+ if (val != SCH5627_HWMON_ID) {
+ pr_err("invalid %s id: 0x%02X (expected 0x%02X)\n", "hwmon",
+ val, SCH5627_HWMON_ID);
+ err = -ENODEV;
+ goto error;
+ }
+
+ val = sch5627_read_virtual_reg(data, SCH5627_REG_COMPANY_ID);
+ if (val < 0) {
+ err = val;
+ goto error;
+ }
+ if (val != SCH5627_COMPANY_ID) {
+ pr_err("invalid %s id: 0x%02X (expected 0x%02X)\n", "company",
+ val, SCH5627_COMPANY_ID);
+ err = -ENODEV;
+ goto error;
+ }
+
+ val = sch5627_read_virtual_reg(data, SCH5627_REG_PRIMARY_ID);
+ if (val < 0) {
+ err = val;
+ goto error;
+ }
+ if (val != SCH5627_PRIMARY_ID) {
+ pr_err("invalid %s id: 0x%02X (expected 0x%02X)\n", "primary",
+ val, SCH5627_PRIMARY_ID);
+ err = -ENODEV;
+ goto error;
+ }
+
+ build_code = sch5627_read_virtual_reg(data, SCH5627_REG_BUILD_CODE);
+ if (build_code < 0) {
+ err = build_code;
+ goto error;
+ }
+
+ build_id = sch5627_read_virtual_reg16(data, SCH5627_REG_BUILD_ID);
+ if (build_id < 0) {
+ err = build_id;
+ goto error;
+ }
+
+ hwmon_rev = sch5627_read_virtual_reg(data, SCH5627_REG_HWMON_REV);
+ if (hwmon_rev < 0) {
+ err = hwmon_rev;
+ goto error;
+ }
+
+ val = sch5627_read_virtual_reg(data, SCH5627_REG_CTRL);
+ if (val < 0) {
+ err = val;
+ goto error;
+ }
+ if (!(val & 0x01)) {
+ pr_err("hardware monitoring not enabled\n");
+ err = -ENODEV;
+ goto error;
+ }
+
+ /*
+ * Read limits; we do this only once, as reading a register on
+ * the sch5627 is quite expensive (and they don't change).
+ */
+ err = sch5627_read_limits(data);
+ if (err)
+ goto error;
+
+ pr_info("firmware build: code 0x%02X, id 0x%04X, hwmon: rev 0x%02X\n",
+ build_code, build_id, hwmon_rev);
+
+ /* Register sysfs interface files */
+ err = sysfs_create_group(&pdev->dev.kobj, &sch5627_group);
+ if (err)
+ goto error;
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ data->hwmon_dev = NULL;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ sch5627_remove(pdev);
+ return err;
+}
+
+static int __init sch5627_find(int sioaddr, unsigned short *address)
+{
+ u8 devid;
+ int err = superio_enter(sioaddr);
+ if (err)
+ return err;
+
+ devid = superio_inb(sioaddr, SIO_REG_DEVID);
+ if (devid != SIO_SCH5627_ID) {
+ pr_debug("Unsupported device id: 0x%02x\n",
+ (unsigned int)devid);
+ err = -ENODEV;
+ goto exit;
+ }
+
+ superio_select(sioaddr, SIO_SCH5627_EM_LD);
+
+ if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
+ pr_warn("Device not activated\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ /*
+ * Warning: the order of the low / high byte is the other way
+ * around from that on most other Super-I/O devices!
+ */
+ *address = superio_inb(sioaddr, SIO_REG_ADDR) |
+ superio_inb(sioaddr, SIO_REG_ADDR + 1) << 8;
+ if (*address == 0) {
+ pr_warn("Base address not set\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ pr_info("Found %s chip at %#hx\n", DEVNAME, *address);
+exit:
+ superio_exit(sioaddr);
+ return err;
+}
+
+static int __init sch5627_device_add(unsigned short address)
+{
+ struct resource res = {
+ .start = address,
+ .end = address + REGION_LENGTH - 1,
+ .flags = IORESOURCE_IO,
+ };
+ int err;
+
+ sch5627_pdev = platform_device_alloc(DRVNAME, address);
+ if (!sch5627_pdev)
+ return -ENOMEM;
+
+ res.name = sch5627_pdev->name;
+ err = acpi_check_resource_conflict(&res);
+ if (err)
+ goto exit_device_put;
+
+ err = platform_device_add_resources(sch5627_pdev, &res, 1);
+ if (err) {
+ pr_err("Device resource addition failed\n");
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(sch5627_pdev);
+ if (err) {
+ pr_err("Device addition failed\n");
+ goto exit_device_put;
+ }
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(sch5627_pdev);
+
+ return err;
+}
+
+static struct platform_driver sch5627_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRVNAME,
+ },
+ .probe = sch5627_probe,
+ .remove = sch5627_remove,
+};
+
+static int __init sch5627_init(void)
+{
+ int err = -ENODEV;
+ unsigned short address;
+
+ if (sch5627_find(0x4e, &address) && sch5627_find(0x2e, &address))
+ goto exit;
+
+ err = platform_driver_register(&sch5627_driver);
+ if (err)
+ goto exit;
+
+ err = sch5627_device_add(address);
+ if (err)
+ goto exit_driver;
+
+ return 0;
+
+exit_driver:
+ platform_driver_unregister(&sch5627_driver);
+exit:
+ return err;
+}
+
+static void __exit sch5627_exit(void)
+{
+ platform_device_unregister(sch5627_pdev);
+ platform_driver_unregister(&sch5627_driver);
+}
+
+MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
+MODULE_AUTHOR("Hans de Goede (hdegoede@redhat.com)");
+MODULE_LICENSE("GPL");
+
+module_init(sch5627_init);
+module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index a610e7880fb3..1a9c32d6893a 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -333,11 +333,11 @@ static inline int sht15_calc_humid(struct sht15_data *data)
const int c1 = -4;
const int c2 = 40500; /* x 10 ^ -6 */
- const int c3 = -2800; /* x10 ^ -9 */
+ const int c3 = -28; /* x 10 ^ -7 */
RHlinear = c1*1000
+ c2 * data->val_humid/1000
- + (data->val_humid * data->val_humid * c3)/1000000;
+ + (data->val_humid * data->val_humid * c3) / 10000;
return (temp - 25000) * (10000 + 80 * data->val_humid)
/ 1000000 + RHlinear;
}
@@ -610,7 +610,7 @@ static int __devexit sht15_remove(struct platform_device *pdev)
struct sht15_data *data = platform_get_drvdata(pdev);
/* Make sure any reads from the device are done and
- * prevent new ones beginnning */
+ * prevent new ones from beginning */
mutex_lock(&data->read_lock);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
new file mode 100644
index 000000000000..97e22bef85ab
--- /dev/null
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -0,0 +1,157 @@
+/*
+ *
+ * TWL4030 MADC Hwmon driver - This driver monitors the real-time
+ * conversion of analog signals like battery temperature,
+ * battery type, battery level, etc. The user can request a conversion
+ * on a particular channel through the sysfs nodes.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/i2c/twl.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/stddef.h>
+#include <linux/sysfs.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+/*
+ * sysfs hook function: trigger a MADC conversion on the channel selected
+ * by the attribute index and report the raw conversion result
+ */
+static ssize_t madc_read(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct twl4030_madc_request req;
+ long val;
+
+ req.channels = (1 << attr->index);
+ req.method = TWL4030_MADC_SW2;
+ req.func_cb = NULL;
+ val = twl4030_madc_conversion(&req);
+ if (val < 0)
+ return val;
+
+ return sprintf(buf, "%d\n", req.rbuf[attr->index]);
+}
+
+/* sysfs nodes to read individual channels from user side */
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, madc_read, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, madc_read, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, madc_read, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, madc_read, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, madc_read, NULL, 4);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, madc_read, NULL, 5);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, madc_read, NULL, 6);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, madc_read, NULL, 7);
+static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, madc_read, NULL, 8);
+static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, madc_read, NULL, 9);
+static SENSOR_DEVICE_ATTR(curr10_input, S_IRUGO, madc_read, NULL, 10);
+static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, madc_read, NULL, 11);
+static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, madc_read, NULL, 12);
+static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, madc_read, NULL, 15);
+
+static struct attribute *twl4030_madc_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ &sensor_dev_attr_in8_input.dev_attr.attr,
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ &sensor_dev_attr_curr10_input.dev_attr.attr,
+ &sensor_dev_attr_in11_input.dev_attr.attr,
+ &sensor_dev_attr_in12_input.dev_attr.attr,
+ &sensor_dev_attr_in15_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group twl4030_madc_group = {
+ .attrs = twl4030_madc_attributes,
+};
+
+static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *hwmon;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &twl4030_madc_group);
+ if (ret)
+ goto err_sysfs;
+ hwmon = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(hwmon)) {
+ dev_err(&pdev->dev, "hwmon_device_register failed.\n");
+ ret = PTR_ERR(hwmon);
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ sysfs_remove_group(&pdev->dev.kobj, &twl4030_madc_group);
+err_sysfs:
+
+ return ret;
+}
+
+static int __devexit twl4030_madc_hwmon_remove(struct platform_device *pdev)
+{
+ hwmon_device_unregister(&pdev->dev);
+ sysfs_remove_group(&pdev->dev.kobj, &twl4030_madc_group);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_madc_hwmon_driver = {
+ .probe = twl4030_madc_hwmon_probe,
+ .remove = __devexit_p(twl4030_madc_hwmon_remove),
+ .driver = {
+ .name = "twl4030_madc_hwmon",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init twl4030_madc_hwmon_init(void)
+{
+ return platform_driver_register(&twl4030_madc_hwmon_driver);
+}
+
+module_init(twl4030_madc_hwmon_init);
+
+static void __exit twl4030_madc_hwmon_exit(void)
+{
+ platform_driver_unregister(&twl4030_madc_hwmon_driver);
+}
+
+module_exit(twl4030_madc_hwmon_exit);
+
+MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("J Keerthy");
+MODULE_ALIAS("twl4030_madc_hwmon");
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 073eabedc432..f2b377c56a3a 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,11 +1,12 @@
/*
w83627ehf - Driver for the hardware monitoring functionality of
- the Winbond W83627EHF Super-I/O chip
+ the Winbond W83627EHF Super-I/O chip
Copyright (C) 2005 Jean Delvare <khali@linux-fr.org>
Copyright (C) 2006 Yuan Mu (Winbond),
- Rudolf Marek <r.marek@assembler.cz>
- David Hubbard <david.c.hubbard@gmail.com>
+ Rudolf Marek <r.marek@assembler.cz>
+ David Hubbard <david.c.hubbard@gmail.com>
Daniel J Blueman <daniel.blueman@gmail.com>
+ Copyright (C) 2010 Sheng-Yuan Huang (Nuvoton) (PS00)
Shamelessly ripped from the w83627hf driver
Copyright (C) 2003 Mark Studebaker
@@ -35,11 +36,13 @@
Chip #vin #fan #pwm #temp chip IDs man ID
w83627ehf 10 5 4 3 0x8850 0x88 0x5ca3
- 0x8860 0xa1
+ 0x8860 0xa1
w83627dhg 9 5 4 3 0xa020 0xc1 0x5ca3
w83627dhg-p 9 5 4 3 0xb070 0xc1 0x5ca3
w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
- w83667hg-b 9 5 3 3 0xb350 0xc1 0x5ca3
+ w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3
+ nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3
+ nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -58,21 +61,28 @@
#include <linux/io.h>
#include "lm75.h"
-enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b };
+enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b, nct6775,
+ nct6776 };
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
-static const char * w83627ehf_device_names[] = {
+static const char * const w83627ehf_device_names[] = {
"w83627ehf",
"w83627dhg",
"w83627dhg",
"w83667hg",
"w83667hg",
+ "nct6775",
+ "nct6776",
};
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
+static unsigned short fan_debounce;
+module_param(fan_debounce, ushort, 0);
+MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
+
#define DRVNAME "w83627ehf"
/*
@@ -80,7 +90,7 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID");
*/
#define W83627EHF_LD_HWM 0x0b
-#define W83667HG_LD_VID 0x0d
+#define W83667HG_LD_VID 0x0d
#define SIO_REG_LDSEL 0x07 /* Logical device select */
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
@@ -94,8 +104,10 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID");
#define SIO_W83627EHG_ID 0x8860
#define SIO_W83627DHG_ID 0xa020
#define SIO_W83627DHG_P_ID 0xb070
-#define SIO_W83667HG_ID 0xa510
+#define SIO_W83667HG_ID 0xa510
#define SIO_W83667HG_B_ID 0xb350
+#define SIO_NCT6775_ID 0xb470
+#define SIO_NCT6776_ID 0xc330
#define SIO_ID_MASK 0xFFF0
static inline void
@@ -138,7 +150,7 @@ superio_exit(int ioreg)
* ISA constants
*/
-#define IOREGION_ALIGNMENT ~7
+#define IOREGION_ALIGNMENT (~7)
#define IOREGION_OFFSET 5
#define IOREGION_LENGTH 2
#define ADDR_REG_OFFSET 0
@@ -164,13 +176,10 @@ static const u16 W83627EHF_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d, 0x3e, 0x55c };
#define W83627EHF_REG_IN(nr) ((nr < 7) ? (0x20 + (nr)) : \
(0x550 + (nr) - 7))
-#define W83627EHF_REG_TEMP1 0x27
-#define W83627EHF_REG_TEMP1_HYST 0x3a
-#define W83627EHF_REG_TEMP1_OVER 0x39
-static const u16 W83627EHF_REG_TEMP[] = { 0x150, 0x250 };
-static const u16 W83627EHF_REG_TEMP_HYST[] = { 0x153, 0x253 };
-static const u16 W83627EHF_REG_TEMP_OVER[] = { 0x155, 0x255 };
-static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 };
+static const u16 W83627EHF_REG_TEMP[] = { 0x27, 0x150, 0x250, 0x7e };
+static const u16 W83627EHF_REG_TEMP_HYST[] = { 0x3a, 0x153, 0x253, 0 };
+static const u16 W83627EHF_REG_TEMP_OVER[] = { 0x39, 0x155, 0x255, 0 };
+static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 };
/* Fan clock dividers are spread over the following five registers */
#define W83627EHF_REG_FANDIV1 0x47
@@ -179,6 +188,11 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 };
#define W83627EHF_REG_DIODE 0x59
#define W83627EHF_REG_SMI_OVT 0x4C
+/* NCT6775F has its own fan divider registers */
+#define NCT6775_REG_FANDIV1 0x506
+#define NCT6775_REG_FANDIV2 0x507
+#define NCT6775_REG_FAN_DEBOUNCE 0xf0
+
#define W83627EHF_REG_ALARM1 0x459
#define W83627EHF_REG_ALARM2 0x45A
#define W83627EHF_REG_ALARM3 0x45B
@@ -199,22 +213,123 @@ static const u8 W83627EHF_PWM_MODE_SHIFT[] = { 0, 1, 0, 6 };
static const u8 W83627EHF_PWM_ENABLE_SHIFT[] = { 2, 4, 1, 4 };
/* FAN Duty Cycle, be used to control */
-static const u8 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 };
-static const u8 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 };
+static const u16 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 };
+static const u16 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 };
static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 };
/* Advanced Fan control, some values are common for all fans */
-static const u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
-static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
-static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
+static const u16 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
+static const u16 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
+static const u16 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
-static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[]
+static const u16 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[]
= { 0xff, 0x67, 0xff, 0x69 };
-static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[]
+static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[]
= { 0xff, 0x68, 0xff, 0x6a };
-static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b };
-static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] = { 0x68, 0x6a, 0x6c };
+static const u16 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b };
+static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[]
+ = { 0x68, 0x6a, 0x6c };
+
+static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 };
+static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 };
+static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 };
+static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 };
+static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 };
+static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 };
+static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
+static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
+static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
+static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642};
+
+static const u16 NCT6775_REG_TEMP[]
+ = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d };
+static const u16 NCT6775_REG_TEMP_CONFIG[]
+ = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A };
+static const u16 NCT6775_REG_TEMP_HYST[]
+ = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D };
+static const u16 NCT6775_REG_TEMP_OVER[]
+ = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C };
+static const u16 NCT6775_REG_TEMP_SOURCE[]
+ = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 };
+
+static const char *const w83667hg_b_temp_label[] = {
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN",
+ "AMDTSI",
+ "PECI Agent 1",
+ "PECI Agent 2",
+ "PECI Agent 3",
+ "PECI Agent 4"
+};
+
+static const char *const nct6775_temp_label[] = {
+ "",
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN",
+ "AMD SB-TSI",
+ "PECI Agent 0",
+ "PECI Agent 1",
+ "PECI Agent 2",
+ "PECI Agent 3",
+ "PECI Agent 4",
+ "PECI Agent 5",
+ "PECI Agent 6",
+ "PECI Agent 7",
+ "PCH_CHIP_CPU_MAX_TEMP",
+ "PCH_CHIP_TEMP",
+ "PCH_CPU_TEMP",
+ "PCH_MCH_TEMP",
+ "PCH_DIM0_TEMP",
+ "PCH_DIM1_TEMP",
+ "PCH_DIM2_TEMP",
+ "PCH_DIM3_TEMP"
+};
+
+static const char *const nct6776_temp_label[] = {
+ "",
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN",
+ "SMBUSMASTER 0",
+ "SMBUSMASTER 1",
+ "SMBUSMASTER 2",
+ "SMBUSMASTER 3",
+ "SMBUSMASTER 4",
+ "SMBUSMASTER 5",
+ "SMBUSMASTER 6",
+ "SMBUSMASTER 7",
+ "PECI Agent 0",
+ "PECI Agent 1",
+ "PCH_CHIP_CPU_MAX_TEMP",
+ "PCH_CHIP_TEMP",
+ "PCH_CPU_TEMP",
+ "PCH_MCH_TEMP",
+ "PCH_DIM0_TEMP",
+ "PCH_DIM1_TEMP",
+ "PCH_DIM2_TEMP",
+ "PCH_DIM3_TEMP",
+ "BYTE_TEMP"
+};
+
+#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP)
+
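+/*
+ * Word-sized registers must be read and written as two consecutive byte
+ * accesses (see w83627ehf_read_value() and w83627ehf_write_value()): the
+ * banked temperature registers at offsets 0x50/0x53/0x55 in banks 1 and 2,
+ * the NCT6775/NCT6776 fan count and fan minimum registers, and a few other
+ * NCT6775 temperature registers.
+ */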
+static inline int is_word_sized(u16 reg)
+{
+ return ((((reg & 0xff00) == 0x100
+ || (reg & 0xff00) == 0x200)
+ && ((reg & 0x00ff) == 0x50
+ || (reg & 0x00ff) == 0x53
+ || (reg & 0x00ff) == 0x55))
+ || (reg & 0xfff0) == 0x630
+ || reg == 0x640 || reg == 0x642
+ || ((reg & 0xfff0) == 0x650
+ && (reg & 0x000f) >= 0x06)
+ || reg == 0x73 || reg == 0x75 || reg == 0x77
+ );
+}
/*
* Conversions
@@ -232,12 +347,36 @@ static inline u8 step_time_to_reg(unsigned int msec, u8 mode)
(msec + 200) / 400), 1, 255);
}
-static inline unsigned int
-fan_from_reg(u8 reg, unsigned int div)
+static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
{
if (reg == 0 || reg == 255)
return 0;
- return 1350000U / (reg * div);
+ return 1350000U / (reg << divreg);
+}
+
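+/*
+ * 13-bit fan counts are split across two bytes: bits 0-4 of the count come
+ * from the low byte (whose bits 5-7 are unused) and bits 5-12 from the high
+ * byte, hence the (reg & 0x1f) | ((reg & 0xff00) >> 3) reassembly below.
+ */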
+static unsigned int fan_from_reg13(u16 reg, unsigned int divreg)
+{
+ if ((reg & 0xff1f) == 0xff1f)
+ return 0;
+
+ reg = (reg & 0x1f) | ((reg & 0xff00) >> 3);
+
+ if (reg == 0)
+ return 0;
+
+ return 1350000U / reg;
+}
+
+static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
+{
+ if (reg == 0 || reg == 0xffff)
+ return 0;
+
+ /*
+ * Even though the registers are 16 bit wide, the fan divisor
+ * still applies.
+ */
+ return 1350000U / (reg << divreg);
}
static inline unsigned int
@@ -247,21 +386,19 @@ div_from_reg(u8 reg)
}
static inline int
-temp1_from_reg(s8 reg)
+temp_from_reg(u16 reg, s16 regval)
{
- return reg * 1000;
+ if (is_word_sized(reg))
+ return LM75_TEMP_FROM_REG(regval);
+ return regval * 1000;
}
-static inline s8
-temp1_to_reg(long temp, int min, int max)
+static inline u16
+temp_to_reg(u16 reg, long temp)
{
- if (temp <= min)
- return min / 1000;
- if (temp >= max)
- return max / 1000;
- if (temp < 0)
- return (temp - 500) / 1000;
- return (temp + 500) / 1000;
+ if (is_word_sized(reg))
+ return LM75_TEMP_TO_REG(temp);
+ return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000);
}
/* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
@@ -275,7 +412,8 @@ static inline long in_from_reg(u8 reg, u8 nr)
static inline u8 in_to_reg(u32 val, u8 nr)
{
- return SENSORS_LIMIT(((val + (scale_in[nr] / 2)) / scale_in[nr]), 0, 255);
+ return SENSORS_LIMIT(((val + (scale_in[nr] / 2)) / scale_in[nr]), 0,
+ 255);
}
/*
@@ -289,38 +427,57 @@ struct w83627ehf_data {
struct device *hwmon_dev;
struct mutex lock;
- const u8 *REG_FAN_START_OUTPUT;
- const u8 *REG_FAN_STOP_OUTPUT;
- const u8 *REG_FAN_MAX_OUTPUT;
- const u8 *REG_FAN_STEP_OUTPUT;
+ u16 reg_temp[NUM_REG_TEMP];
+ u16 reg_temp_over[NUM_REG_TEMP];
+ u16 reg_temp_hyst[NUM_REG_TEMP];
+ u16 reg_temp_config[NUM_REG_TEMP];
+ u8 temp_src[NUM_REG_TEMP];
+ const char * const *temp_label;
+
+ const u16 *REG_PWM;
+ const u16 *REG_TARGET;
+ const u16 *REG_FAN;
+ const u16 *REG_FAN_MIN;
+ const u16 *REG_FAN_START_OUTPUT;
+ const u16 *REG_FAN_STOP_OUTPUT;
+ const u16 *REG_FAN_STOP_TIME;
+ const u16 *REG_FAN_MAX_OUTPUT;
+ const u16 *REG_FAN_STEP_OUTPUT;
+
+ unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
+ unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values */
+ u8 bank; /* current register bank */
u8 in_num; /* number of in inputs we have */
u8 in[10]; /* Register value */
u8 in_max[10]; /* Register value */
u8 in_min[10]; /* Register value */
- u8 fan[5];
- u8 fan_min[5];
+ unsigned int rpm[5];
+ u16 fan_min[5];
u8 fan_div[5];
u8 has_fan; /* some fan inputs can be disabled */
+ u8 has_fan_min; /* some fans don't have min register */
+ bool has_fan_div;
u8 temp_type[3];
- s8 temp1;
- s8 temp1_max;
- s8 temp1_max_hyst;
- s16 temp[2];
- s16 temp_max[2];
- s16 temp_max_hyst[2];
+ s16 temp[9];
+ s16 temp_max[9];
+ s16 temp_max_hyst[9];
u32 alarms;
u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */
u8 pwm_enable[4]; /* 1->manual
2->thermal cruise mode (also called SmartFan I)
3->fan speed cruise mode
- 4->variable thermal cruise (also called SmartFan III) */
+ 4->variable thermal cruise (also called
+ SmartFan III)
+ 5->enhanced variable thermal cruise (also called
+ SmartFan IV) */
+ u8 pwm_enable_orig[4]; /* original value of pwm_enable */
u8 pwm_num; /* number of pwm */
u8 pwm[4];
u8 target_temp[4];
@@ -335,7 +492,7 @@ struct w83627ehf_data {
u8 vid;
u8 vrm;
- u8 temp3_disable;
+ u16 have_temp;
u8 in6_skip;
};
@@ -344,30 +501,19 @@ struct w83627ehf_sio_data {
enum kinds kind;
};
-static inline int is_word_sized(u16 reg)
-{
- return (((reg & 0xff00) == 0x100
- || (reg & 0xff00) == 0x200)
- && ((reg & 0x00ff) == 0x50
- || (reg & 0x00ff) == 0x53
- || (reg & 0x00ff) == 0x55));
-}
-
-/* Registers 0x50-0x5f are banked */
+/*
+ * On older chips, only registers 0x50-0x5f are banked.
+ * On more recent chips, all registers are banked.
+ * Assume that is the case and set the bank number for each access.
+ * Cache the bank number so it only needs to be set if it changes.
+ */
static inline void w83627ehf_set_bank(struct w83627ehf_data *data, u16 reg)
{
- if ((reg & 0x00f0) == 0x50) {
+ u8 bank = reg >> 8;
+ if (data->bank != bank) {
outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
- outb_p(reg >> 8, data->addr + DATA_REG_OFFSET);
- }
-}
-
-/* Not strictly necessary, but play it safe for now */
-static inline void w83627ehf_reset_bank(struct w83627ehf_data *data, u16 reg)
-{
- if (reg & 0xff00) {
- outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
- outb_p(0, data->addr + DATA_REG_OFFSET);
+ outb_p(bank, data->addr + DATA_REG_OFFSET);
+ data->bank = bank;
}
}
@@ -385,14 +531,13 @@ static u16 w83627ehf_read_value(struct w83627ehf_data *data, u16 reg)
data->addr + ADDR_REG_OFFSET);
res = (res << 8) + inb_p(data->addr + DATA_REG_OFFSET);
}
- w83627ehf_reset_bank(data, reg);
mutex_unlock(&data->lock);
-
return res;
}
-static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value)
+static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg,
+ u16 value)
{
int word_sized = is_word_sized(reg);
@@ -406,13 +551,40 @@ static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value
data->addr + ADDR_REG_OFFSET);
}
outb_p(value & 0xff, data->addr + DATA_REG_OFFSET);
- w83627ehf_reset_bank(data, reg);
mutex_unlock(&data->lock);
return 0;
}
/* This function assumes that the caller holds data->update_lock */
+static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
+{
+ u8 reg;
+
+ switch (nr) {
+ case 0:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
+ | (data->fan_div[0] & 0x7);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
+ break;
+ case 1:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
+ | ((data->fan_div[1] << 4) & 0x70);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
+ break;
+ case 2:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
+ | (data->fan_div[2] & 0x7);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
+ break;
+ case 3:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
+ | ((data->fan_div[3] << 4) & 0x70);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
+ break;
+ }
+}
+
+/* This function assumes that the caller holds data->update_lock */
static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
{
u8 reg;
@@ -463,6 +635,32 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
}
}
+static void w83627ehf_write_fan_div_common(struct device *dev,
+ struct w83627ehf_data *data, int nr)
+{
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ if (sio_data->kind == nct6776)
+ ; /* no dividers, do nothing */
+ else if (sio_data->kind == nct6775)
+ nct6775_write_fan_div(data, nr);
+ else
+ w83627ehf_write_fan_div(data, nr);
+}
+
+static void nct6775_update_fan_div(struct w83627ehf_data *data)
+{
+ u8 i;
+
+ i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
+ data->fan_div[0] = i & 0x7;
+ data->fan_div[1] = (i & 0x70) >> 4;
+ i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
+ data->fan_div[2] = i & 0x7;
+ if (data->has_fan & (1<<3))
+ data->fan_div[3] = (i & 0x70) >> 4;
+}
+
static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
{
int i;
@@ -488,10 +686,79 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
}
}
+static void w83627ehf_update_fan_div_common(struct device *dev,
+ struct w83627ehf_data *data)
+{
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ if (sio_data->kind == nct6776)
+ ; /* no dividers, do nothing */
+ else if (sio_data->kind == nct6775)
+ nct6775_update_fan_div(data);
+ else
+ w83627ehf_update_fan_div(data);
+}
+
+static void nct6775_update_pwm(struct w83627ehf_data *data)
+{
+ int i;
+ int pwmcfg, fanmodecfg;
+
+ for (i = 0; i < data->pwm_num; i++) {
+ pwmcfg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[i]);
+ fanmodecfg = w83627ehf_read_value(data,
+ NCT6775_REG_FAN_MODE[i]);
+ data->pwm_mode[i] =
+ ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
+ data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1;
+ data->tolerance[i] = fanmodecfg & 0x0f;
+ data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+ }
+}
+
+static void w83627ehf_update_pwm(struct w83627ehf_data *data)
+{
+ int i;
+ int pwmcfg = 0, tolerance = 0; /* shut up the compiler */
+
+ for (i = 0; i < data->pwm_num; i++) {
+ if (!(data->has_fan & (1 << i)))
+ continue;
+
+ /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
+ if (i != 1) {
+ pwmcfg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[i]);
+ tolerance = w83627ehf_read_value(data,
+ W83627EHF_REG_TOLERANCE[i]);
+ }
+ data->pwm_mode[i] =
+ ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
+ data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
+ & 3) + 1;
+ data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+
+ data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f;
+ }
+}
+
+static void w83627ehf_update_pwm_common(struct device *dev,
+ struct w83627ehf_data *data)
+{
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
+ nct6775_update_pwm(data);
+ else
+ w83627ehf_update_pwm(data);
+}
+
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- int pwmcfg = 0, tolerance = 0; /* shut up the compiler */
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
int i;
mutex_lock(&data->update_lock);
@@ -499,7 +766,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ + HZ/2)
|| !data->valid) {
/* Fan clock dividers */
- w83627ehf_update_fan_div(data);
+ w83627ehf_update_fan_div_common(dev, data);
/* Measured voltages and limits */
for (i = 0; i < data->in_num; i++) {
@@ -513,92 +780,90 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
/* Measured fan speeds and limits */
for (i = 0; i < 5; i++) {
+ u16 reg;
+
if (!(data->has_fan & (1 << i)))
continue;
- data->fan[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN[i]);
- data->fan_min[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_MIN[i]);
+ reg = w83627ehf_read_value(data, data->REG_FAN[i]);
+ data->rpm[i] = data->fan_from_reg(reg,
+ data->fan_div[i]);
+
+ if (data->has_fan_min & (1 << i))
+ data->fan_min[i] = w83627ehf_read_value(data,
+ data->REG_FAN_MIN[i]);
/* If we failed to measure the fan speed and clock
divider can be increased, let's try that for next
time */
- if (data->fan[i] == 0xff
- && data->fan_div[i] < 0x07) {
- dev_dbg(dev, "Increasing fan%d "
+ if (data->has_fan_div
+ && (reg >= 0xff || (sio_data->kind == nct6775
+ && reg == 0x00))
+ && data->fan_div[i] < 0x07) {
+ dev_dbg(dev, "Increasing fan%d "
"clock divider from %u to %u\n",
i + 1, div_from_reg(data->fan_div[i]),
div_from_reg(data->fan_div[i] + 1));
data->fan_div[i]++;
- w83627ehf_write_fan_div(data, i);
+ w83627ehf_write_fan_div_common(dev, data, i);
/* Preserve min limit if possible */
- if (data->fan_min[i] >= 2
+ if ((data->has_fan_min & (1 << i))
+ && data->fan_min[i] >= 2
&& data->fan_min[i] != 255)
w83627ehf_write_value(data,
- W83627EHF_REG_FAN_MIN[i],
+ data->REG_FAN_MIN[i],
(data->fan_min[i] /= 2));
}
}
+ w83627ehf_update_pwm_common(dev, data);
+
for (i = 0; i < data->pwm_num; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
- if (i != 1) {
- pwmcfg = w83627ehf_read_value(data,
- W83627EHF_REG_PWM_ENABLE[i]);
- tolerance = w83627ehf_read_value(data,
- W83627EHF_REG_TOLERANCE[i]);
- }
- data->pwm_mode[i] =
- ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1)
- ? 0 : 1;
- data->pwm_enable[i] =
- ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
- & 3) + 1;
- data->pwm[i] = w83627ehf_read_value(data,
- W83627EHF_REG_PWM[i]);
- data->fan_start_output[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_START_OUTPUT[i]);
- data->fan_stop_output[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_STOP_OUTPUT[i]);
- data->fan_stop_time[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_STOP_TIME[i]);
-
- if (data->REG_FAN_MAX_OUTPUT[i] != 0xff)
+ data->fan_start_output[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_START_OUTPUT[i]);
+ data->fan_stop_output[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_STOP_OUTPUT[i]);
+ data->fan_stop_time[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_STOP_TIME[i]);
+
+ if (data->REG_FAN_MAX_OUTPUT &&
+ data->REG_FAN_MAX_OUTPUT[i] != 0xff)
data->fan_max_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_MAX_OUTPUT[i]);
+ data->REG_FAN_MAX_OUTPUT[i]);
- if (data->REG_FAN_STEP_OUTPUT[i] != 0xff)
+ if (data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[i] != 0xff)
data->fan_step_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STEP_OUTPUT[i]);
+ data->REG_FAN_STEP_OUTPUT[i]);
data->target_temp[i] =
w83627ehf_read_value(data,
- W83627EHF_REG_TARGET[i]) &
+ data->REG_TARGET[i]) &
(data->pwm_mode[i] == 1 ? 0x7f : 0xff);
- data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0))
- & 0x0f;
}
/* Measured temperatures and limits */
- data->temp1 = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP1);
- data->temp1_max = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP1_OVER);
- data->temp1_max_hyst = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP1_HYST);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
data->temp[i] = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP[i]);
- data->temp_max[i] = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_OVER[i]);
- data->temp_max_hyst[i] = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_HYST[i]);
+ data->reg_temp[i]);
+ if (data->reg_temp_over[i])
+ data->temp_max[i]
+ = w83627ehf_read_value(data,
+ data->reg_temp_over[i]);
+ if (data->reg_temp_hyst[i])
+ data->temp_max_hyst[i]
+ = w83627ehf_read_value(data,
+ data->reg_temp_hyst[i]);
}
data->alarms = w83627ehf_read_value(data,
@@ -625,7 +890,8 @@ show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr)); \
}
@@ -635,14 +901,18 @@ show_in_reg(in_max)
#define store_in_reg(REG, reg) \
static ssize_t \
-store_in_##reg (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+store_in_##reg(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- u32 val = simple_strtoul(buf, NULL, 10); \
- \
+ unsigned long val; \
+ int err; \
+ err = strict_strtoul(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
mutex_lock(&data->update_lock); \
data->in_##reg[nr] = in_to_reg(val, nr); \
w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
@@ -654,7 +924,8 @@ store_in_##reg (struct device *dev, struct device_attribute *attr, \
store_in_reg(MIN, min)
store_in_reg(MAX, max)
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
@@ -689,45 +960,50 @@ static struct sensor_device_attribute sda_in_alarm[] = {
};
static struct sensor_device_attribute sda_in_min[] = {
- SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
- SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
- SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
- SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
- SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
- SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
- SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
- SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
- SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
- SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
+ SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
+ SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
+ SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
+ SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
+ SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
+ SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
+ SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
+ SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
+ SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
+ SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
};
static struct sensor_device_attribute sda_in_max[] = {
- SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
- SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
- SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
- SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
- SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
- SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
- SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
- SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
- SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
- SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
+ SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
+ SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
+ SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
+ SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
+ SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
+ SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
+ SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
+ SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
+ SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
+ SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
};
-#define show_fan_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", \
- fan_from_reg(data->reg[nr], \
- div_from_reg(data->fan_div[nr]))); \
+static ssize_t
+show_fan(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%d\n", data->rpm[nr]);
+}
+
+static ssize_t
+show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%d\n",
+ data->fan_from_reg_min(data->fan_min[nr],
+ data->fan_div[nr]));
}
-show_fan_reg(fan);
-show_fan_reg(fan_min);
static ssize_t
show_fan_div(struct device *dev, struct device_attribute *attr,
@@ -746,11 +1022,32 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- unsigned int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
unsigned int reg;
u8 new_div;
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
mutex_lock(&data->update_lock);
+ if (!data->has_fan_div) {
+ /*
+ * Only NCT6776F for now, so we know that this is a 13 bit
+ * register
+ */
+ if (!val) {
+ val = 0xff1f;
+ } else {
+ if (val > 1350000U)
+ val = 135000U;
+ val = 1350000U / val;
+ val = (val & 0x1f) | ((val << 3) & 0xff00);
+ }
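+ /*
+ * Worked example of the 13-bit encoding above (illustrative):
+ * a 600 RPM limit gives 1350000 / 600 = 2250 = 0x8ca, which is
+ * stored as (0x8ca & 0x1f) | ((0x8ca << 3) & 0xff00) = 0x460a,
+ * i.e. bits 4..0 in the low byte and bits 12..5 in the high byte.
+ */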
+ data->fan_min[nr] = val;
+ goto done; /* Leave fan divider alone */
+ }
if (!val) {
/* No min limit, alarm disabled */
data->fan_min[nr] = 255;
@@ -761,15 +1058,17 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
even with the highest divider (128) */
data->fan_min[nr] = 254;
new_div = 7; /* 128 == (1 << 7) */
- dev_warn(dev, "fan%u low limit %u below minimum %u, set to "
- "minimum\n", nr + 1, val, fan_from_reg(254, 128));
+ dev_warn(dev, "fan%u low limit %lu below minimum %u, set to "
+ "minimum\n", nr + 1, val,
+ data->fan_from_reg_min(254, 7));
} else if (!reg) {
/* Speed above this value cannot possibly be represented,
even with the lowest divider (1) */
data->fan_min[nr] = 1;
new_div = 0; /* 1 == (1 << 0) */
- dev_warn(dev, "fan%u low limit %u above maximum %u, set to "
- "maximum\n", nr + 1, val, fan_from_reg(1, 1));
+ dev_warn(dev, "fan%u low limit %lu above maximum %u, set to "
+ "maximum\n", nr + 1, val,
+ data->fan_from_reg_min(1, 0));
} else {
/* Automatically pick the best divider, i.e. the one such
that the min limit will correspond to a register value
@@ -785,25 +1084,16 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
/* Write both the fan clock divider (if it changed) and the new
fan min (unconditionally) */
if (new_div != data->fan_div[nr]) {
- /* Preserve the fan speed reading */
- if (data->fan[nr] != 0xff) {
- if (new_div > data->fan_div[nr])
- data->fan[nr] >>= new_div - data->fan_div[nr];
- else if (data->fan[nr] & 0x80)
- data->fan[nr] = 0xff;
- else
- data->fan[nr] <<= data->fan_div[nr] - new_div;
- }
-
dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
nr + 1, div_from_reg(data->fan_div[nr]),
div_from_reg(new_div));
data->fan_div[nr] = new_div;
- w83627ehf_write_fan_div(data, nr);
+ w83627ehf_write_fan_div_common(dev, data, nr);
/* Give the chip time to sample a new speed value */
data->last_updated = jiffies;
}
- w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[nr],
+done:
+ w83627ehf_write_value(data, data->REG_FAN_MIN[nr],
data->fan_min[nr]);
mutex_unlock(&data->update_lock);
@@ -847,70 +1137,54 @@ static struct sensor_device_attribute sda_fan_div[] = {
SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
};
-#define show_temp1_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- return sprintf(buf, "%d\n", temp1_from_reg(data->reg)); \
-}
-show_temp1_reg(temp1);
-show_temp1_reg(temp1_max);
-show_temp1_reg(temp1_max_hyst);
-
-#define store_temp1_reg(REG, reg) \
-static ssize_t \
-store_temp1_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- long val = simple_strtol(buf, NULL, 10); \
- \
- mutex_lock(&data->update_lock); \
- data->temp1_##reg = temp1_to_reg(val, -128000, 127000); \
- w83627ehf_write_value(data, W83627EHF_REG_TEMP1_##REG, \
- data->temp1_##reg); \
- mutex_unlock(&data->update_lock); \
- return count; \
+static ssize_t
+show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
}
-store_temp1_reg(OVER, max);
-store_temp1_reg(HYST, max_hyst);
-#define show_temp_reg(reg) \
+#define show_temp_reg(addr, reg) \
static ssize_t \
show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", \
- LM75_TEMP_FROM_REG(data->reg[nr])); \
+ temp_from_reg(data->addr[nr], data->reg[nr])); \
}
-show_temp_reg(temp);
-show_temp_reg(temp_max);
-show_temp_reg(temp_max_hyst);
+show_temp_reg(reg_temp, temp);
+show_temp_reg(reg_temp_over, temp_max);
+show_temp_reg(reg_temp_hyst, temp_max_hyst);
-#define store_temp_reg(REG, reg) \
+#define store_temp_reg(addr, reg) \
static ssize_t \
store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- long val = simple_strtol(buf, NULL, 10); \
- \
+ int err; \
+ long val; \
+ err = strict_strtol(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
mutex_lock(&data->update_lock); \
- data->reg[nr] = LM75_TEMP_TO_REG(val); \
- w83627ehf_write_value(data, W83627EHF_REG_TEMP_##REG[nr], \
+ data->reg[nr] = temp_to_reg(data->addr[nr], val); \
+ w83627ehf_write_value(data, data->addr[nr], \
data->reg[nr]); \
mutex_unlock(&data->update_lock); \
return count; \
}
-store_temp_reg(OVER, temp_max);
-store_temp_reg(HYST, temp_max_hyst);
+store_temp_reg(reg_temp_over, temp_max);
+store_temp_reg(reg_temp_hyst, temp_max_hyst);
static ssize_t
show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
@@ -922,27 +1196,69 @@ show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static struct sensor_device_attribute sda_temp_input[] = {
- SENSOR_ATTR(temp1_input, S_IRUGO, show_temp1, NULL, 0),
- SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0),
- SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 1),
+ SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
+ SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
+ SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
+ SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3),
+ SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4),
+ SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5),
+ SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6),
+ SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7),
+ SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8),
+};
+
+static struct sensor_device_attribute sda_temp_label[] = {
+ SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
+ SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
+ SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
+ SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
+ SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
+ SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
+ SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
+ SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
+ SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
};
static struct sensor_device_attribute sda_temp_max[] = {
- SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp1_max,
- store_temp1_max, 0),
- SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
+ SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
store_temp_max, 0),
- SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
+ SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
store_temp_max, 1),
+ SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 2),
+ SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 3),
+ SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 4),
+ SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 5),
+ SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 6),
+ SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 7),
+ SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 8),
};
static struct sensor_device_attribute sda_temp_max_hyst[] = {
- SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1_max_hyst,
- store_temp1_max_hyst, 0),
- SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
store_temp_max_hyst, 0),
- SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
store_temp_max_hyst, 1),
+ SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 2),
+ SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 3),
+ SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 4),
+ SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 5),
+ SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 6),
+ SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 7),
+ SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 8),
};
static struct sensor_device_attribute sda_temp_alarm[] = {
@@ -958,11 +1274,12 @@ static struct sensor_device_attribute sda_temp_type[] = {
};
#define show_pwm_reg(reg) \
-static ssize_t show_##reg (struct device *dev, struct device_attribute *attr, \
- char *buf) \
+static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", data->reg[nr]); \
}
@@ -978,9 +1295,14 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u32 val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
u16 reg;
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
if (val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1001,11 +1323,18 @@ store_pwm(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 255);
+ unsigned long val;
+ int err;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_PWM[nr], val);
+ w83627ehf_write_value(data, data->REG_PWM[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1015,19 +1344,38 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u32 val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
u16 reg;
- if (!val || (val > 4))
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ if (!val || (val > 4 && val != data->pwm_enable_orig[nr]))
return -EINVAL;
+ /* SmartFan III mode is not supported on NCT6776F */
+ if (sio_data->kind == nct6776 && val == 4)
+ return -EINVAL;
+
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
data->pwm_enable[nr] = val;
- reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
- reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ reg = w83627ehf_read_value(data,
+ NCT6775_REG_FAN_MODE[nr]);
+ reg &= 0x0f;
+ reg |= (val - 1) << 4;
+ w83627ehf_write_value(data,
+ NCT6775_REG_FAN_MODE[nr], reg);
+ } else {
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
+ reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
+ reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ }
mutex_unlock(&data->update_lock);
return count;
}
@@ -1038,9 +1386,10 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", temp1_from_reg(data->reg[nr])); \
+ return sprintf(buf, "%d\n", data->reg[nr] * 1000); \
}
show_tol_temp(tolerance)
@@ -1053,11 +1402,18 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 127000);
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
+ w83627ehf_write_value(data, data->REG_TARGET[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1067,20 +1423,37 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u16 reg;
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
/* Limit the temp to 0C - 15C */
- u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 15000);
+ val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
- data->tolerance[nr] = val;
- if (nr == 1)
- reg = (reg & 0x0f) | (val << 4);
- else
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ /* Limit tolerance further for NCT6776F */
+ if (sio_data->kind == nct6776 && val > 7)
+ val = 7;
+ reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]);
reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
+ w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg);
+ } else {
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
+ if (nr == 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
+ reg = (reg & 0xf0) | val;
+ w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
+ }
+ data->tolerance[nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
@@ -1143,18 +1516,25 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", data->reg[nr]); \
-}\
+} \
static ssize_t \
store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
-{\
+{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \
+ unsigned long val; \
+ int err; \
+ err = strict_strtoul(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
+ val = SENSORS_LIMIT(val, 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
w83627ehf_write_value(data, data->REG_##REG[nr], val); \
@@ -1172,10 +1552,12 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", \
- step_time_from_reg(data->reg[nr], data->pwm_mode[nr])); \
+ step_time_from_reg(data->reg[nr], \
+ data->pwm_mode[nr])); \
} \
\
static ssize_t \
@@ -1183,10 +1565,15 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- u8 val = step_time_to_reg(simple_strtoul(buf, NULL, 10), \
- data->pwm_mode[nr]); \
+ unsigned long val; \
+ int err; \
+ err = strict_strtoul(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
+ val = step_time_to_reg(val, data->pwm_mode[nr]); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
@@ -1283,7 +1670,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
struct sensor_device_attribute *attr =
&sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
+ if (data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
device_remove_file(dev, &attr->dev_attr);
}
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
@@ -1309,12 +1697,15 @@ static void w83627ehf_device_remove_files(struct device *dev)
device_remove_file(dev, &sda_target_temp[i].dev_attr);
device_remove_file(dev, &sda_tolerance[i].dev_attr);
}
- for (i = 0; i < 3; i++) {
- if ((i == 2) && data->temp3_disable)
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
continue;
device_remove_file(dev, &sda_temp_input[i].dev_attr);
+ device_remove_file(dev, &sda_temp_label[i].dev_attr);
device_remove_file(dev, &sda_temp_max[i].dev_attr);
device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
+ if (i > 2)
+ continue;
device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
device_remove_file(dev, &sda_temp_type[i].dev_attr);
}
@@ -1335,15 +1726,17 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
w83627ehf_write_value(data, W83627EHF_REG_CONFIG,
tmp | 0x01);
- /* Enable temp2 and temp3 if needed */
- for (i = 0; i < 2; i++) {
- tmp = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_CONFIG[i]);
- if ((i == 1) && data->temp3_disable)
+ /* Enable temperature sensors if needed */
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
+ if (!data->reg_temp_config[i])
continue;
+ tmp = w83627ehf_read_value(data,
+ data->reg_temp_config[i]);
if (tmp & 0x01)
w83627ehf_write_value(data,
- W83627EHF_REG_TEMP_CONFIG[i],
+ data->reg_temp_config[i],
tmp & 0xfe);
}
@@ -1362,13 +1755,39 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
}
}
+static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
+ int r1, int r2)
+{
+ u16 tmp;
+
+ tmp = data->temp_src[r1];
+ data->temp_src[r1] = data->temp_src[r2];
+ data->temp_src[r2] = tmp;
+
+ tmp = data->reg_temp[r1];
+ data->reg_temp[r1] = data->reg_temp[r2];
+ data->reg_temp[r2] = tmp;
+
+ tmp = data->reg_temp_over[r1];
+ data->reg_temp_over[r1] = data->reg_temp_over[r2];
+ data->reg_temp_over[r2] = tmp;
+
+ tmp = data->reg_temp_hyst[r1];
+ data->reg_temp_hyst[r1] = data->reg_temp_hyst[r2];
+ data->reg_temp_hyst[r2] = tmp;
+
+ tmp = data->reg_temp_config[r1];
+ data->reg_temp_config[r1] = data->reg_temp_config[r2];
+ data->reg_temp_config[r2] = tmp;
+}
+
static int __devinit w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct w83627ehf_data *data;
struct resource *res;
- u8 fan4pin, fan5pin, en_vrm10;
+ u8 fan3pin, fan4pin, fan4min, fan5pin, en_vrm10;
int i, err = 0;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1380,7 +1799,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
goto exit;
}
- if (!(data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL))) {
+ data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL);
+ if (!data) {
err = -ENOMEM;
goto exit_release;
}
@@ -1393,25 +1813,202 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
- /* 667HG has 3 pwms */
+ /* 667HG, NCT6775F, and NCT6776F have 3 pwms */
data->pwm_num = (sio_data->kind == w83667hg
- || sio_data->kind == w83667hg_b) ? 3 : 4;
+ || sio_data->kind == w83667hg_b
+ || sio_data->kind == nct6775
+ || sio_data->kind == nct6776) ? 3 : 4;
+ data->have_temp = 0x07;
/* Check temp3 configuration bit for 667HG */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
- data->temp3_disable = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_CONFIG[1]) & 0x01;
- data->in6_skip = !data->temp3_disable;
+ if (sio_data->kind == w83667hg) {
+ u8 reg;
+
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[2]);
+ if (reg & 0x01)
+ data->have_temp &= ~(1 << 2);
+ else
+ data->in6_skip = 1; /* either temp3 or in6 */
+ }
+
+ /* Deal with temperature register setup first. */
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ int mask = 0;
+
+ /*
+ * Display temperature sensor output only if it monitors
+ * a source other than one already reported. Always display
+ * first three temperature registers, though.
+ */
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ u8 src;
+
+ data->reg_temp[i] = NCT6775_REG_TEMP[i];
+ data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i];
+
+ src = w83627ehf_read_value(data,
+ NCT6775_REG_TEMP_SOURCE[i]);
+ src &= 0x1f;
+ if (src && !(mask & (1 << src))) {
+ data->have_temp |= 1 << i;
+ mask |= 1 << src;
+ }
+
+ data->temp_src[i] = src;
+
+ /*
+ * Now do some register swapping if index 0..2 don't
+ * point to SYSTIN(1), CPUIN(2), and AUXIN(3).
+ * Idea is to have the first three attributes
+ * report SYSTIN, CPUIN, and AUXIN if possible
+ * without overriding the basic system configuration.
+ */
+ if (i > 0 && data->temp_src[0] != 1
+ && data->temp_src[i] == 1)
+ w82627ehf_swap_tempreg(data, 0, i);
+ if (i > 1 && data->temp_src[1] != 2
+ && data->temp_src[i] == 2)
+ w82627ehf_swap_tempreg(data, 1, i);
+ if (i > 2 && data->temp_src[2] != 3
+ && data->temp_src[i] == 3)
+ w82627ehf_swap_tempreg(data, 2, i);
+ }
+ if (sio_data->kind == nct6776) {
+ /*
+ * On NCT6776, AUXTIN and VIN3 pins are shared.
+ * Only way to detect it is to check if AUXTIN is used
+ * as a temperature source, and if that source is
+ * enabled.
+ *
+ * If that is the case, disable in6, which reports VIN3.
+ * Otherwise disable temp3.
+ */
+ if (data->temp_src[2] == 3) {
+ u8 reg;
+
+ if (data->reg_temp_config[2])
+ reg = w83627ehf_read_value(data,
+ data->reg_temp_config[2]);
+ else
+ reg = 0; /* Assume AUXTIN is used */
+
+ if (reg & 0x01)
+ data->have_temp &= ~(1 << 2);
+ else
+ data->in6_skip = 1;
+ }
+ data->temp_label = nct6776_temp_label;
+ } else {
+ data->temp_label = nct6775_temp_label;
+ }
+ } else if (sio_data->kind == w83667hg_b) {
+ u8 reg;
+
+ /*
+ * Temperature sources are selected with bank 0, registers 0x49
+ * and 0x4a.
+ */
+ for (i = 0; i < ARRAY_SIZE(W83627EHF_REG_TEMP); i++) {
+ data->reg_temp[i] = W83627EHF_REG_TEMP[i];
+ data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i];
+ }
+ reg = w83627ehf_read_value(data, 0x4a);
+ data->temp_src[0] = reg >> 5;
+ reg = w83627ehf_read_value(data, 0x49);
+ data->temp_src[1] = reg & 0x07;
+ data->temp_src[2] = (reg >> 4) & 0x07;
+
+ /*
+ * W83667HG-B has another temperature register at 0x7e.
+ * The temperature source is selected with register 0x7d.
+ * Support it if the source differs from already reported
+ * sources.
+ */
+ reg = w83627ehf_read_value(data, 0x7d);
+ reg &= 0x07;
+ if (reg != data->temp_src[0] && reg != data->temp_src[1]
+ && reg != data->temp_src[2]) {
+ data->temp_src[3] = reg;
+ data->have_temp |= 1 << 3;
+ }
+
+ /*
+ * Chip supports either AUXTIN or VIN3. Try to find out which
+ * one.
+ */
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[2]);
+ if (data->temp_src[2] == 2 && (reg & 0x01))
+ data->have_temp &= ~(1 << 2);
+
+ if ((data->temp_src[2] == 2 && (data->have_temp & (1 << 2)))
+ || (data->temp_src[3] == 2 && (data->have_temp & (1 << 3))))
+ data->in6_skip = 1;
+
+ data->temp_label = w83667hg_b_temp_label;
+ } else {
+ /* Temperature sources are fixed */
+ for (i = 0; i < 3; i++) {
+ data->reg_temp[i] = W83627EHF_REG_TEMP[i];
+ data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i];
+ }
}
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- if (sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == nct6775) {
+ data->has_fan_div = true;
+ data->fan_from_reg = fan_from_reg16;
+ data->fan_from_reg_min = fan_from_reg8;
+ data->REG_PWM = NCT6775_REG_PWM;
+ data->REG_TARGET = NCT6775_REG_TARGET;
+ data->REG_FAN = NCT6775_REG_FAN;
+ data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
+ data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT;
+ data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT;
+ } else if (sio_data->kind == nct6776) {
+ data->has_fan_div = false;
+ data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg_min = fan_from_reg13;
+ data->REG_PWM = NCT6775_REG_PWM;
+ data->REG_TARGET = NCT6775_REG_TARGET;
+ data->REG_FAN = NCT6775_REG_FAN;
+ data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
+ } else if (sio_data->kind == w83667hg_b) {
+ data->has_fan_div = true;
+ data->fan_from_reg = fan_from_reg8;
+ data->fan_from_reg_min = fan_from_reg8;
+ data->REG_PWM = W83627EHF_REG_PWM;
+ data->REG_TARGET = W83627EHF_REG_TARGET;
+ data->REG_FAN = W83627EHF_REG_FAN;
+ data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
data->REG_FAN_STEP_OUTPUT =
W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
} else {
+ data->has_fan_div = true;
+ data->fan_from_reg = fan_from_reg8;
+ data->fan_from_reg_min = fan_from_reg8;
+ data->REG_PWM = W83627EHF_REG_PWM;
+ data->REG_TARGET = W83627EHF_REG_TARGET;
+ data->REG_FAN = W83627EHF_REG_FAN;
+ data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
data->REG_FAN_STEP_OUTPUT =
@@ -1424,7 +2021,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
data->vrm = vid_which_vrm();
superio_enter(sio_data->sioreg);
/* Read VID value */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b ||
+ sio_data->kind == nct6775 || sio_data->kind == nct6776) {
/* W83667HG has different pins for VID input and output, so
we can get the VID input values directly at logical device D
0xe3. */
@@ -1475,13 +2073,44 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
/* fan4 and fan5 share some pins with the GPIO and serial flash */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
- fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
+ if (sio_data->kind == nct6775) {
+ /* On NCT6775, fan4 shares pins with the fdc interface */
+ fan3pin = 1;
+ fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
+ fan4min = 0;
+ fan5pin = 0;
+ } else if (sio_data->kind == nct6776) {
+ fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+ fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
+ fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
+ fan4min = fan4pin;
+ } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ fan3pin = 1;
fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
+ fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
+ fan4min = fan4pin;
} else {
- fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
+ fan3pin = 1;
fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
+ fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
+ fan4min = fan4pin;
}
+
+ if (fan_debounce &&
+ (sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
+ u8 tmp;
+
+ superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
+ tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE);
+ if (sio_data->kind == nct6776)
+ superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
+ 0x3e | tmp);
+ else
+ superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
+ 0x1e | tmp);
+ pr_info("Enabled fan debounce for chip %s\n", data->name);
+ }
+
superio_exit(sio_data->sioreg);
/* It looks like fan4 and fan5 pins can be alternatively used
@@ -1490,26 +2119,54 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
connected fan5 as input unless they are emitting log 1, which
is not the default. */
- data->has_fan = 0x07; /* fan1, fan2 and fan3 */
- i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
- if ((i & (1 << 2)) && fan4pin)
- data->has_fan |= (1 << 3);
- if (!(i & (1 << 1)) && fan5pin)
- data->has_fan |= (1 << 4);
+ data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
+
+ data->has_fan |= (fan3pin << 2);
+ data->has_fan_min |= (fan3pin << 2);
+
+ /*
+ * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1 register
+ */
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
+ data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
+ } else {
+ i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+ if ((i & (1 << 2)) && fan4pin) {
+ data->has_fan |= (1 << 3);
+ data->has_fan_min |= (1 << 3);
+ }
+ if (!(i & (1 << 1)) && fan5pin) {
+ data->has_fan |= (1 << 4);
+ data->has_fan_min |= (1 << 4);
+ }
+ }
/* Read fan clock dividers immediately */
- w83627ehf_update_fan_div(data);
+ w83627ehf_update_fan_div_common(dev, data);
+
+ /* Read pwm data to save original values */
+ w83627ehf_update_pwm_common(dev, data);
+ for (i = 0; i < data->pwm_num; i++)
+ data->pwm_enable_orig[i] = data->pwm_enable[i];
+
/* Register sysfs hooks */
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
- if ((err = device_create_file(dev,
- &sda_sf3_arrays[i].dev_attr)))
+ for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
+ err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
+ if (err)
goto exit_remove;
+ }
for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
struct sensor_device_attribute *attr =
&sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
+ if (data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
err = device_create_file(dev, &attr->dev_attr);
if (err)
goto exit_remove;
@@ -1518,8 +2175,9 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
/* if fan4 is enabled create the sf3 files for it */
if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
- if ((err = device_create_file(dev,
- &sda_sf3_arrays_fan4[i].dev_attr)))
+ err = device_create_file(dev,
+ &sda_sf3_arrays_fan4[i].dev_attr);
+ if (err)
goto exit_remove;
}
@@ -1541,12 +2199,20 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
if ((err = device_create_file(dev,
&sda_fan_input[i].dev_attr))
|| (err = device_create_file(dev,
- &sda_fan_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_div[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_min[i].dev_attr)))
+ &sda_fan_alarm[i].dev_attr)))
goto exit_remove;
+ if (sio_data->kind != nct6776) {
+ err = device_create_file(dev,
+ &sda_fan_div[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (data->has_fan_min & (1 << i)) {
+ err = device_create_file(dev,
+ &sda_fan_min[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
if (i < data->pwm_num &&
((err = device_create_file(dev,
&sda_pwm[i].dev_attr))
@@ -1562,16 +2228,33 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < 3; i++) {
- if ((i == 2) && data->temp3_disable)
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
+ err = device_create_file(dev, &sda_temp_input[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ if (data->temp_label) {
+ err = device_create_file(dev,
+ &sda_temp_label[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (data->reg_temp_over[i]) {
+ err = device_create_file(dev,
+ &sda_temp_max[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (data->reg_temp_hyst[i]) {
+ err = device_create_file(dev,
+ &sda_temp_max_hyst[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (i > 2)
continue;
if ((err = device_create_file(dev,
- &sda_temp_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_max[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_max_hyst[i].dev_attr))
- || (err = device_create_file(dev,
&sda_temp_alarm[i].dev_attr))
|| (err = device_create_file(dev,
&sda_temp_type[i].dev_attr)))
@@ -1632,6 +2315,8 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P";
static const char __initdata sio_name_W83667HG[] = "W83667HG";
static const char __initdata sio_name_W83667HG_B[] = "W83667HG-B";
+ static const char __initdata sio_name_NCT6775[] = "NCT6775F";
+ static const char __initdata sio_name_NCT6776[] = "NCT6776F";
u16 val;
const char *sio_name;
@@ -1668,6 +2353,14 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
sio_data->kind = w83667hg_b;
sio_name = sio_name_W83667HG_B;
break;
+ case SIO_NCT6775_ID:
+ sio_data->kind = nct6775;
+ sio_name = sio_name_NCT6775;
+ break;
+ case SIO_NCT6776_ID:
+ sio_data->kind = nct6776;
+ sio_name = sio_name_NCT6776;
+ break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -1689,7 +2382,8 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
/* Activate logical device if needed */
val = superio_inb(sioaddr, SIO_REG_ENABLE);
if (!(val & 0x01)) {
- pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
+ pr_warn("Forcibly enabling Super-I/O. "
+ "Sensor is probably unusable.\n");
superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
}
@@ -1726,7 +2420,8 @@ static int __init sensors_w83627ehf_init(void)
if (err)
goto exit;
- if (!(pdev = platform_device_alloc(DRVNAME, address))) {
+ pdev = platform_device_alloc(DRVNAME, address);
+ if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
goto exit_unregister;
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
new file mode 100644
index 000000000000..eb4af28f8567
--- /dev/null
+++ b/drivers/hwspinlock/Kconfig
@@ -0,0 +1,22 @@
+#
+# Generic HWSPINLOCK framework
+#
+
+config HWSPINLOCK
+ tristate "Generic Hardware Spinlock framework"
+ help
+ Say y here to support the generic hardware spinlock framework.
+ You only need to enable this if you have hardware spinlock module
+ on your system (usually only relevant if your system has remote slave
+ coprocessors).
+
+ If unsure, say N.
+
+config HWSPINLOCK_OMAP
+ tristate "OMAP Hardware Spinlock device"
+ depends on HWSPINLOCK && ARCH_OMAP4
+ help
+ Say y here to support the OMAP Hardware Spinlock device (first
+ introduced in OMAP4).
+
+ If unsure, say N.
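With both options selected, the resulting configuration fragment would look like this (a hypothetical .config excerpt; both symbols are tristate, so =m works as well):

	CONFIG_HWSPINLOCK=y
	CONFIG_HWSPINLOCK_OMAP=y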
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
new file mode 100644
index 000000000000..5729a3f7ed3d
--- /dev/null
+++ b/drivers/hwspinlock/Makefile
@@ -0,0 +1,6 @@
+#
+# Generic Hardware Spinlock framework
+#
+
+obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
+obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
new file mode 100644
index 000000000000..43a62714b4fb
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -0,0 +1,548 @@
+/*
+ * Hardware spinlock framework
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/radix-tree.h>
+#include <linux/hwspinlock.h>
+#include <linux/pm_runtime.h>
+
+#include "hwspinlock_internal.h"
+
+/* radix tree tags */
+#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */
+
+/*
+ * A radix tree is used to maintain the available hwspinlock instances.
+ * The tree associates hwspinlock pointers with their integer key id,
+ * and provides easy-to-use API which makes the hwspinlock core code simple
+ * and easy to read.
+ *
+ * Radix trees are quick on lookups, and reasonably efficient in terms of
+ * storage, especially with high density usages such as this framework
+ * requires (a continuous range of integer keys, beginning with zero, is
+ * used as the ID's of the hwspinlock instances).
+ *
+ * The radix tree API supports tagging items in the tree, which this
+ * framework uses to mark unused hwspinlock instances (see the
+ * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
+ * tree, looking for an unused hwspinlock instance, is now reduced to a
+ * single radix tree API call.
+ */
+static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
+
+/*
+ * Synchronization of access to the tree is achieved using this spinlock,
+ * as the radix-tree API requires that users provide all synchronisation.
+ */
+static DEFINE_SPINLOCK(hwspinlock_tree_lock);
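As an illustrative sketch of how the UNUSED tag is meant to be consumed (the actual lock-request path comes later in this file and is not part of this excerpt; the names below mirror the declarations above):

	struct hwspinlock *hwlock;
	int ret;

	/* look up one unused lock, then mark it used by clearing its tag */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
					 0, 1, HWSPINLOCK_UNUSED);
	if (ret == 1)
		radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
				     HWSPINLOCK_UNUSED);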
+
+/**
+ * __hwspin_trylock() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ * @mode: controls whether local interrupts are disabled or not
+ * @flags: a pointer where the caller's interrupt state will be saved at (if
+ * requested)
+ *
+ * This function attempts to lock an hwspinlock, and will immediately
+ * fail if the hwspinlock is already taken.
+ *
+ * Upon a successful return from this function, preemption (and possibly
+ * interrupts) is disabled, so the caller must not sleep, and is advised to
+ * release the hwspinlock as soon as possible. This is required in order to
+ * minimize remote cores polling on the hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if yes,
+ * whether he wants their previous state to be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_trylock, spin_trylock_irq and
+ * spin_trylock_irqsave.
+ *
+ * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
+ * the hwspinlock was already taken.
+ * This function will never sleep.
+ */
+int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+ int ret;
+
+ BUG_ON(!hwlock);
+ BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+ /*
+ * This spin_lock{_irq, _irqsave} serves three purposes:
+ *
+ * 1. Disable preemption, in order to minimize the period of time
+ * in which the hwspinlock is taken. This is important in order
+ * to minimize the possible polling on the hardware interconnect
+ * by a remote user of this lock.
+ * 2. Make the hwspinlock SMP-safe (so we can take it from
+ * additional contexts on the local host).
+ * 3. Ensure that in_atomic/might_sleep checks catch potential
+ * problems with hwspinlock usage (e.g. scheduler checks like
+ * 'scheduling while atomic' etc.)
+ */
+ if (mode == HWLOCK_IRQSTATE)
+ ret = spin_trylock_irqsave(&hwlock->lock, *flags);
+ else if (mode == HWLOCK_IRQ)
+ ret = spin_trylock_irq(&hwlock->lock);
+ else
+ ret = spin_trylock(&hwlock->lock);
+
+ /* is lock already taken by another context on the local cpu ? */
+ if (!ret)
+ return -EBUSY;
+
+ /* try to take the hwspinlock device */
+ ret = hwlock->ops->trylock(hwlock);
+
+ /* if hwlock is already taken, undo spin_trylock_* and exit */
+ if (!ret) {
+ if (mode == HWLOCK_IRQSTATE)
+ spin_unlock_irqrestore(&hwlock->lock, *flags);
+ else if (mode == HWLOCK_IRQ)
+ spin_unlock_irq(&hwlock->lock);
+ else
+ spin_unlock(&hwlock->lock);
+
+ return -EBUSY;
+ }
+
+ /*
+ * We can be sure the other core's memory operations
+ * are observable to us only _after_ we successfully take
+ * the hwspinlock, and we must make sure that subsequent memory
+ * operations (both reads and writes) will not be reordered before
+ * we actually took the hwspinlock.
+ *
+ * Note: the implicit memory barrier of the spinlock above is too
+ * early, so we need this additional explicit memory barrier.
+ */
+ mb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__hwspin_trylock);
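A minimal caller sketch, assuming the static inline wrappers that linux/hwspinlock.h is expected to build on top of this primitive (hwspin_trylock_irqsave() and hwspin_unlock_irqrestore(); poke_shared_state() is a hypothetical caller):

	static int poke_shared_state(struct hwspinlock *hwlock)
	{
		unsigned long flags;
		int ret;

		/* returns -EBUSY at once if the lock is held elsewhere */
		ret = hwspin_trylock_irqsave(hwlock, &flags);
		if (ret)
			return ret;

		/* ... touch state shared with the remote core ... */

		hwspin_unlock_irqrestore(hwlock, &flags);
		return 0;
	}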
+
+/**
+ * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @timeout: timeout value in msecs
+ * @mode: mode which controls whether local interrupts are disabled or not
+ * @flags: a pointer to where the caller's interrupt state will be saved at (if
+ * requested)
+ *
+ * This function locks the given @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up after @timeout msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption is disabled
+ * (and possibly local interrupts, too), so the caller must not sleep,
+ * and is advised to release the hwspinlock as soon as possible.
+ * This is required in order to minimize remote cores polling on the
+ * hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if yes,
+ * whether he wants their previous state to be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
+ int mode, unsigned long *flags)
+{
+ int ret;
+ unsigned long expire;
+
+ expire = msecs_to_jiffies(to) + jiffies;
+
+ for (;;) {
+ /* Try to take the hwspinlock */
+ ret = __hwspin_trylock(hwlock, mode, flags);
+ if (ret != -EBUSY)
+ break;
+
+ /*
+ * The lock is already taken, let's check if the user wants
+ * us to try again
+ */
+ if (time_is_before_eq_jiffies(expire))
+ return -ETIMEDOUT;
+
+ /*
+ * Allow platform-specific relax handlers to prevent
+ * hogging the interconnect (no sleeping, though)
+ */
+ if (hwlock->ops->relax)
+ hwlock->ops->relax(hwlock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
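Typical use, assuming the hwspin_lock_timeout()/hwspin_unlock() wrappers expected from linux/hwspinlock.h (the 100 ms budget is an arbitrary example):

	/* spin for at most 100 ms, local interrupts left untouched */
	ret = hwspin_lock_timeout(hwlock, 100);
	if (ret)	/* -ETIMEDOUT if still contended after 100 ms */
		return ret;

	/* ... short critical section over the shared resource ... */

	hwspin_unlock(hwlock);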
+
+/**
+ * __hwspin_unlock() - unlock a specific hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ * @mode: controls whether local interrupts needs to be restored or not
+ * @flags: previous caller's interrupt state to restore (if requested)
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * (possibly) enable interrupts or restore their previous state.
+ * @hwlock must be already locked before calling this function: it is a bug
+ * to call unlock on a @hwlock that is already unlocked.
+ *
+ * The user decides whether local interrupts should be enabled or not, and
+ * if yes, whether he wants their previous state to be restored. It is up
+ * to the user to choose the appropriate @mode of operation, exactly the
+ * same way users decide between spin_unlock, spin_unlock_irq and
+ * spin_unlock_irqrestore.
+ *
+ * The function will never sleep.
+ */
+void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+ BUG_ON(!hwlock);
+ BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+ /*
+ * We must make sure that memory operations (both reads and writes),
+ * done before unlocking the hwspinlock, will not be reordered
+ * after the lock is released.
+ *
+ * That's the purpose of this explicit memory barrier.
+ *
+ * Note: the memory barrier induced by the spin_unlock below is too
+ * late; the other core is going to access memory soon after it will
+ * take the hwspinlock, and by then we want to be sure our memory
+ * operations are already observable.
+ */
+ mb();
+
+ hwlock->ops->unlock(hwlock);
+
+ /* Undo the spin_trylock{_irq, _irqsave} called while locking */
+ if (mode == HWLOCK_IRQSTATE)
+ spin_unlock_irqrestore(&hwlock->lock, *flags);
+ else if (mode == HWLOCK_IRQ)
+ spin_unlock_irq(&hwlock->lock);
+ else
+ spin_unlock(&hwlock->lock);
+}
+EXPORT_SYMBOL_GPL(__hwspin_unlock);
+
+/**
+ * hwspin_lock_register() - register a new hw spinlock
+ * @hwlock: hwspinlock to register.
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock instance.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_register(struct hwspinlock *hwlock)
+{
+ struct hwspinlock *tmp;
+ int ret;
+
+ if (!hwlock || !hwlock->ops ||
+ !hwlock->ops->trylock || !hwlock->ops->unlock) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ spin_lock_init(&hwlock->lock);
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
+ if (ret)
+ goto out;
+
+ /* mark this hwspinlock as available */
+ tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+
+ /* self-sanity check which should never fail */
+ WARN_ON(tmp != hwlock);
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_register);
+
+/**
+ * hwspin_lock_unregister() - unregister an hw spinlock
+ * @id: index of the specific hwspinlock to unregister
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns the address of hwspinlock @id on success, or NULL on failure
+ */
+struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+{
+ struct hwspinlock *hwlock = NULL;
+ int ret;
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* make sure the hwspinlock is not in use (tag is set) */
+ ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+ if (ret == 0) {
+ pr_err("hwspinlock %d still in use (or not present)\n", id);
+ goto out;
+ }
+
+ hwlock = radix_tree_delete(&hwspinlock_tree, id);
+ if (!hwlock) {
+ pr_err("failed to delete hwspinlock %d\n", id);
+ goto out;
+ }
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
+
+/**
+ * __hwspin_lock_request() - tag an hwspinlock as used and power it up
+ *
+ * This is an internal function that prepares an hwspinlock instance
+ * before it is given to the user. The function assumes that
+ * hwspinlock_tree_lock is taken.
+ *
+ * Returns 0 or positive to indicate success, and a negative value to
+ * indicate an error (with the appropriate error code)
+ */
+static int __hwspin_lock_request(struct hwspinlock *hwlock)
+{
+ struct hwspinlock *tmp;
+ int ret;
+
+ /* prevent underlying implementation from being removed */
+ if (!try_module_get(hwlock->owner)) {
+ dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
+ return -EINVAL;
+ }
+
+ /* notify PM core that power is now needed */
+ ret = pm_runtime_get_sync(hwlock->dev);
+ if (ret < 0) {
+ dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
+ return ret;
+ }
+
+ /* mark hwspinlock as used, should not fail */
+ tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+
+ /* self-sanity check that should never fail */
+ WARN_ON(tmp != hwlock);
+
+ return ret;
+}
+
+/**
+ * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
+ * @hwlock: a valid hwspinlock instance
+ *
+ * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
+ */
+int hwspin_lock_get_id(struct hwspinlock *hwlock)
+{
+ if (!hwlock) {
+ pr_err("invalid hwlock\n");
+ return -EINVAL;
+ }
+
+ return hwlock->id;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
+
+/**
+ * hwspin_lock_request() - request an hwspinlock
+ *
+ * This function should be called by users of the hwspinlock device,
+ * in order to dynamically assign them an unused hwspinlock.
+ * Usually the user of this lock will then have to communicate the lock's id
+ * to the remote core before it can be used for synchronization (to get the
+ * id of a given hwlock, use hwspin_lock_get_id()).
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request(void)
+{
+ struct hwspinlock *hwlock;
+ int ret;
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* look for an unused lock */
+ ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
+ 0, 1, HWSPINLOCK_UNUSED);
+ if (ret == 0) {
+ pr_warn("a free hwspinlock is not available\n");
+ hwlock = NULL;
+ goto out;
+ }
+
+ /* sanity check that should never fail */
+ WARN_ON(ret > 1);
+
+ /* mark as used and power up */
+ ret = __hwspin_lock_request(hwlock);
+ if (ret < 0)
+ hwlock = NULL;
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request);
+
+/**
+ * hwspin_lock_request_specific() - request for a specific hwspinlock
+ * @id: index of the specific hwspinlock that is requested
+ *
+ * This function should be called by users of the hwspinlock module,
+ * in order to assign them a specific hwspinlock.
+ * Usually early board code will be calling this function in order to
+ * reserve specific hwspinlock ids for predefined purposes.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
+{
+ struct hwspinlock *hwlock;
+ int ret;
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* make sure this hwspinlock exists */
+ hwlock = radix_tree_lookup(&hwspinlock_tree, id);
+ if (!hwlock) {
+ pr_warn("hwspinlock %u does not exist\n", id);
+ goto out;
+ }
+
+ /* sanity check (this shouldn't happen) */
+ WARN_ON(hwlock->id != id);
+
+ /* make sure this hwspinlock is unused */
+ ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+ if (ret == 0) {
+ pr_warn("hwspinlock %u is already in use\n", id);
+ hwlock = NULL;
+ goto out;
+ }
+
+ /* mark as used and power up */
+ ret = __hwspin_lock_request(hwlock);
+ if (ret < 0)
+ hwlock = NULL;
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
+
+/**
+ * hwspin_lock_free() - free a specific hwspinlock
+ * @hwlock: the specific hwspinlock to free
+ *
+ * This function marks @hwlock as free again.
+ * Should only be called with an @hwlock that was retrieved from
+ * an earlier call to hwspin_lock_request{_specific}.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_free(struct hwspinlock *hwlock)
+{
+ struct hwspinlock *tmp;
+ int ret;
+
+ if (!hwlock) {
+ pr_err("invalid hwlock\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* make sure the hwspinlock is used */
+ ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+ if (ret == 1) {
+ dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
+ dump_stack();
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* notify the underlying device that power is not needed */
+ ret = pm_runtime_put(hwlock->dev);
+ if (ret < 0)
+ goto out;
+
+ /* mark this hwspinlock as available */
+ tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+
+ /* sanity check (this shouldn't happen) */
+ WARN_ON(tmp != hwlock);
+
+ module_put(hwlock->owner);
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_free);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock interface");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
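A quick usage sketch may help tie the exported entry points above together. The snippet below is illustrative only and not part of this patch: the shared-counter register, the 100 msec timeout and the choice of HWLOCK_IRQSTATE are assumptions, and real users would normally go through convenience wrappers in the public header rather than call the __hwspin_* functions directly.

/*
 * Illustrative sketch, not part of this patch: update a counter shared
 * with a remote core under a hwspinlock, disabling local interrupts and
 * saving their state while the lock is held.
 */
static int update_shared_counter(struct hwspinlock *hwlock, u32 __iomem *ctr)
{
	unsigned long flags;
	int ret;

	/* busy-wait for at most 100 msecs */
	ret = __hwspin_lock_timeout(hwlock, 100, HWLOCK_IRQSTATE, &flags);
	if (ret)
		return ret;	/* -ETIMEDOUT if the remote core kept it busy */

	/* critical section: keep it as short as possible */
	writel(readl(ctr) + 1, ctr);

	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, &flags);
	return 0;
}

The hwlock itself would come from hwspin_lock_request() or hwspin_lock_request_specific(), and be handed back with hwspin_lock_free() once the cooperation with the remote core is over.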
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
new file mode 100644
index 000000000000..69935e6b93e5
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -0,0 +1,61 @@
+/*
+ * Hardware spinlocks internal header
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HWSPINLOCK_HWSPINLOCK_H
+#define __HWSPINLOCK_HWSPINLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+
+/**
+ * struct hwspinlock_ops - platform-specific hwspinlock handlers
+ *
+ * @trylock: make a single attempt to take the lock. returns 0 on
+ * failure and non-zero on success. may _not_ sleep.
+ * @unlock: release the lock. always succeeds. may _not_ sleep.
+ * @relax: optional, platform-specific relax handler, called by hwspinlock
+ * core while spinning on a lock, between two successive
+ * invocations of @trylock. may _not_ sleep.
+ */
+struct hwspinlock_ops {
+ int (*trylock)(struct hwspinlock *lock);
+ void (*unlock)(struct hwspinlock *lock);
+ void (*relax)(struct hwspinlock *lock);
+};
+
+/**
+ * struct hwspinlock - this struct represents a single hwspinlock instance
+ *
+ * @dev: underlying device, will be used to invoke runtime PM api
+ * @ops: platform-specific hwspinlock handlers
+ * @id: a global, unique, system-wide index of the lock.
+ * @lock: initialized and used by hwspinlock core
+ * @owner: underlying implementation module, used to maintain module ref count
+ *
+ * Note: we currently opt for simplicity; later we could save some memory
+ * by grouping the dev, ops and owner members in a single per-platform
+ * struct, and having all hwspinlocks point at it.
+ */
+struct hwspinlock {
+ struct device *dev;
+ const struct hwspinlock_ops *ops;
+ int id;
+ spinlock_t lock;
+ struct module *owner;
+};
+
+#endif /* __HWSPINLOCK_HWSPINLOCK_H */
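To make the ops contract above concrete, a minimal and purely hypothetical memory-mapped backend could embed struct hwspinlock and wire up trylock/unlock as sketched below; the foo_* names and the one-register-per-lock layout are invented for illustration, and the OMAP driver added next is the real, complete example.

/* Hypothetical backend sketch: one MMIO word per lock, 0 means free. */
struct foo_hwspinlock {
	struct hwspinlock lock;
	void __iomem *addr;
};

static int foo_hwspinlock_trylock(struct hwspinlock *lock)
{
	struct foo_hwspinlock *foo =
		container_of(lock, struct foo_hwspinlock, lock);

	/* reading the register acquires the lock; reading 0 means we got it */
	return readl(foo->addr) == 0;
}

static void foo_hwspinlock_unlock(struct hwspinlock *lock)
{
	struct foo_hwspinlock *foo =
		container_of(lock, struct foo_hwspinlock, lock);

	/* writing 0 releases the lock */
	writel(0, foo->addr);
}

static const struct hwspinlock_ops foo_hwspinlock_ops = {
	.trylock	= foo_hwspinlock_trylock,
	.unlock		= foo_hwspinlock_unlock,
	/* .relax is optional */
};

Each lock instance then gets its dev, owner, id and ops members filled in and is handed to hwspin_lock_register().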
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
new file mode 100644
index 000000000000..a8f02734c026
--- /dev/null
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -0,0 +1,231 @@
+/*
+ * OMAP hardware spinlock driver
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Simon Que <sque@ti.com>
+ * Hari Kanigeri <h-kanigeri2@ti.com>
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/hwspinlock.h>
+#include <linux/platform_device.h>
+
+#include "hwspinlock_internal.h"
+
+/* Spinlock register offsets */
+#define SYSSTATUS_OFFSET 0x0014
+#define LOCK_BASE_OFFSET 0x0800
+
+#define SPINLOCK_NUMLOCKS_BIT_OFFSET (24)
+
+/* Possible values of SPINLOCK_LOCK_REG */
+#define SPINLOCK_NOTTAKEN (0) /* free */
+#define SPINLOCK_TAKEN (1) /* locked */
+
+#define to_omap_hwspinlock(lock) \
+ container_of(lock, struct omap_hwspinlock, lock)
+
+struct omap_hwspinlock {
+ struct hwspinlock lock;
+ void __iomem *addr;
+};
+
+struct omap_hwspinlock_state {
+ int num_locks; /* Total number of locks in system */
+ void __iomem *io_base; /* Mapped base address */
+};
+
+static int omap_hwspinlock_trylock(struct hwspinlock *lock)
+{
+ struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+ /* attempt to acquire the lock by reading its value */
+ return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr));
+}
+
+static void omap_hwspinlock_unlock(struct hwspinlock *lock)
+{
+ struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+ /* release the lock by writing 0 to it */
+ writel(SPINLOCK_NOTTAKEN, omap_lock->addr);
+}
+
+/*
+ * relax the OMAP interconnect while spinning on it.
+ *
+ * The specs recommend that the retry delay be just over half of the
+ * time that a requester is expected to hold the lock.
+ *
+ * The number below is taken from a hardware specs example, so it is
+ * somewhat arbitrary.
+ */
+static void omap_hwspinlock_relax(struct hwspinlock *lock)
+{
+ ndelay(50);
+}
+
+static const struct hwspinlock_ops omap_hwspinlock_ops = {
+ .trylock = omap_hwspinlock_trylock,
+ .unlock = omap_hwspinlock_unlock,
+ .relax = omap_hwspinlock_relax,
+};
+
+static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct omap_hwspinlock *omap_lock;
+ struct omap_hwspinlock_state *state;
+ struct hwspinlock *lock;
+ struct resource *res;
+ void __iomem *io_base;
+ int i, ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ io_base = ioremap(res->start, resource_size(res));
+ if (!io_base) {
+ ret = -ENOMEM;
+ goto free_state;
+ }
+
+ /* Determine number of locks */
+ i = readl(io_base + SYSSTATUS_OFFSET);
+ i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;
+
+ /* one of the four LSBs must be set, and nothing else */
+ if (hweight_long(i & 0xf) != 1 || i > 8) {
+ ret = -EINVAL;
+ goto iounmap_base;
+ }
+
+ state->num_locks = i * 32;
+ state->io_base = io_base;
+
+ platform_set_drvdata(pdev, state);
+
+ /*
+ * runtime PM will make sure the clock of this module is
+ * enabled iff at least one lock is requested
+ */
+ pm_runtime_enable(&pdev->dev);
+
+ for (i = 0; i < state->num_locks; i++) {
+ omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL);
+ if (!omap_lock) {
+ ret = -ENOMEM;
+ goto free_locks;
+ }
+
+ omap_lock->lock.dev = &pdev->dev;
+ omap_lock->lock.owner = THIS_MODULE;
+ omap_lock->lock.id = i;
+ omap_lock->lock.ops = &omap_hwspinlock_ops;
+ omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
+
+ ret = hwspin_lock_register(&omap_lock->lock);
+ if (ret) {
+ kfree(omap_lock);
+ goto free_locks;
+ }
+ }
+
+ return 0;
+
+free_locks:
+ while (--i >= 0) {
+ lock = hwspin_lock_unregister(i);
+ /* this shouldn't happen, but let's give it our best effort */
+ if (!lock) {
+ dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
+ continue;
+ }
+ omap_lock = to_omap_hwspinlock(lock);
+ kfree(omap_lock);
+ }
+ pm_runtime_disable(&pdev->dev);
+iounmap_base:
+ iounmap(io_base);
+free_state:
+ kfree(state);
+ return ret;
+}
+
+static int omap_hwspinlock_remove(struct platform_device *pdev)
+{
+ struct omap_hwspinlock_state *state = platform_get_drvdata(pdev);
+ struct hwspinlock *lock;
+ struct omap_hwspinlock *omap_lock;
+ int i;
+
+ for (i = 0; i < state->num_locks; i++) {
+ lock = hwspin_lock_unregister(i);
+ /* this shouldn't happen at this point. if it does, at least
+ * don't continue with the remove */
+ if (!lock) {
+ dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i);
+ return -EBUSY;
+ }
+
+ omap_lock = to_omap_hwspinlock(lock);
+ kfree(omap_lock);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+ iounmap(state->io_base);
+ kfree(state);
+
+ return 0;
+}
+
+static struct platform_driver omap_hwspinlock_driver = {
+ .probe = omap_hwspinlock_probe,
+ .remove = omap_hwspinlock_remove,
+ .driver = {
+ .name = "omap_hwspinlock",
+ },
+};
+
+static int __init omap_hwspinlock_init(void)
+{
+ return platform_driver_register(&omap_hwspinlock_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(omap_hwspinlock_init);
+
+static void __exit omap_hwspinlock_exit(void)
+{
+ platform_driver_unregister(&omap_hwspinlock_driver);
+}
+module_exit(omap_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
+MODULE_AUTHOR("Simon Que <sque@ti.com>");
+MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
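As the comment next to postcore_initcall() notes, board init code may want to reserve locks for predefined purposes. A hedged sketch of such board code follows; the lock id 0, the IPC use case and the function name are assumptions, and it must only run after the hwspinlock platform device has probed.

/* Hypothetical board code: reserve hwspinlock 0 for early IPC use. */
static int board_reserve_ipc_lock(void)
{
	struct hwspinlock *ipc_lock;

	ipc_lock = hwspin_lock_request_specific(0);
	if (!ipc_lock)
		return -EBUSY;

	/* publish the id so the remote core agrees on which lock to use */
	pr_info("reserved hwspinlock %d for IPC\n",
		hwspin_lock_get_id(ipc_lock));
	return 0;
}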
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index 23ac61e2db39..beee6b2d361d 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_I2C_MUX) += i2c-mux.o
obj-y += algos/ busses/ muxes/
ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
+CFLAGS_i2c-core.o := -Wno-deprecated-declarations
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index cbfcf6fb4a61..326652f673f7 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -98,8 +98,9 @@ config I2C_I801
EP80579 (Tolapai)
ICH10
5/3400 Series (PCH)
- Cougar Point (PCH)
+ 6 Series (PCH)
Patsburg (PCH)
+ DH89xxCC (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -433,7 +434,7 @@ config I2C_IXP2000
config I2C_MPC
tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
- depends on PPC32
+ depends on PPC
help
If you say yes to this option, support will be included for the
built-in I2C interface on the MPC107, Tsi107, MPC512x, MPC52xx,
@@ -452,6 +453,16 @@ config I2C_MV64XXX
This driver can also be built as a module. If so, the module
will be called i2c-mv64xxx.
+config I2C_MXS
+ tristate "Freescale i.MX28 I2C interface"
+ depends on SOC_IMX28
+ help
+ Say Y here if you want to use the I2C bus controller on
+ the Freescale i.MX28 processors.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mxs.
+
config I2C_NOMADIK
tristate "ST-Ericsson Nomadik/Ux500 I2C Controller"
depends on PLAT_NOMADIK
@@ -536,15 +547,18 @@ config I2C_PUV3
config I2C_PXA
tristate "Intel PXA2XX I2C adapter"
- depends on ARCH_PXA || ARCH_MMP
+ depends on ARCH_PXA || ARCH_MMP || (X86_32 && PCI && OF)
help
If you have devices in the PXA I2C bus, say yes to this option.
This driver can also be built as a module. If so, the module
will be called i2c-pxa.
+config I2C_PXA_PCI
+ def_bool I2C_PXA && X86_32 && PCI && OF
+
config I2C_PXA_SLAVE
bool "Intel PXA2XX I2C Slave comms support"
- depends on I2C_PXA
+ depends on I2C_PXA && !X86_32
help
Support I2C slave mode communications on the PXA I2C bus. This
is necessary for systems where the PXA may be a target on the
@@ -618,6 +632,13 @@ config I2C_STU300
This driver can also be built as a module. If so, the module
will be called i2c-stu300.
+config I2C_TEGRA
+ tristate "NVIDIA Tegra internal I2C controller"
+ depends on ARCH_TEGRA
+ help
+ If you say yes to this option, support will be included for the
+ I2C controller embedded in NVIDIA Tegra SOCs
+
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
@@ -650,15 +671,28 @@ config I2C_XILINX
will be called xilinx_i2c.
config I2C_EG20T
- tristate "PCH I2C of Intel EG20T"
- depends on PCI
- help
- This driver is for PCH(Platform controller Hub) I2C of EG20T which
- is an IOH(Input/Output Hub) for x86 embedded processor.
- This driver can access PCH I2C bus device.
+ tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH"
+ depends on PCI
+ help
+ This driver is for the PCH (Platform Controller Hub) I2C of the EG20T,
+ which is an IOH (Input/Output Hub) for x86 embedded processors.
+ This driver can access the PCH I2C bus device.
+
+ This driver also supports the ML7213, a companion chip for the
+ Atom E6xx series and compatible with the Intel EG20T PCH.
comment "External I2C/SMBus adapter drivers"
+config I2C_DIOLAN_U2C
+ tristate "Diolan U2C-12 USB adapter"
+ depends on USB
+ help
+ If you say yes to this option, support will be included for Diolan
+ U2C-12, a USB to I2C interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-diolan-u2c.
+
config I2C_PARPORT
tristate "Parallel port adapter"
depends on PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index a83966acc5ab..e6cf294d3729 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
+obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
obj-$(CONFIG_I2C_NUC900) += i2c-nuc900.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
@@ -53,18 +54,21 @@ obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
+obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
+obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
# External I2C/SMBus adapter drivers
+obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
new file mode 100644
index 000000000000..76366716a854
--- /dev/null
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -0,0 +1,535 @@
+/*
+ * Driver for the Diolan u2c-12 USB-I2C adapter
+ *
+ * Copyright (c) 2010-2011 Ericsson AB
+ *
+ * Derived from:
+ * i2c-tiny-usb.c
+ * Copyright (C) 2006-2007 Till Harbaum (Till@Harbaum.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+
+#define DRIVER_NAME "i2c-diolan-u2c"
+
+#define USB_VENDOR_ID_DIOLAN 0x0abf
+#define USB_DEVICE_ID_DIOLAN_U2C 0x3370
+
+#define DIOLAN_OUT_EP 0x02
+#define DIOLAN_IN_EP 0x84
+
+/* commands via USB, must match command ids in the firmware */
+#define CMD_I2C_READ 0x01
+#define CMD_I2C_WRITE 0x02
+#define CMD_I2C_SCAN 0x03 /* Returns list of detected devices */
+#define CMD_I2C_RELEASE_SDA 0x04
+#define CMD_I2C_RELEASE_SCL 0x05
+#define CMD_I2C_DROP_SDA 0x06
+#define CMD_I2C_DROP_SCL 0x07
+#define CMD_I2C_READ_SDA 0x08
+#define CMD_I2C_READ_SCL 0x09
+#define CMD_GET_FW_VERSION 0x0a
+#define CMD_GET_SERIAL 0x0b
+#define CMD_I2C_START 0x0c
+#define CMD_I2C_STOP 0x0d
+#define CMD_I2C_REPEATED_START 0x0e
+#define CMD_I2C_PUT_BYTE 0x0f
+#define CMD_I2C_GET_BYTE 0x10
+#define CMD_I2C_PUT_ACK 0x11
+#define CMD_I2C_GET_ACK 0x12
+#define CMD_I2C_PUT_BYTE_ACK 0x13
+#define CMD_I2C_GET_BYTE_ACK 0x14
+#define CMD_I2C_SET_SPEED 0x1b
+#define CMD_I2C_GET_SPEED 0x1c
+#define CMD_I2C_SET_CLK_SYNC 0x24
+#define CMD_I2C_GET_CLK_SYNC 0x25
+#define CMD_I2C_SET_CLK_SYNC_TO 0x26
+#define CMD_I2C_GET_CLK_SYNC_TO 0x27
+
+#define RESP_OK 0x00
+#define RESP_FAILED 0x01
+#define RESP_BAD_MEMADDR 0x04
+#define RESP_DATA_ERR 0x05
+#define RESP_NOT_IMPLEMENTED 0x06
+#define RESP_NACK 0x07
+#define RESP_TIMEOUT 0x09
+
+#define U2C_I2C_SPEED_FAST 0 /* 400 kHz */
+#define U2C_I2C_SPEED_STD 1 /* 100 kHz */
+#define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */
+#define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
+
+#define U2C_I2C_FREQ_FAST 400000
+#define U2C_I2C_FREQ_STD 100000
+#define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10))
+
+#define DIOLAN_USB_TIMEOUT 100 /* in ms */
+#define DIOLAN_SYNC_TIMEOUT 20 /* in ms */
+
+#define DIOLAN_OUTBUF_LEN 128
+#define DIOLAN_FLUSH_LEN (DIOLAN_OUTBUF_LEN - 4)
+#define DIOLAN_INBUF_LEN 256 /* Maximum supported receive length */
+
+/* Structure to hold all of our device specific stuff */
+struct i2c_diolan_u2c {
+ u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */
+ u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */
+ struct usb_device *usb_dev; /* the usb device for this device */
+ struct usb_interface *interface;/* the interface for this device */
+ struct i2c_adapter adapter; /* i2c related things */
+ int olen; /* Output buffer length */
+ int ocount; /* Number of enqueued messages */
+};
+
+static uint frequency = U2C_I2C_FREQ_STD; /* I2C clock frequency in Hz */
+
+module_param(frequency, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
+
+/* usb layer */
+
+/* Send command to device, and get response. */
+static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
+{
+ int ret = 0;
+ int actual;
+ int i;
+
+ if (!dev->olen || !dev->ocount)
+ return -EINVAL;
+
+ ret = usb_bulk_msg(dev->usb_dev,
+ usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP),
+ dev->obuffer, dev->olen, &actual,
+ DIOLAN_USB_TIMEOUT);
+ if (!ret) {
+ for (i = 0; i < dev->ocount; i++) {
+ int tmpret;
+
+ tmpret = usb_bulk_msg(dev->usb_dev,
+ usb_rcvbulkpipe(dev->usb_dev,
+ DIOLAN_IN_EP),
+ dev->ibuffer,
+ sizeof(dev->ibuffer), &actual,
+ DIOLAN_USB_TIMEOUT);
+ /*
+ * Stop command processing if a previous command
+ * returned an error.
+ * Note that we still need to retrieve all messages.
+ */
+ if (ret < 0)
+ continue;
+ ret = tmpret;
+ if (ret == 0 && actual > 0) {
+ switch (dev->ibuffer[actual - 1]) {
+ case RESP_NACK:
+ /*
+ * Return ENXIO if NACK was received as
+ * response to the address phase,
+ * EIO otherwise
+ */
+ ret = i == 1 ? -ENXIO : -EIO;
+ break;
+ case RESP_TIMEOUT:
+ ret = -ETIMEDOUT;
+ break;
+ case RESP_OK:
+ /* strip off return code */
+ ret = actual - 1;
+ break;
+ default:
+ ret = -EIO;
+ break;
+ }
+ }
+ }
+ }
+ dev->olen = 0;
+ dev->ocount = 0;
+ return ret;
+}
+
+static int diolan_write_cmd(struct i2c_diolan_u2c *dev, bool flush)
+{
+ if (flush || dev->olen >= DIOLAN_FLUSH_LEN)
+ return diolan_usb_transfer(dev);
+ return 0;
+}
+
+/* Send command (no data) */
+static int diolan_usb_cmd(struct i2c_diolan_u2c *dev, u8 command, bool flush)
+{
+ dev->obuffer[dev->olen++] = command;
+ dev->ocount++;
+ return diolan_write_cmd(dev, flush);
+}
+
+/* Send command with one byte of data */
+static int diolan_usb_cmd_data(struct i2c_diolan_u2c *dev, u8 command, u8 data,
+ bool flush)
+{
+ dev->obuffer[dev->olen++] = command;
+ dev->obuffer[dev->olen++] = data;
+ dev->ocount++;
+ return diolan_write_cmd(dev, flush);
+}
+
+/* Send command with two bytes of data */
+static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1,
+ u8 d2, bool flush)
+{
+ dev->obuffer[dev->olen++] = command;
+ dev->obuffer[dev->olen++] = d1;
+ dev->obuffer[dev->olen++] = d2;
+ dev->ocount++;
+ return diolan_write_cmd(dev, flush);
+}
+
+/*
+ * Flush input queue.
+ * If we don't do this at startup and the controller has queued up
+ * messages which were not retrieved, it will stop responding
+ * at some point.
+ */
+static void diolan_flush_input(struct i2c_diolan_u2c *dev)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ int actual = 0;
+ int ret;
+
+ ret = usb_bulk_msg(dev->usb_dev,
+ usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP),
+ dev->ibuffer, sizeof(dev->ibuffer), &actual,
+ DIOLAN_USB_TIMEOUT);
+ if (ret < 0 || actual == 0)
+ break;
+ }
+ if (i == 10)
+ dev_err(&dev->interface->dev, "Failed to flush input buffer\n");
+}
+
+static int diolan_i2c_start(struct i2c_diolan_u2c *dev)
+{
+ return diolan_usb_cmd(dev, CMD_I2C_START, false);
+}
+
+static int diolan_i2c_repeated_start(struct i2c_diolan_u2c *dev)
+{
+ return diolan_usb_cmd(dev, CMD_I2C_REPEATED_START, false);
+}
+
+static int diolan_i2c_stop(struct i2c_diolan_u2c *dev)
+{
+ return diolan_usb_cmd(dev, CMD_I2C_STOP, true);
+}
+
+static int diolan_i2c_get_byte_ack(struct i2c_diolan_u2c *dev, bool ack,
+ u8 *byte)
+{
+ int ret;
+
+ ret = diolan_usb_cmd_data(dev, CMD_I2C_GET_BYTE_ACK, ack, true);
+ if (ret > 0)
+ *byte = dev->ibuffer[0];
+ else if (ret == 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int diolan_i2c_put_byte_ack(struct i2c_diolan_u2c *dev, u8 byte)
+{
+ return diolan_usb_cmd_data(dev, CMD_I2C_PUT_BYTE_ACK, byte, false);
+}
+
+static int diolan_set_speed(struct i2c_diolan_u2c *dev, u8 speed)
+{
+ return diolan_usb_cmd_data(dev, CMD_I2C_SET_SPEED, speed, true);
+}
+
+/* Enable or disable clock synchronization (stretching) */
+static int diolan_set_clock_synch(struct i2c_diolan_u2c *dev, bool enable)
+{
+ return diolan_usb_cmd_data(dev, CMD_I2C_SET_CLK_SYNC, enable, true);
+}
+
+/* Set clock synchronization timeout in ms */
+static int diolan_set_clock_synch_timeout(struct i2c_diolan_u2c *dev, int ms)
+{
+ int to_val = ms * 10;
+
+ return diolan_usb_cmd_data2(dev, CMD_I2C_SET_CLK_SYNC_TO,
+ to_val & 0xff, (to_val >> 8) & 0xff, true);
+}
+
+static void diolan_fw_version(struct i2c_diolan_u2c *dev)
+{
+ int ret;
+
+ ret = diolan_usb_cmd(dev, CMD_GET_FW_VERSION, true);
+ if (ret >= 2)
+ dev_info(&dev->interface->dev,
+ "Diolan U2C firmware version %u.%u\n",
+ (unsigned int)dev->ibuffer[0],
+ (unsigned int)dev->ibuffer[1]);
+}
+
+static void diolan_get_serial(struct i2c_diolan_u2c *dev)
+{
+ int ret;
+ u32 serial;
+
+ ret = diolan_usb_cmd(dev, CMD_GET_SERIAL, true);
+ if (ret >= 4) {
+ serial = le32_to_cpu(*(u32 *)dev->ibuffer);
+ dev_info(&dev->interface->dev,
+ "Diolan U2C serial number %u\n", serial);
+ }
+}
+
+static int diolan_init(struct i2c_diolan_u2c *dev)
+{
+ int speed, ret;
+
+ if (frequency >= 200000) {
+ speed = U2C_I2C_SPEED_FAST;
+ frequency = U2C_I2C_FREQ_FAST;
+ } else if (frequency >= 100000 || frequency == 0) {
+ speed = U2C_I2C_SPEED_STD;
+ frequency = U2C_I2C_FREQ_STD;
+ } else {
+ speed = U2C_I2C_SPEED(frequency);
+ if (speed > U2C_I2C_SPEED_2KHZ)
+ speed = U2C_I2C_SPEED_2KHZ;
+ frequency = U2C_I2C_FREQ(speed);
+ }
+
+ dev_info(&dev->interface->dev,
+ "Diolan U2C at USB bus %03d address %03d speed %d Hz\n",
+ dev->usb_dev->bus->busnum, dev->usb_dev->devnum, frequency);
+
+ diolan_flush_input(dev);
+ diolan_fw_version(dev);
+ diolan_get_serial(dev);
+
+ /* Set I2C speed */
+ ret = diolan_set_speed(dev, speed);
+ if (ret < 0)
+ return ret;
+
+ /* Configure I2C clock synchronization */
+ ret = diolan_set_clock_synch(dev, speed != U2C_I2C_SPEED_FAST);
+ if (ret < 0)
+ return ret;
+
+ if (speed != U2C_I2C_SPEED_FAST)
+ ret = diolan_set_clock_synch_timeout(dev, DIOLAN_SYNC_TIMEOUT);
+
+ return ret;
+}
+
+/* i2c layer */
+
+static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ struct i2c_diolan_u2c *dev = i2c_get_adapdata(adapter);
+ struct i2c_msg *pmsg;
+ int i, j;
+ int ret, sret;
+
+ ret = diolan_i2c_start(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num; i++) {
+ pmsg = &msgs[i];
+ if (i) {
+ ret = diolan_i2c_repeated_start(dev);
+ if (ret < 0)
+ goto abort;
+ }
+ if (pmsg->flags & I2C_M_RD) {
+ ret =
+ diolan_i2c_put_byte_ack(dev, (pmsg->addr << 1) | 1);
+ if (ret < 0)
+ goto abort;
+ for (j = 0; j < pmsg->len; j++) {
+ u8 byte;
+ bool ack = j < pmsg->len - 1;
+
+ /*
+ * Don't send NACK if this is the first byte
+ * of a SMBUS_BLOCK message.
+ */
+ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN))
+ ack = true;
+
+ ret = diolan_i2c_get_byte_ack(dev, ack, &byte);
+ if (ret < 0)
+ goto abort;
+ /*
+ * Adjust count if first received byte is length
+ */
+ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) {
+ if (byte == 0
+ || byte > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EPROTO;
+ goto abort;
+ }
+ pmsg->len += byte;
+ }
+ pmsg->buf[j] = byte;
+ }
+ } else {
+ ret = diolan_i2c_put_byte_ack(dev, pmsg->addr << 1);
+ if (ret < 0)
+ goto abort;
+ for (j = 0; j < pmsg->len; j++) {
+ ret = diolan_i2c_put_byte_ack(dev,
+ pmsg->buf[j]);
+ if (ret < 0)
+ goto abort;
+ }
+ }
+ }
+abort:
+ sret = diolan_i2c_stop(dev);
+ if (sret < 0 && ret >= 0)
+ ret = sret;
+ return ret;
+}
+
+/*
+ * Return list of supported functionality.
+ */
+static u32 diolan_usb_func(struct i2c_adapter *a)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
+}
+
+static const struct i2c_algorithm diolan_usb_algorithm = {
+ .master_xfer = diolan_usb_xfer,
+ .functionality = diolan_usb_func,
+};
+
+/* device layer */
+
+static const struct usb_device_id diolan_u2c_table[] = {
+ { USB_DEVICE(USB_VENDOR_ID_DIOLAN, USB_DEVICE_ID_DIOLAN_U2C) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, diolan_u2c_table);
+
+static void diolan_u2c_free(struct i2c_diolan_u2c *dev)
+{
+ usb_put_dev(dev->usb_dev);
+ kfree(dev);
+}
+
+static int diolan_u2c_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct i2c_diolan_u2c *dev;
+ int ret;
+
+ /* allocate memory for our device state and initialize it */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&interface->dev, "no memory for device state\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
+ dev->interface = interface;
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, dev);
+
+ /* setup i2c adapter description */
+ dev->adapter.owner = THIS_MODULE;
+ dev->adapter.class = I2C_CLASS_HWMON;
+ dev->adapter.algo = &diolan_usb_algorithm;
+ i2c_set_adapdata(&dev->adapter, dev);
+ snprintf(dev->adapter.name, sizeof(dev->adapter.name),
+ DRIVER_NAME " at bus %03d device %03d",
+ dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
+
+ dev->adapter.dev.parent = &dev->interface->dev;
+
+ /* initialize diolan i2c interface */
+ ret = diolan_init(dev);
+ if (ret < 0) {
+ dev_err(&interface->dev, "failed to initialize adapter\n");
+ goto error_free;
+ }
+
+ /* and finally attach to i2c layer */
+ ret = i2c_add_adapter(&dev->adapter);
+ if (ret < 0) {
+ dev_err(&interface->dev, "failed to add I2C adapter\n");
+ goto error_free;
+ }
+
+ dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n");
+
+ return 0;
+
+error_free:
+ usb_set_intfdata(interface, NULL);
+ diolan_u2c_free(dev);
+error:
+ return ret;
+}
+
+static void diolan_u2c_disconnect(struct usb_interface *interface)
+{
+ struct i2c_diolan_u2c *dev = usb_get_intfdata(interface);
+
+ i2c_del_adapter(&dev->adapter);
+ usb_set_intfdata(interface, NULL);
+ diolan_u2c_free(dev);
+
+ dev_dbg(&interface->dev, "disconnected\n");
+}
+
+static struct usb_driver diolan_u2c_driver = {
+ .name = DRIVER_NAME,
+ .probe = diolan_u2c_probe,
+ .disconnect = diolan_u2c_disconnect,
+ .id_table = diolan_u2c_table,
+};
+
+static int __init diolan_u2c_init(void)
+{
+ /* register this driver with the USB subsystem */
+ return usb_register(&diolan_u2c_driver);
+}
+
+static void __exit diolan_u2c_exit(void)
+{
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&diolan_u2c_driver);
+}
+
+module_init(diolan_u2c_init);
+module_exit(diolan_u2c_exit);
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_DESCRIPTION(DRIVER_NAME " driver");
+MODULE_LICENSE("GPL");
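The U2C_I2C_SPEED()/U2C_I2C_FREQ() macros near the top of the file are easiest to follow with numbers. The standalone user-space snippet below mirrors them (illustrative only, not part of the driver; DIV_ROUND_UP is expanded by hand); for a requested 50 kHz it yields speed code 6, which maps back to exactly 50 kHz.

#include <stdio.h>

/* Mirror of the driver's conversion macros. */
#define U2C_I2C_SPEED(f)	(((1000000 + (f) - 1) / (f) - 10) / 2 + 1)
#define U2C_I2C_FREQ(s)		(1000000 / (2 * ((s) - 1) + 10))

int main(void)
{
	unsigned int f = 50000;			/* requested bus frequency in Hz */
	unsigned int speed = U2C_I2C_SPEED(f);	/* device speed code: 6 */

	printf("freq %u Hz -> speed code %u -> actual %u Hz\n",
	       f, speed, U2C_I2C_FREQ(speed));
	return 0;
}

Note that diolan_init() snaps anything at or above 200 kHz to the fixed fast code, 100-200 kHz (or 0) to the standard code, and clamps slower requests to U2C_I2C_SPEED_2KHZ, so the frequency module parameter is effectively a hint.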
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 50ea1f43bdc1..878a12026af2 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -132,6 +132,13 @@
#define pch_pci_dbg(pdev, fmt, arg...) \
dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg)
+/*
+ * Maximum number of I2C channel instances:
+ * Intel EG20T PCH              : 1 channel
+ * OKI SEMICONDUCTOR ML7213 IOH : 2 channels
+ */
+#define PCH_I2C_MAX_DEV 2
+
/**
* struct i2c_algo_pch_data - for I2C driver functionalities
* @pch_adapter: stores the reference to i2c_adapter structure
@@ -156,12 +163,14 @@ struct i2c_algo_pch_data {
* @pch_data: stores a list of i2c_algo_pch_data
* @pch_i2c_suspended: specifies whether the system is suspended or not
* perhaps with more lines and words.
+ * @ch_num: number of I2C channel instances
*
* pch_data has as many elements as maximum I2C channels
*/
struct adapter_info {
- struct i2c_algo_pch_data pch_data;
+ struct i2c_algo_pch_data pch_data[PCH_I2C_MAX_DEV];
bool pch_i2c_suspended;
+ int ch_num;
};
@@ -170,8 +179,13 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
static wait_queue_head_t pch_event;
static DEFINE_MUTEX(pch_mutex);
+/* Definition for ML7213 by OKI SEMICONDUCTOR */
+#define PCI_VENDOR_ID_ROHM 0x10DB
+#define PCI_DEVICE_ID_ML7213_I2C 0x802D
+
static struct pci_device_id __devinitdata pch_pcidev_id[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_I2C)},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
{0,}
};
@@ -212,8 +226,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
/* Initialize I2C registers */
iowrite32(0x21, p + PCH_I2CNF);
- pch_setbit(adap->pch_base_address, PCH_I2CCTL,
- PCH_I2CCTL_I2CMEN);
+ pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_I2CCTL_I2CMEN);
if (pch_i2c_speed != 400)
pch_i2c_speed = 100;
@@ -255,7 +268,7 @@ static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
* @timeout: waiting time counter (us).
*/
static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap,
- s32 timeout)
+ s32 timeout)
{
void __iomem *p = adap->pch_base_address;
@@ -475,8 +488,8 @@ static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap)
* @last: specifies whether last message or not.
* @first: specifies whether first message or not.
*/
-s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
- u32 last, u32 first)
+static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
+ u32 last, u32 first)
{
struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
@@ -569,10 +582,10 @@ s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
}
/**
- * pch_i2c_cb_ch0() - Interrupt handler Call back function
+ * pch_i2c_cb() - Interrupt handler Call back function
* @adap: Pointer to struct i2c_algo_pch_data.
*/
-static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
+static void pch_i2c_cb(struct i2c_algo_pch_data *adap)
{
u32 sts;
void __iomem *p = adap->pch_base_address;
@@ -600,24 +613,30 @@ static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
*/
static irqreturn_t pch_i2c_handler(int irq, void *pData)
{
- s32 reg_val;
-
- struct i2c_algo_pch_data *adap_data = (struct i2c_algo_pch_data *)pData;
- void __iomem *p = adap_data->pch_base_address;
- u32 mode = ioread32(p + PCH_I2CMOD) & (BUFFER_MODE | EEPROM_SR_MODE);
-
- if (mode != NORMAL_MODE) {
- pch_err(adap_data, "I2C mode is not supported\n");
- return IRQ_NONE;
+ u32 reg_val;
+ int flag;
+ int i;
+ struct adapter_info *adap_info = pData;
+ void __iomem *p;
+ u32 mode;
+
+ for (i = 0, flag = 0; i < adap_info->ch_num; i++) {
+ p = adap_info->pch_data[i].pch_base_address;
+ mode = ioread32(p + PCH_I2CMOD);
+ mode &= BUFFER_MODE | EEPROM_SR_MODE;
+ if (mode != NORMAL_MODE) {
+ pch_err(adap_info->pch_data,
+ "I2C-%d mode(%d) is not supported\n", i, mode);
+ continue;
+ }
+ reg_val = ioread32(p + PCH_I2CSR);
+ if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT)) {
+ pch_i2c_cb(&adap_info->pch_data[i]);
+ flag = 1;
+ }
}
- reg_val = ioread32(p + PCH_I2CSR);
- if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT))
- pch_i2c_cb_ch0(adap_data);
- else
- return IRQ_NONE;
-
- return IRQ_HANDLED;
+ return flag ? IRQ_HANDLED : IRQ_NONE;
}
/**
@@ -627,7 +646,7 @@ static irqreturn_t pch_i2c_handler(int irq, void *pData)
* @num: number of messages.
*/
static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs, s32 num)
+ struct i2c_msg *msgs, s32 num)
{
struct i2c_msg *pmsg;
u32 i = 0;
@@ -710,11 +729,13 @@ static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
}
static int __devinit pch_i2c_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+ const struct pci_device_id *id)
{
void __iomem *base_addr;
- s32 ret;
+ int ret;
+ int i, j;
struct adapter_info *adap_info;
+ struct i2c_adapter *pch_adap;
pch_pci_dbg(pdev, "Entered.\n");
@@ -744,44 +765,48 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
goto err_pci_iomap;
}
- adap_info->pch_i2c_suspended = false;
+ /* Set the number of I2C channel instance */
+ adap_info->ch_num = id->driver_data;
- adap_info->pch_data.p_adapter_info = adap_info;
+ for (i = 0; i < adap_info->ch_num; i++) {
+ pch_adap = &adap_info->pch_data[i].pch_adapter;
+ adap_info->pch_i2c_suspended = false;
- adap_info->pch_data.pch_adapter.owner = THIS_MODULE;
- adap_info->pch_data.pch_adapter.class = I2C_CLASS_HWMON;
- strcpy(adap_info->pch_data.pch_adapter.name, KBUILD_MODNAME);
- adap_info->pch_data.pch_adapter.algo = &pch_algorithm;
- adap_info->pch_data.pch_adapter.algo_data =
- &adap_info->pch_data;
+ adap_info->pch_data[i].p_adapter_info = adap_info;
- /* (i * 0x80) + base_addr; */
- adap_info->pch_data.pch_base_address = base_addr;
+ pch_adap->owner = THIS_MODULE;
+ pch_adap->class = I2C_CLASS_HWMON;
+ strcpy(pch_adap->name, KBUILD_MODNAME);
+ pch_adap->algo = &pch_algorithm;
+ pch_adap->algo_data = &adap_info->pch_data[i];
- adap_info->pch_data.pch_adapter.dev.parent = &pdev->dev;
+ /* base_addr + offset; */
+ adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
- ret = i2c_add_adapter(&(adap_info->pch_data.pch_adapter));
+ pch_adap->dev.parent = &pdev->dev;
- if (ret) {
- pch_pci_err(pdev, "i2c_add_adapter FAILED\n");
- goto err_i2c_add_adapter;
- }
+ ret = i2c_add_adapter(pch_adap);
+ if (ret) {
+ pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
+ goto err_i2c_add_adapter;
+ }
- pch_i2c_init(&adap_info->pch_data);
+ pch_i2c_init(&adap_info->pch_data[i]);
+ }
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
- KBUILD_MODNAME, &adap_info->pch_data);
+ KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
- goto err_request_irq;
+ goto err_i2c_add_adapter;
}
pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;
-err_request_irq:
- i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
err_i2c_add_adapter:
+ for (j = 0; j < i; j++)
+ i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);
@@ -794,17 +819,22 @@ err_pci_enable:
static void __devexit pch_i2c_remove(struct pci_dev *pdev)
{
+ int i;
struct adapter_info *adap_info = pci_get_drvdata(pdev);
- pch_i2c_disbl_int(&adap_info->pch_data);
- free_irq(pdev->irq, &adap_info->pch_data);
- i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
+ free_irq(pdev->irq, adap_info);
- if (adap_info->pch_data.pch_base_address) {
- pci_iounmap(pdev, adap_info->pch_data.pch_base_address);
- adap_info->pch_data.pch_base_address = 0;
+ for (i = 0; i < adap_info->ch_num; i++) {
+ pch_i2c_disbl_int(&adap_info->pch_data[i]);
+ i2c_del_adapter(&adap_info->pch_data[i].pch_adapter);
}
+ if (adap_info->pch_data[0].pch_base_address)
+ pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
+
+ for (i = 0; i < adap_info->ch_num; i++)
+ adap_info->pch_data[i].pch_base_address = 0;
+
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
@@ -817,17 +847,22 @@ static void __devexit pch_i2c_remove(struct pci_dev *pdev)
static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
{
int ret;
+ int i;
struct adapter_info *adap_info = pci_get_drvdata(pdev);
- void __iomem *p = adap_info->pch_data.pch_base_address;
+ void __iomem *p = adap_info->pch_data[0].pch_base_address;
adap_info->pch_i2c_suspended = true;
- while ((adap_info->pch_data.pch_i2c_xfer_in_progress)) {
- /* Wait until all channel transfers are completed */
- msleep(20);
+ for (i = 0; i < adap_info->ch_num; i++) {
+ while ((adap_info->pch_data[i].pch_i2c_xfer_in_progress)) {
+ /* Wait until all channel transfers are completed */
+ msleep(20);
+ }
}
+
/* Disable the i2c interrupts */
- pch_i2c_disbl_int(&adap_info->pch_data);
+ for (i = 0; i < adap_info->ch_num; i++)
+ pch_i2c_disbl_int(&adap_info->pch_data[i]);
pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x "
"invoked function pch_i2c_disbl_int successfully\n",
@@ -850,6 +885,7 @@ static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
static int pch_i2c_resume(struct pci_dev *pdev)
{
+ int i;
struct adapter_info *adap_info = pci_get_drvdata(pdev);
pci_set_power_state(pdev, PCI_D0);
@@ -862,7 +898,8 @@ static int pch_i2c_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
- pch_i2c_init(&adap_info->pch_data);
+ for (i = 0; i < adap_info->ch_num; i++)
+ pch_i2c_init(&adap_info->pch_data[i]);
adap_info->pch_i2c_suspended = false;
@@ -894,7 +931,7 @@ static void __exit pch_pci_exit(void)
}
module_exit(pch_pci_exit);
-MODULE_DESCRIPTION("PCH I2C PCI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 7979aef7ee7b..ed2e0c5ea37c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -44,11 +44,12 @@
ICH10 0x3a30 32 hard yes yes yes
ICH10 0x3a60 32 hard yes yes yes
5/3400 Series (PCH) 0x3b30 32 hard yes yes yes
- Cougar Point (PCH) 0x1c22 32 hard yes yes yes
+ 6 Series (PCH) 0x1c22 32 hard yes yes yes
Patsburg (PCH) 0x1d22 32 hard yes yes yes
Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes
Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes
Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes
+ DH89xxCC (PCH) 0x2330 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -621,6 +622,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
new file mode 100644
index 000000000000..caf96dc8ca1b
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -0,0 +1,412 @@
+/*
+ * Freescale MXS I2C bus driver
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * based on a (non-working) driver which was:
+ *
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * TODO: add dma-support if platform-support for it is available
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+
+#include <mach/common.h>
+
+#define DRIVER_NAME "mxs-i2c"
+
+#define MXS_I2C_CTRL0 (0x00)
+#define MXS_I2C_CTRL0_SET (0x04)
+
+#define MXS_I2C_CTRL0_SFTRST 0x80000000
+#define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000
+#define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000
+#define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000
+#define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000
+#define MXS_I2C_CTRL0_MASTER_MODE 0x00020000
+#define MXS_I2C_CTRL0_DIRECTION 0x00010000
+#define MXS_I2C_CTRL0_XFER_COUNT(v) ((v) & 0x0000FFFF)
+
+#define MXS_I2C_CTRL1 (0x40)
+#define MXS_I2C_CTRL1_SET (0x44)
+#define MXS_I2C_CTRL1_CLR (0x48)
+
+#define MXS_I2C_CTRL1_BUS_FREE_IRQ 0x80
+#define MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ 0x40
+#define MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ 0x20
+#define MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ 0x10
+#define MXS_I2C_CTRL1_EARLY_TERM_IRQ 0x08
+#define MXS_I2C_CTRL1_MASTER_LOSS_IRQ 0x04
+#define MXS_I2C_CTRL1_SLAVE_STOP_IRQ 0x02
+#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01
+
+#define MXS_I2C_IRQ_MASK (MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ | \
+ MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ | \
+ MXS_I2C_CTRL1_EARLY_TERM_IRQ | \
+ MXS_I2C_CTRL1_MASTER_LOSS_IRQ | \
+ MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \
+ MXS_I2C_CTRL1_SLAVE_IRQ)
+
+#define MXS_I2C_QUEUECTRL (0x60)
+#define MXS_I2C_QUEUECTRL_SET (0x64)
+#define MXS_I2C_QUEUECTRL_CLR (0x68)
+
+#define MXS_I2C_QUEUECTRL_QUEUE_RUN 0x20
+#define MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE 0x04
+
+#define MXS_I2C_QUEUESTAT (0x70)
+#define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000
+
+#define MXS_I2C_QUEUECMD (0x80)
+
+#define MXS_I2C_QUEUEDATA (0x90)
+
+#define MXS_I2C_DATA (0xa0)
+
+
+#define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \
+ MXS_I2C_CTRL0_PRE_SEND_START | \
+ MXS_I2C_CTRL0_MASTER_MODE | \
+ MXS_I2C_CTRL0_DIRECTION | \
+ MXS_I2C_CTRL0_XFER_COUNT(1))
+
+#define MXS_CMD_I2C_WRITE (MXS_I2C_CTRL0_PRE_SEND_START | \
+ MXS_I2C_CTRL0_MASTER_MODE | \
+ MXS_I2C_CTRL0_DIRECTION)
+
+#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
+ MXS_I2C_CTRL0_MASTER_MODE)
+
+/**
+ * struct mxs_i2c_dev - per device, private MXS-I2C data
+ *
+ * @dev: driver model device node
+ * @regs: IO registers pointer
+ * @cmd_complete: completion object for transaction wait
+ * @cmd_err: error code for last transaction
+ * @adapter: i2c subsystem adapter node
+ */
+struct mxs_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct completion cmd_complete;
+ u32 cmd_err;
+ struct i2c_adapter adapter;
+};
+
+/*
+ * TODO: check if calls to here are really needed. If not, we could get rid of
+ * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
+ */
+static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
+{
+ mxs_reset_block(i2c->regs);
+ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
+ writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
+ i2c->regs + MXS_I2C_QUEUECTRL_SET);
+}
+
+static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len,
+ int flags)
+{
+ u32 data;
+
+ writel(MXS_CMD_I2C_SELECT, i2c->regs + MXS_I2C_QUEUECMD);
+
+ data = (addr << 1) | I2C_SMBUS_READ;
+ writel(data, i2c->regs + MXS_I2C_DATA);
+
+ data = MXS_CMD_I2C_READ | MXS_I2C_CTRL0_XFER_COUNT(len) | flags;
+ writel(data, i2c->regs + MXS_I2C_QUEUECMD);
+}
+
+static void mxs_i2c_pioq_setup_write(struct mxs_i2c_dev *i2c,
+ u8 addr, u8 *buf, int len, int flags)
+{
+ u32 data;
+ int i, shifts_left;
+
+ data = MXS_CMD_I2C_WRITE | MXS_I2C_CTRL0_XFER_COUNT(len + 1) | flags;
+ writel(data, i2c->regs + MXS_I2C_QUEUECMD);
+
+ /*
+ * We have to copy the slave address (u8) and buffer (arbitrary number
+ * of u8) into the data register (u32). To achieve that, the u8 are put
+ * into the MSBs of 'data' which is then shifted for the next u8. When
+ * appropriate, 'data' is written to MXS_I2C_DATA. So, the first u32
+ * looks like this:
+ *
+ * 3 2 1 0
+ * 10987654|32109876|54321098|76543210
+ * --------+--------+--------+--------
+ * buffer+2|buffer+1|buffer+0|slave_addr
+ */
+
+ data = ((addr << 1) | I2C_SMBUS_WRITE) << 24;
+
+ for (i = 0; i < len; i++) {
+ data >>= 8;
+ data |= buf[i] << 24;
+ if ((i & 3) == 2)
+ writel(data, i2c->regs + MXS_I2C_DATA);
+ }
+
+ /* Write out the remaining bytes if any */
+ shifts_left = 24 - (i & 3) * 8;
+ if (shifts_left)
+ writel(data >> shifts_left, i2c->regs + MXS_I2C_DATA);
+}
+
+/*
+ * TODO: should be replaceable with a waitqueue and RD_QUEUE_IRQ (setting the
+ * rd_threshold to 1). Couldn't get this to work, though.
+ */
+static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (readl(i2c->regs + MXS_I2C_QUEUESTAT)
+ & MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if ((i & 3) == 0) {
+ if (mxs_i2c_wait_for_data(i2c))
+ return -ETIMEDOUT;
+ data = readl(i2c->regs + MXS_I2C_QUEUEDATA);
+ }
+ buf[i] = data & 0xff;
+ data >>= 8;
+ }
+
+ return 0;
+}
+
+/*
+ * Low level master read/write transaction.
+ */
+static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int stop)
+{
+ struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+ int ret;
+ int flags;
+
+ init_completion(&i2c->cmd_complete);
+
+ dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
+ msg->addr, msg->len, msg->flags, stop);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
+ flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
+
+ if (msg->flags & I2C_M_RD)
+ mxs_i2c_pioq_setup_read(i2c, msg->addr, msg->len, flags);
+ else
+ mxs_i2c_pioq_setup_write(i2c, msg->addr, msg->buf, msg->len,
+ flags);
+
+ writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+ i2c->regs + MXS_I2C_QUEUECTRL_SET);
+
+ ret = wait_for_completion_timeout(&i2c->cmd_complete,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ goto timeout;
+
+ if ((!i2c->cmd_err) && (msg->flags & I2C_M_RD)) {
+ ret = mxs_i2c_finish_read(i2c, msg->buf, msg->len);
+ if (ret)
+ goto timeout;
+ }
+
+ if (i2c->cmd_err == -ENXIO)
+ mxs_i2c_reset(i2c);
+
+ dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
+
+ return i2c->cmd_err;
+
+timeout:
+ dev_dbg(i2c->dev, "Timeout!\n");
+ mxs_i2c_reset(i2c);
+ return -ETIMEDOUT;
+}
+
+static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < num; i++) {
+ err = mxs_i2c_xfer_msg(adap, &msgs[i], i == (num - 1));
+ if (err)
+ return err;
+ }
+
+ return num;
+}
+
+static u32 mxs_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
+{
+ struct mxs_i2c_dev *i2c = dev_id;
+ u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK;
+
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ)
+ i2c->cmd_err = -ENXIO;
+ else if (stat & (MXS_I2C_CTRL1_EARLY_TERM_IRQ |
+ MXS_I2C_CTRL1_MASTER_LOSS_IRQ |
+ MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ))
+ /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
+ i2c->cmd_err = -EIO;
+ else
+ i2c->cmd_err = 0;
+
+ complete(&i2c->cmd_complete);
+
+ writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR);
+ return IRQ_HANDLED;
+}
+
+static const struct i2c_algorithm mxs_i2c_algo = {
+ .master_xfer = mxs_i2c_xfer,
+ .functionality = mxs_i2c_func,
+};
+
+static int __devinit mxs_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxs_i2c_dev *i2c;
+ struct i2c_adapter *adap;
+ struct resource *res;
+ resource_size_t res_size;
+ int err, irq;
+
+ i2c = devm_kzalloc(dev, sizeof(struct mxs_i2c_dev), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ res_size = resource_size(res);
+ if (!devm_request_mem_region(dev, res->start, res_size, res->name))
+ return -EBUSY;
+
+ i2c->regs = devm_ioremap_nocache(dev, res->start, res_size);
+ if (!i2c->regs)
+ return -EBUSY;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, mxs_i2c_isr, 0, dev_name(dev), i2c);
+ if (err)
+ return err;
+
+ i2c->dev = dev;
+ platform_set_drvdata(pdev, i2c);
+
+ /* Do reset to enforce correct startup after pinmuxing */
+ mxs_i2c_reset(i2c);
+
+ adap = &i2c->adapter;
+ strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
+ adap->owner = THIS_MODULE;
+ adap->algo = &mxs_i2c_algo;
+ adap->dev.parent = dev;
+ adap->nr = pdev->id;
+ i2c_set_adapdata(adap, i2c);
+ err = i2c_add_numbered_adapter(adap);
+ if (err) {
+ dev_err(dev, "Failed to add adapter (%d)\n", err);
+ writel(MXS_I2C_CTRL0_SFTRST,
+ i2c->regs + MXS_I2C_CTRL0_SET);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devexit mxs_i2c_remove(struct platform_device *pdev)
+{
+ struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i2c_del_adapter(&i2c->adapter);
+ if (ret)
+ return -EBUSY;
+
+ writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+ i2c->regs + MXS_I2C_QUEUECTRL_CLR);
+ writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver mxs_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(mxs_i2c_remove),
+};
+
+static int __init mxs_i2c_init(void)
+{
+ return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe);
+}
+subsys_initcall(mxs_i2c_init);
+
+static void __exit mxs_i2c_exit(void)
+{
+ platform_driver_unregister(&mxs_i2c_driver);
+}
+module_exit(mxs_i2c_exit);
+
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_DESCRIPTION("MXS I2C Bus Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
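For orientation, a minimal client-side sketch (not part of the patch) of how the master_xfer path added above is exercised through the generic i2c_transfer() call; the two-message register read and the helper name are assumptions for illustration only:

#include <linux/kernel.h>
#include <linux/i2c.h>

/* Hypothetical helper: write a register index, then read one byte back. */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{
			.addr	= client->addr,
			.flags	= 0,		/* first message: write index */
			.len	= 1,
			.buf	= &reg,
		},
		{
			.addr	= client->addr,
			.flags	= I2C_M_RD,	/* second message: read data */
			.len	= 1,
			.buf	= val,
		},
	};
	int ret;

	/* mxs_i2c_xfer() returns the number of messages handled on success */
	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;

	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}

Each message reaches mxs_i2c_xfer_msg() above in turn, with MXS_I2C_CTRL0_POST_SEND_STOP set only for the last message of the transfer.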
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 1b46a9d9f907..fee1a2613861 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -49,6 +49,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
@@ -305,7 +306,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
return -EIO;
}
- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (pdata) {
i2c->regstep = pdata->regstep;
i2c->clock_khz = pdata->clock_khz;
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
new file mode 100644
index 000000000000..6659d269b841
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -0,0 +1,176 @@
+/*
+ * The CE4100's I2C device is more or less the same one as found on PXA.
+ * It does not support slave mode, and its registers are slightly moved. This
+ * PCI device provides three BARs, each containing a single I2C controller.
+ */
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/i2c/pxa-i2c.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+
+#define CE4100_PCI_I2C_DEVS 3
+
+struct ce4100_devices {
+ struct platform_device *pdev[CE4100_PCI_I2C_DEVS];
+};
+
+static struct platform_device *add_i2c_device(struct pci_dev *dev, int bar)
+{
+ struct platform_device *pdev;
+ struct i2c_pxa_platform_data pdata;
+ struct resource res[2];
+ struct device_node *child;
+ static int devnum;
+ int ret;
+
+ memset(&pdata, 0, sizeof(struct i2c_pxa_platform_data));
+ memset(&res, 0, sizeof(res));
+
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = pci_resource_start(dev, bar);
+ res[0].end = pci_resource_end(dev, bar);
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = dev->irq;
+ res[1].end = dev->irq;
+
+ for_each_child_of_node(dev->dev.of_node, child) {
+ const void *prop;
+ struct resource r;
+ int ret;
+
+ ret = of_address_to_resource(child, 0, &r);
+ if (ret < 0)
+ continue;
+ if (r.start != res[0].start)
+ continue;
+ if (r.end != res[0].end)
+ continue;
+ if (r.flags != res[0].flags)
+ continue;
+
+ prop = of_get_property(child, "fast-mode", NULL);
+ if (prop)
+ pdata.fast_mode = 1;
+
+ break;
+ }
+
+ if (!child) {
+ dev_err(&dev->dev, "failed to match a DT node for bar %d.\n",
+ bar);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pdev = platform_device_alloc("ce4100-i2c", devnum);
+ if (!pdev) {
+ of_node_put(child);
+ ret = -ENOMEM;
+ goto out;
+ }
+ pdev->dev.parent = &dev->dev;
+ pdev->dev.of_node = child;
+
+ ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+ if (ret)
+ goto err;
+
+ ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
+ if (ret)
+ goto err;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err;
+ devnum++;
+ return pdev;
+err:
+ platform_device_put(pdev);
+out:
+ return ERR_PTR(ret);
+}
+
+static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+ int i;
+ struct ce4100_devices *sds;
+
+ ret = pci_enable_device_mem(dev);
+ if (ret)
+ return ret;
+
+ if (!dev->dev.of_node) {
+ dev_err(&dev->dev, "Missing device tree node.\n");
+ ret = -EINVAL;
+ goto err_mem;
+ }
+ sds = kzalloc(sizeof(*sds), GFP_KERNEL);
+ if (!sds) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
+ sds->pdev[i] = add_i2c_device(dev, i);
+ if (IS_ERR(sds->pdev[i])) {
+ while (--i >= 0)
+ platform_device_unregister(sds->pdev[i]);
+ goto err_dev_add;
+ }
+ }
+ pci_set_drvdata(dev, sds);
+ return 0;
+
+err_dev_add:
+ pci_set_drvdata(dev, NULL);
+ kfree(sds);
+err_mem:
+ pci_disable_device(dev);
+ return ret;
+}
+
+static void __devexit ce4100_i2c_remove(struct pci_dev *dev)
+{
+ struct ce4100_devices *sds;
+ unsigned int i;
+
+ sds = pci_get_drvdata(dev);
+ pci_set_drvdata(dev, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(sds->pdev); i++)
+ platform_device_unregister(sds->pdev[i]);
+
+ pci_disable_device(dev);
+ kfree(sds);
+}
+
+static struct pci_device_id ce4100_i2c_devices[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
+ { },
+};
+MODULE_DEVICE_TABLE(pci, ce4100_i2c_devices);
+
+static struct pci_driver ce4100_i2c_driver = {
+ .name = "ce4100_i2c",
+ .id_table = ce4100_i2c_devices,
+ .probe = ce4100_i2c_probe,
+ .remove = __devexit_p(ce4100_i2c_remove),
+};
+
+static int __init ce4100_i2c_init(void)
+{
+ return pci_register_driver(&ce4100_i2c_driver);
+}
+module_init(ce4100_i2c_init);
+
+static void __exit ce4100_i2c_exit(void)
+{
+ pci_unregister_driver(&ce4100_i2c_driver);
+}
+module_exit(ce4100_i2c_exit);
+
+MODULE_DESCRIPTION("CE4100 PCI-I2C glue code for PXA's driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f4c19a97e0b3..f59224a5c761 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -29,38 +29,75 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/i2c-pxa.h>
+#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/i2c/pxa-i2c.h>
#include <asm/irq.h>
-#include <plat/i2c.h>
+
+#ifndef CONFIG_HAVE_CLK
+#define clk_get(dev, id) NULL
+#define clk_put(clk) do { } while (0)
+#define clk_disable(clk) do { } while (0)
+#define clk_enable(clk) do { } while (0)
+#endif
+
+struct pxa_reg_layout {
+ u32 ibmr;
+ u32 idbr;
+ u32 icr;
+ u32 isr;
+ u32 isar;
+};
+
+enum pxa_i2c_types {
+ REGS_PXA2XX,
+ REGS_PXA3XX,
+ REGS_CE4100,
+};
/*
- * I2C register offsets will be shifted 0 or 1 bit left, depending on
- * different SoCs
+ * I2C register definitions
*/
-#define REG_SHIFT_0 (0 << 0)
-#define REG_SHIFT_1 (1 << 0)
-#define REG_SHIFT(d) ((d) & 0x1)
+static struct pxa_reg_layout pxa_reg_layout[] = {
+ [REGS_PXA2XX] = {
+ .ibmr = 0x00,
+ .idbr = 0x08,
+ .icr = 0x10,
+ .isr = 0x18,
+ .isar = 0x20,
+ },
+ [REGS_PXA3XX] = {
+ .ibmr = 0x00,
+ .idbr = 0x04,
+ .icr = 0x08,
+ .isr = 0x0c,
+ .isar = 0x10,
+ },
+ [REGS_CE4100] = {
+ .ibmr = 0x14,
+ .idbr = 0x0c,
+ .icr = 0x00,
+ .isr = 0x04,
+ /* no isar register */
+ },
+};
static const struct platform_device_id i2c_pxa_id_table[] = {
- { "pxa2xx-i2c", REG_SHIFT_1 },
- { "pxa3xx-pwri2c", REG_SHIFT_0 },
+ { "pxa2xx-i2c", REGS_PXA2XX },
+ { "pxa3xx-pwri2c", REGS_PXA3XX },
+ { "ce4100-i2c", REGS_CE4100 },
{ },
};
MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
/*
- * I2C registers and bit definitions
+ * I2C bit definitions
*/
-#define IBMR (0x00)
-#define IDBR (0x08)
-#define ICR (0x10)
-#define ISR (0x18)
-#define ISAR (0x20)
#define ICR_START (1 << 0) /* start bit */
#define ICR_STOP (1 << 1) /* stop bit */
@@ -111,7 +148,11 @@ struct pxa_i2c {
u32 icrlog[32];
void __iomem *reg_base;
- unsigned int reg_shift;
+ void __iomem *reg_ibmr;
+ void __iomem *reg_idbr;
+ void __iomem *reg_icr;
+ void __iomem *reg_isr;
+ void __iomem *reg_isar;
unsigned long iobase;
unsigned long iosize;
@@ -121,11 +162,11 @@ struct pxa_i2c {
unsigned int fast_mode :1;
};
-#define _IBMR(i2c) ((i2c)->reg_base + (0x0 << (i2c)->reg_shift))
-#define _IDBR(i2c) ((i2c)->reg_base + (0x4 << (i2c)->reg_shift))
-#define _ICR(i2c) ((i2c)->reg_base + (0x8 << (i2c)->reg_shift))
-#define _ISR(i2c) ((i2c)->reg_base + (0xc << (i2c)->reg_shift))
-#define _ISAR(i2c) ((i2c)->reg_base + (0x10 << (i2c)->reg_shift))
+#define _IBMR(i2c) ((i2c)->reg_ibmr)
+#define _IDBR(i2c) ((i2c)->reg_idbr)
+#define _ICR(i2c) ((i2c)->reg_icr)
+#define _ISR(i2c) ((i2c)->reg_isr)
+#define _ISAR(i2c) ((i2c)->reg_isar)
/*
* I2C Slave mode address
@@ -418,7 +459,8 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
writel(I2C_ISR_INIT, _ISR(i2c));
writel(readl(_ICR(i2c)) & ~ICR_UR, _ICR(i2c));
- writel(i2c->slave_addr, _ISAR(i2c));
+ if (i2c->reg_isar)
+ writel(i2c->slave_addr, _ISAR(i2c));
/* set control register values */
writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c));
@@ -729,8 +771,10 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
*/
ret = i2c->msg_idx;
- if (timeout == 0)
+ if (!timeout && i2c->msg_num) {
i2c_pxa_scream_blue_murder(i2c, "timeout");
+ ret = I2C_RETRY;
+ }
out:
return ret;
@@ -915,11 +959,16 @@ static void i2c_pxa_irq_rxfull(struct pxa_i2c *i2c, u32 isr)
writel(icr, _ICR(i2c));
}
+#define VALID_INT_SOURCE (ISR_SSD | ISR_ALD | ISR_ITE | ISR_IRF | \
+ ISR_SAD | ISR_BED)
static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
{
struct pxa_i2c *i2c = dev_id;
u32 isr = readl(_ISR(i2c));
+ if (!(isr & VALID_INT_SOURCE))
+ return IRQ_NONE;
+
if (i2c_debug > 2 && 0) {
dev_dbg(&i2c->adap.dev, "%s: ISR=%08x, ICR=%08x, IBMR=%02x\n",
__func__, isr, readl(_ICR(i2c)), readl(_IBMR(i2c)));
@@ -934,7 +983,7 @@ static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
/*
* Always clear all pending IRQs.
*/
- writel(isr & (ISR_SSD|ISR_ALD|ISR_ITE|ISR_IRF|ISR_SAD|ISR_BED), _ISR(i2c));
+ writel(isr & VALID_INT_SOURCE, _ISR(i2c));
if (isr & ISR_SAD)
i2c_pxa_slave_start(i2c, isr);
@@ -1001,6 +1050,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
struct resource *res;
struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
const struct platform_device_id *id = platform_get_device_id(dev);
+ enum pxa_i2c_types i2c_type = id->driver_data;
int ret;
int irq;
@@ -1044,7 +1094,13 @@ static int i2c_pxa_probe(struct platform_device *dev)
ret = -EIO;
goto eremap;
}
- i2c->reg_shift = REG_SHIFT(id->driver_data);
+
+ i2c->reg_ibmr = i2c->reg_base + pxa_reg_layout[i2c_type].ibmr;
+ i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr;
+ i2c->reg_icr = i2c->reg_base + pxa_reg_layout[i2c_type].icr;
+ i2c->reg_isr = i2c->reg_base + pxa_reg_layout[i2c_type].isr;
+ if (i2c_type != REGS_CE4100)
+ i2c->reg_isar = i2c->reg_base + pxa_reg_layout[i2c_type].isar;
i2c->iobase = res->start;
i2c->iosize = resource_size(res);
@@ -1072,7 +1128,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.algo = &i2c_pxa_pio_algorithm;
} else {
i2c->adap.algo = &i2c_pxa_algorithm;
- ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED,
+ ret = request_irq(irq, i2c_pxa_handler, IRQF_SHARED,
i2c->adap.name, i2c);
if (ret)
goto ereqirq;
@@ -1082,12 +1138,19 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &dev->dev;
+#ifdef CONFIG_OF
+ i2c->adap.dev.of_node = dev->dev.of_node;
+#endif
- ret = i2c_add_numbered_adapter(&i2c->adap);
+ if (i2c_type == REGS_CE4100)
+ ret = i2c_add_adapter(&i2c->adap);
+ else
+ ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
printk(KERN_INFO "I2C: Failed to add bus\n");
goto eadapt;
}
+ of_i2c_register_devices(&i2c->adap);
platform_set_drvdata(dev, i2c);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
new file mode 100644
index 000000000000..3921f664c9c3
--- /dev/null
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -0,0 +1,700 @@
+/*
+ * drivers/i2c/busses/i2c-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c-tegra.h>
+
+#include <asm/unaligned.h>
+
+#include <mach/clk.h>
+
+#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
+#define BYTES_PER_FIFO_WORD 4
+
+#define I2C_CNFG 0x000
+#define I2C_CNFG_PACKET_MODE_EN (1<<10)
+#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
+#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NEWSL (1<<2)
+#define I2C_SL_ADDR1 0x02c
+#define I2C_TX_FIFO 0x050
+#define I2C_RX_FIFO 0x054
+#define I2C_PACKET_TRANSFER_STATUS 0x058
+#define I2C_FIFO_CONTROL 0x05c
+#define I2C_FIFO_CONTROL_TX_FLUSH (1<<1)
+#define I2C_FIFO_CONTROL_RX_FLUSH (1<<0)
+#define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5
+#define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2
+#define I2C_FIFO_STATUS 0x060
+#define I2C_FIFO_STATUS_TX_MASK 0xF0
+#define I2C_FIFO_STATUS_TX_SHIFT 4
+#define I2C_FIFO_STATUS_RX_MASK 0x0F
+#define I2C_FIFO_STATUS_RX_SHIFT 0
+#define I2C_INT_MASK 0x064
+#define I2C_INT_STATUS 0x068
+#define I2C_INT_PACKET_XFER_COMPLETE (1<<7)
+#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6)
+#define I2C_INT_TX_FIFO_OVERFLOW (1<<5)
+#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4)
+#define I2C_INT_NO_ACK (1<<3)
+#define I2C_INT_ARBITRATION_LOST (1<<2)
+#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
+#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
+#define I2C_CLK_DIVISOR 0x06c
+
+#define DVC_CTRL_REG1 0x000
+#define DVC_CTRL_REG1_INTR_EN (1<<10)
+#define DVC_CTRL_REG2 0x004
+#define DVC_CTRL_REG3 0x008
+#define DVC_CTRL_REG3_SW_PROG (1<<26)
+#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30)
+#define DVC_STATUS 0x00c
+#define DVC_STATUS_I2C_DONE_INTR (1<<30)
+
+#define I2C_ERR_NONE 0x00
+#define I2C_ERR_NO_ACK 0x01
+#define I2C_ERR_ARBITRATION_LOST 0x02
+
+#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
+#define PACKET_HEADER0_PACKET_ID_SHIFT 16
+#define PACKET_HEADER0_CONT_ID_SHIFT 12
+#define PACKET_HEADER0_PROTOCOL_I2C (1<<4)
+
+#define I2C_HEADER_HIGHSPEED_MODE (1<<22)
+#define I2C_HEADER_CONT_ON_NAK (1<<21)
+#define I2C_HEADER_SEND_START_BYTE (1<<20)
+#define I2C_HEADER_READ (1<<19)
+#define I2C_HEADER_10BIT_ADDR (1<<18)
+#define I2C_HEADER_IE_ENABLE (1<<17)
+#define I2C_HEADER_REPEAT_START (1<<16)
+#define I2C_HEADER_MASTER_ADDR_SHIFT 12
+#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+
+/**
+ * struct tegra_i2c_dev - per device i2c context
+ * @dev: device reference for power management
+ * @adapter: core i2c layer adapter information
+ * @clk: clock reference for i2c controller
+ * @i2c_clk: clock reference for i2c bus
+ * @iomem: memory resource for registers
+ * @base: ioremapped registers cookie
+ * @cont_id: i2c controller id, used for the packet header
+ * @irq: irq number of transfer complete interrupt
+ * @is_dvc: identifies the DVC i2c controller, has a different register layout
+ * @msg_complete: transfer completion notifier
+ * @msg_err: error code for completed message
+ * @msg_buf: pointer to current message data
+ * @msg_buf_remaining: size of unsent data in the message buffer
+ * @msg_read: identifies read transfers
+ * @bus_clk_rate: current i2c bus clock rate
+ * @is_suspended: prevents i2c controller accesses after suspend is called
+ */
+struct tegra_i2c_dev {
+ struct device *dev;
+ struct i2c_adapter adapter;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ struct resource *iomem;
+ void __iomem *base;
+ int cont_id;
+ int irq;
+ int is_dvc;
+ struct completion msg_complete;
+ int msg_err;
+ u8 *msg_buf;
+ size_t msg_buf_remaining;
+ int msg_read;
+ unsigned long bus_clk_rate;
+ bool is_suspended;
+};
+
+static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+{
+ writel(val, i2c_dev->base + reg);
+}
+
+static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + reg);
+}
+
+/*
+ * i2c_writel and i2c_readl will offset the register if necessary to talk
+ * to the I2C block inside the DVC block
+ */
+static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev,
+ unsigned long reg)
+{
+ if (i2c_dev->is_dvc)
+ reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
+ return reg;
+}
+
+static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
+ unsigned long reg)
+{
+ writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+}
+
+static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+}
+
+static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned long reg, int len)
+{
+ writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
+}
+
+static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned long reg, int len)
+{
+ readsl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
+}
+
+static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask &= ~mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask |= mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
+{
+ unsigned long timeout = jiffies + HZ;
+ u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+ val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) &
+ (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n");
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int rx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
+ I2C_FIFO_STATUS_RX_SHIFT;
+
+ /* Rounds down to not include partial word at the end of buf */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > rx_fifo_avail)
+ words_to_transfer = rx_fifo_avail;
+
+ i2c_readsl(i2c_dev, buf, I2C_RX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ rx_fifo_avail -= words_to_transfer;
+
+ /*
+ * If there is a partial word at the end of buf, handle it manually to
+ * prevent overwriting past the end of buf
+ */
+ if (rx_fifo_avail > 0 && buf_remaining > 0) {
+ BUG_ON(buf_remaining > 3);
+ val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+ memcpy(buf, &val, buf_remaining);
+ buf_remaining = 0;
+ rx_fifo_avail--;
+ }
+
+ BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
+
+static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int tx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
+ I2C_FIFO_STATUS_TX_SHIFT;
+
+ /* Rounds down to not include partial word at the end of buf */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > tx_fifo_avail)
+ words_to_transfer = tx_fifo_avail;
+
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ tx_fifo_avail -= words_to_transfer;
+
+ /*
+ * If there is a partial word at the end of buf, handle it manually to
+ * prevent reading past the end of buf, which could cross a page
+ * boundary and fault.
+ */
+ if (tx_fifo_avail > 0 && buf_remaining > 0) {
+ BUG_ON(buf_remaining > 3);
+ memcpy(&val, buf, buf_remaining);
+ i2c_writel(i2c_dev, val, I2C_TX_FIFO);
+ buf_remaining = 0;
+ tx_fifo_avail--;
+ }
+
+ BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
+
+/*
+ * One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller)
+ * block. This block is identical to the rest of the I2C blocks, except that
+ * it only supports master mode, it has registers moved around, and it needs
+ * some extra init to get it into I2C mode. The register moves are handled
+ * by i2c_readl and i2c_writel
+ */
+static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val = 0;
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG3);
+ val |= DVC_CTRL_REG3_SW_PROG;
+ val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG3);
+
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG1);
+ val |= DVC_CTRL_REG1_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
+}
+
+static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int err = 0;
+
+ clk_enable(i2c_dev->clk);
+
+ tegra_periph_reset_assert(i2c_dev->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(i2c_dev->clk);
+
+ if (i2c_dev->is_dvc)
+ tegra_dvc_init(i2c_dev);
+
+ val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN;
+ i2c_writel(i2c_dev, val, I2C_CNFG);
+ i2c_writel(i2c_dev, 0, I2C_INT_MASK);
+ clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8);
+
+ val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
+ 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ if (tegra_i2c_flush_fifos(i2c_dev))
+ err = -ETIMEDOUT;
+
+ clk_disable(i2c_dev->clk);
+ return err;
+}
+
+static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+{
+ u32 status;
+ const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ struct tegra_i2c_dev *i2c_dev = dev_id;
+
+ status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+ if (status == 0) {
+ dev_warn(i2c_dev->dev, "interrupt with no status\n");
+ return IRQ_NONE;
+ }
+
+ if (unlikely(status & status_err)) {
+ if (status & I2C_INT_NO_ACK)
+ i2c_dev->msg_err |= I2C_ERR_NO_ACK;
+ if (status & I2C_INT_ARBITRATION_LOST)
+ i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
+ complete(&i2c_dev->msg_complete);
+ goto err;
+ }
+
+ if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_empty_rx_fifo(i2c_dev);
+ else
+ BUG();
+ }
+
+ if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+ else
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
+ }
+
+ if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
+ !i2c_dev->msg_buf_remaining)
+ complete(&i2c_dev->msg_complete);
+
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ return IRQ_HANDLED;
+err:
+ /* An error occurred, mask all interrupts */
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
+ I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ |
+ I2C_INT_RX_FIFO_DATA_REQ);
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ return IRQ_HANDLED;
+}
+
+static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ struct i2c_msg *msg, int stop)
+{
+ u32 packet_header;
+ u32 int_mask;
+ int ret;
+
+ tegra_i2c_flush_fifos(i2c_dev);
+ i2c_writel(i2c_dev, 0xFF, I2C_INT_STATUS);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
+ i2c_dev->msg_buf = msg->buf;
+ i2c_dev->msg_buf_remaining = msg->len;
+ i2c_dev->msg_err = I2C_ERR_NONE;
+ i2c_dev->msg_read = (msg->flags & I2C_M_RD);
+ INIT_COMPLETION(i2c_dev->msg_complete);
+
+ packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
+ PACKET_HEADER0_PROTOCOL_I2C |
+ (i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
+ (1 << PACKET_HEADER0_PACKET_ID_SHIFT);
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->len - 1;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ packet_header |= I2C_HEADER_IE_ENABLE;
+ if (msg->flags & I2C_M_TEN)
+ packet_header |= I2C_HEADER_10BIT_ADDR;
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ packet_header |= I2C_HEADER_CONT_ON_NAK;
+ if (msg->flags & I2C_M_NOSTART)
+ packet_header |= I2C_HEADER_REPEAT_START;
+ if (msg->flags & I2C_M_RD)
+ packet_header |= I2C_HEADER_READ;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ if (!(msg->flags & I2C_M_RD))
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+
+ int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ if (msg->flags & I2C_M_RD)
+ int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
+ else if (i2c_dev->msg_buf_remaining)
+ int_mask |= I2C_INT_TX_FIFO_DATA_REQ;
+ tegra_i2c_unmask_irq(i2c_dev, int_mask);
+ dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
+ i2c_readl(i2c_dev, I2C_INT_MASK));
+
+ ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
+ tegra_i2c_mask_irq(i2c_dev, int_mask);
+
+ if (WARN_ON(ret == 0)) {
+ dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+
+ tegra_i2c_init(i2c_dev);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(i2c_dev->dev, "transfer complete: %d %d %d\n",
+ ret, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err);
+
+ if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
+ return 0;
+
+ tegra_i2c_init(i2c_dev);
+ if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ return 0;
+ return -EREMOTEIO;
+ }
+
+ return -EIO;
+}
+
+static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ int i;
+ int ret = 0;
+
+ if (i2c_dev->is_suspended)
+ return -EBUSY;
+
+ clk_enable(i2c_dev->clk);
+ for (i = 0; i < num; i++) {
+ int stop = (i == (num - 1)) ? 1 : 0;
+ ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
+ if (ret)
+ break;
+ }
+ clk_disable(i2c_dev->clk);
+ return ret ?: i;
+}
+
+static u32 tegra_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm tegra_i2c_algo = {
+ .master_xfer = tegra_i2c_xfer,
+ .functionality = tegra_i2c_func,
+};
+
+static int tegra_i2c_probe(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev;
+ struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+ struct resource *iomem;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ void *base;
+ int irq;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource\n");
+ return -EINVAL;
+ }
+ iomem = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!iomem) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ base = ioremap(iomem->start, resource_size(iomem));
+ if (!base) {
+ dev_err(&pdev->dev, "Cannot ioremap I2C region\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no irq resource\n");
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+ irq = res->start;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "missing controller clock");
+ ret = PTR_ERR(clk);
+ goto err_release_region;
+ }
+
+ i2c_clk = clk_get(&pdev->dev, "i2c");
+ if (IS_ERR(i2c_clk)) {
+ dev_err(&pdev->dev, "missing bus clock");
+ ret = PTR_ERR(i2c_clk);
+ goto err_clk_put;
+ }
+
+ i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev), GFP_KERNEL);
+ if (!i2c_dev) {
+ ret = -ENOMEM;
+ goto err_i2c_clk_put;
+ }
+
+ i2c_dev->base = base;
+ i2c_dev->clk = clk;
+ i2c_dev->i2c_clk = i2c_clk;
+ i2c_dev->iomem = iomem;
+ i2c_dev->adapter.algo = &tegra_i2c_algo;
+ i2c_dev->irq = irq;
+ i2c_dev->cont_id = pdev->id;
+ i2c_dev->dev = &pdev->dev;
+ i2c_dev->bus_clk_rate = pdata ? pdata->bus_clk_rate : 100000;
+
+ if (pdev->id == 3)
+ i2c_dev->is_dvc = 1;
+ init_completion(&i2c_dev->msg_complete);
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ ret = tegra_i2c_init(i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize i2c controller");
+ goto err_free;
+ }
+
+ ret = request_irq(i2c_dev->irq, tegra_i2c_isr, 0, pdev->name, i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
+ goto err_free;
+ }
+
+ clk_enable(i2c_dev->i2c_clk);
+
+ i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
+ i2c_dev->adapter.owner = THIS_MODULE;
+ i2c_dev->adapter.class = I2C_CLASS_HWMON;
+ strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter",
+ sizeof(i2c_dev->adapter.name));
+ i2c_dev->adapter.algo = &tegra_i2c_algo;
+ i2c_dev->adapter.dev.parent = &pdev->dev;
+ i2c_dev->adapter.nr = pdev->id;
+
+ ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+err_free_irq:
+ free_irq(i2c_dev->irq, i2c_dev);
+err_free:
+ kfree(i2c_dev);
+err_i2c_clk_put:
+ clk_put(i2c_clk);
+err_clk_put:
+ clk_put(clk);
+err_release_region:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_iounmap:
+ iounmap(base);
+ return ret;
+}
+
+static int tegra_i2c_remove(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ i2c_del_adapter(&i2c_dev->adapter);
+ free_irq(i2c_dev->irq, i2c_dev);
+ clk_put(i2c_dev->i2c_clk);
+ clk_put(i2c_dev->clk);
+ release_mem_region(i2c_dev->iomem->start,
+ resource_size(i2c_dev->iomem));
+ iounmap(i2c_dev->base);
+ kfree(i2c_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ i2c_lock_adapter(&i2c_dev->adapter);
+ i2c_dev->is_suspended = true;
+ i2c_unlock_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+
+static int tegra_i2c_resume(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ i2c_lock_adapter(&i2c_dev->adapter);
+
+ ret = tegra_i2c_init(i2c_dev);
+
+ if (ret) {
+ i2c_unlock_adapter(&i2c_dev->adapter);
+ return ret;
+ }
+
+ i2c_dev->is_suspended = false;
+
+ i2c_unlock_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+#endif
+
+static struct platform_driver tegra_i2c_driver = {
+ .probe = tegra_i2c_probe,
+ .remove = tegra_i2c_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_i2c_suspend,
+ .resume = tegra_i2c_resume,
+#endif
+ .driver = {
+ .name = "tegra-i2c",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_i2c_init_driver(void)
+{
+ return platform_driver_register(&tegra_i2c_driver);
+}
+
+static void __exit tegra_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&tegra_i2c_driver);
+}
+
+subsys_initcall(tegra_i2c_init_driver);
+module_exit(tegra_i2c_exit_driver);
+
+MODULE_DESCRIPTION("nVidia Tegra2 I2C Bus Controller driver");
+MODULE_AUTHOR("Colin Cross");
+MODULE_LICENSE("GPL v2");
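A minimal board-file sketch (not part of the patch) of how the bus_clk_rate consumed by tegra_i2c_probe() above would be supplied through platform data; the register base, interrupt number and device id are placeholders rather than real Tegra values:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/i2c-tegra.h>

static struct tegra_i2c_platform_data example_i2c_pdata = {
	.bus_clk_rate	= 400000,	/* probe() falls back to 100000 if no pdata */
};

static struct resource example_i2c_resources[] = {
	{
		.start	= 0x7000c000,	/* placeholder register base */
		.end	= 0x7000c0ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 38,		/* placeholder interrupt */
		.end	= 38,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_i2c_device = {
	.name		= "tegra-i2c",
	.id		= 0,		/* id 3 would select the DVC variant in probe() */
	.resource	= example_i2c_resources,
	.num_resources	= ARRAY_SIZE(example_i2c_resources),
	.dev		= {
		.platform_data	= &example_i2c_pdata,
	},
};

Registering this with platform_device_register() from board init binds it to the "tegra-i2c" driver added above.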
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index a9c419e075a5..9fbd7e6fe32e 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -34,6 +34,7 @@
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
@@ -704,7 +705,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
if (irq < 0)
goto resource_missing;
- pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (!pdata)
return -EINVAL;
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 7e6a63b57165..3ca2e012e789 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -1,5 +1,5 @@
/*
- * i2c-boardinfo.h - collect pre-declarations of I2C devices
+ * i2c-boardinfo.c - collect pre-declarations of I2C devices
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 045ba6efea48..e5f76a0372fd 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -797,6 +797,9 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
/* Let legacy drivers scan this bus for matching devices */
if (driver->attach_adapter) {
+ dev_warn(&adap->dev, "attach_adapter method is deprecated\n");
+ dev_warn(&adap->dev, "Please use another way to instantiate "
+ "your i2c_client\n");
/* We ignore the return code; if it fails, too bad */
driver->attach_adapter(adap);
}
@@ -981,6 +984,7 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
if (!driver->detach_adapter)
return 0;
+ dev_warn(&adapter->dev, "detach_adapter method is deprecated\n");
res = driver->detach_adapter(adapter);
if (res)
dev_err(&adapter->dev, "detach_adapter failed (%d) "
@@ -1091,6 +1095,18 @@ EXPORT_SYMBOL(i2c_del_adapter);
/* ------------------------------------------------------------------------- */
+int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *))
+{
+ int res;
+
+ mutex_lock(&core_lock);
+ res = bus_for_each_dev(&i2c_bus_type, NULL, data, fn);
+ mutex_unlock(&core_lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(i2c_for_each_dev);
+
static int __process_new_driver(struct device *dev, void *data)
{
if (dev->type != &i2c_adapter_type)
@@ -1134,9 +1150,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
- mutex_lock(&core_lock);
- bus_for_each_dev(&i2c_bus_type, NULL, driver, __process_new_driver);
- mutex_unlock(&core_lock);
+ i2c_for_each_dev(driver, __process_new_driver);
return 0;
}
@@ -1156,9 +1170,7 @@ static int __process_removed_driver(struct device *dev, void *data)
*/
void i2c_del_driver(struct i2c_driver *driver)
{
- mutex_lock(&core_lock);
- bus_for_each_dev(&i2c_bus_type, NULL, driver, __process_removed_driver);
- mutex_unlock(&core_lock);
+ i2c_for_each_dev(driver, __process_removed_driver);
driver_unregister(&driver->driver);
pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name);
@@ -1581,12 +1593,12 @@ i2c_new_probed_device(struct i2c_adapter *adap,
}
EXPORT_SYMBOL_GPL(i2c_new_probed_device);
-struct i2c_adapter *i2c_get_adapter(int id)
+struct i2c_adapter *i2c_get_adapter(int nr)
{
struct i2c_adapter *adapter;
mutex_lock(&core_lock);
- adapter = idr_find(&i2c_adapter_idr, id);
+ adapter = idr_find(&i2c_adapter_idr, nr);
if (adapter && !try_module_get(adapter->owner))
adapter = NULL;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index cec0f3ba97f8..c90ce50b619f 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -28,6 +28,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -37,16 +39,13 @@
#include <linux/jiffies.h>
#include <linux/uaccess.h>
-static struct i2c_driver i2cdev_driver;
-
/*
* An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a
* slave (i2c_client) with which messages will be exchanged. It's coupled
* with a character special file which is accessed by user mode drivers.
*
* The list of i2c_dev structures is parallel to the i2c_adapter lists
- * maintained by the driver model, and is updated using notifications
- * delivered to the i2cdev_driver.
+ * maintained by the driver model, and is updated using bus notifications.
*/
struct i2c_dev {
struct list_head list;
@@ -491,7 +490,6 @@ static int i2cdev_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr);
- client->driver = &i2cdev_driver;
client->adapter = adap;
file->private_data = client;
@@ -522,19 +520,18 @@ static const struct file_operations i2cdev_fops = {
/* ------------------------------------------------------------------------- */
-/*
- * The legacy "i2cdev_driver" is used primarily to get notifications when
- * I2C adapters are added or removed, so that each one gets an i2c_dev
- * and is thus made available to userspace driver code.
- */
-
static struct class *i2c_dev_class;
-static int i2cdev_attach_adapter(struct i2c_adapter *adap)
+static int i2cdev_attach_adapter(struct device *dev, void *dummy)
{
+ struct i2c_adapter *adap;
struct i2c_dev *i2c_dev;
int res;
+ if (dev->type != &i2c_adapter_type)
+ return 0;
+ adap = to_i2c_adapter(dev);
+
i2c_dev = get_free_i2c_dev(adap);
if (IS_ERR(i2c_dev))
return PTR_ERR(i2c_dev);
@@ -561,10 +558,15 @@ error:
return res;
}
-static int i2cdev_detach_adapter(struct i2c_adapter *adap)
+static int i2cdev_detach_adapter(struct device *dev, void *dummy)
{
+ struct i2c_adapter *adap;
struct i2c_dev *i2c_dev;
+ if (dev->type != &i2c_adapter_type)
+ return 0;
+ adap = to_i2c_adapter(dev);
+
i2c_dev = i2c_dev_get_by_minor(adap->nr);
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
@@ -577,12 +579,23 @@ static int i2cdev_detach_adapter(struct i2c_adapter *adap)
return 0;
}
-static struct i2c_driver i2cdev_driver = {
- .driver = {
- .name = "dev_driver",
- },
- .attach_adapter = i2cdev_attach_adapter,
- .detach_adapter = i2cdev_detach_adapter,
+int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ return i2cdev_attach_adapter(dev, NULL);
+ case BUS_NOTIFY_DEL_DEVICE:
+ return i2cdev_detach_adapter(dev, NULL);
+ }
+
+ return 0;
+}
+
+static struct notifier_block i2cdev_notifier = {
+ .notifier_call = i2cdev_notifier_call,
};
/* ------------------------------------------------------------------------- */
@@ -607,10 +620,14 @@ static int __init i2c_dev_init(void)
goto out_unreg_chrdev;
}
- res = i2c_add_driver(&i2cdev_driver);
+ /* Keep track of adapters which will be added or removed later */
+ res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
if (res)
goto out_unreg_class;
+ /* Bind to already existing adapters right away */
+ i2c_for_each_dev(NULL, i2cdev_attach_adapter);
+
return 0;
out_unreg_class:
@@ -624,7 +641,8 @@ out:
static void __exit i2c_dev_exit(void)
{
- i2c_del_driver(&i2cdev_driver);
+ bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
+ i2c_for_each_dev(NULL, i2cdev_detach_adapter);
class_destroy(i2c_dev_class);
unregister_chrdev(I2C_MAJOR, "i2c");
}
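The i2c-dev conversion above replaces the deprecated attach_adapter/detach_adapter callbacks with a bus notifier plus the new i2c_for_each_dev() helper. A stripped-down sketch of that same pattern (not part of the patch; function names are hypothetical) for any other code that needs to track I2C adapters:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_attach_adapter(struct device *dev, void *dummy)
{
	if (dev->type != &i2c_adapter_type)
		return 0;
	/* set up per-adapter state for to_i2c_adapter(dev) here */
	return 0;
}

static int example_notifier_call(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		return example_attach_adapter(dev, NULL);
	/* BUS_NOTIFY_DEL_DEVICE would tear the state down again */
	return 0;
}

static struct notifier_block example_notifier = {
	.notifier_call	= example_notifier_call,
};

static int __init example_init(void)
{
	int res;

	/* catch adapters registered from now on ... */
	res = bus_register_notifier(&i2c_bus_type, &example_notifier);
	if (res)
		return res;

	/* ... and walk the adapters that already exist */
	return i2c_for_each_dev(NULL, example_attach_adapter);
}
module_init(example_init);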
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 81df925f0e8b..7f879b2397b0 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -2,7 +2,7 @@
# link order is important here
#
-EXTRA_CFLAGS += -Idrivers/ide
+ccflags-y := -Idrivers/ide
ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
ide-taskfile.o ide-pm.o ide-park.o ide-sysfs.o ide-devsets.o \
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
index e0e8e1a184ff..68999137dedf 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/ieee802154/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
-EXTRA_CFLAGS += -DDEBUG -DCONFIG_FFD
+ccflags-y := -DDEBUG -DCONFIG_FFD
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index bab9f74c0665..cfed5399f074 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -53,8 +53,8 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
}
/* call with current->mm->mmap_sem held */
-static int __get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p, struct vm_area_struct **vma)
+static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p, struct vm_area_struct **vma)
{
unsigned long lock_limit;
size_t got;
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
down_write(&current->mm->mmap_sem);
- ret = __get_user_pages(start_page, num_pages, p, NULL);
+ ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
up_write(&current->mm->mmap_sem);
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index d7a26c1d4f37..7689e49c13c9 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -51,8 +51,8 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
/*
* Call with current->mm->mmap_sem held.
*/
-static int __get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p, struct vm_area_struct **vma)
+static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p, struct vm_area_struct **vma)
{
unsigned long lock_limit;
size_t got;
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
down_write(&current->mm->mmap_sem);
- ret = __get_user_pages(start_page, num_pages, p, NULL);
+ ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
up_write(&current->mm->mmap_sem);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 7b2fc98e2f2b..8db008de5392 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -532,6 +532,29 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
stats->custom[3].value = conn->fmr_unalign_cnt;
}
+static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct iser_conn *ib_conn = ep->dd_data;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (!ib_conn || !ib_conn->cma_id)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &ib_conn->cma_id->route.addr.dst_addr,
+ param, buf);
+ default:
+ return -ENOSYS;
+ }
+}
+
static struct iscsi_endpoint *
iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -637,6 +660,8 @@ static struct iscsi_transport iscsi_iser_transport = {
ISCSI_MAX_BURST |
ISCSI_PDU_INORDER_EN |
ISCSI_DATASEQ_INORDER_EN |
+ ISCSI_CONN_PORT |
+ ISCSI_CONN_ADDRESS |
ISCSI_EXP_STATSN |
ISCSI_PERSISTENT_PORT |
ISCSI_PERSISTENT_ADDRESS |
@@ -659,6 +684,7 @@ static struct iscsi_transport iscsi_iser_transport = {
.destroy_conn = iscsi_iser_conn_destroy,
.set_param = iscsi_iser_set_param,
.get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_iser_conn_stop,
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 1903c0f5b925..23e82e46656d 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -161,16 +161,6 @@ config INPUT_APMPOWER
To compile this driver as a module, choose M here: the
module will be called apm-power.
-config XEN_KBDDEV_FRONTEND
- tristate "Xen virtual keyboard and mouse support"
- depends on XEN_FBDEV_FRONTEND
- default y
- select XEN_XENBUS_FRONTEND
- help
- This driver implements the front-end of the Xen virtual
- keyboard and mouse device driver. It communicates with a back-end
- in another domain.
-
comment "Input Device Drivers"
source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 09614ce74961..0c789490e0b3 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -24,5 +24,3 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
obj-$(CONFIG_INPUT_MISC) += misc/
obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
-
-obj-$(CONFIG_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c8471a2552e7..7f42d3a454d2 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -321,6 +321,9 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
struct input_event event;
int retval;
+ if (count < input_event_size())
+ return -EINVAL;
+
retval = mutex_lock_interruptible(&evdev->mutex);
if (retval)
return retval;
@@ -330,17 +333,16 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
goto out;
}
- while (retval < count) {
-
+ do {
if (input_event_from_user(buffer + retval, &event)) {
retval = -EFAULT;
goto out;
}
+ retval += input_event_size();
input_inject_event(&evdev->handle,
event.type, event.code, event.value);
- retval += input_event_size();
- }
+ } while (retval + input_event_size() <= count);
out:
mutex_unlock(&evdev->mutex);
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 0559e309bac9..3037842a60d8 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -192,7 +192,7 @@ static struct attribute_group input_polldev_attribute_group = {
};
/**
- * input_allocate_polled_device - allocated memory polled device
+ * input_allocate_polled_device - allocate memory for polled device
*
* The function allocates memory for a polled device and also
* for an input device associated with this polled device.
@@ -239,7 +239,7 @@ EXPORT_SYMBOL(input_free_polled_device);
* with input layer. The device should be allocated with call to
* input_allocate_polled_device(). Callers should also set up poll()
* method and set up capabilities (id, name, phys, bits) of the
- * corresponing input_dev structure.
+ * corresponding input_dev structure.
*/
int input_register_polled_device(struct input_polled_dev *dev)
{
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 11905b6a3023..d6e8bd8a851c 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -791,22 +791,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
int retval;
spin_lock_irqsave(&dev->event_lock, flags);
-
- if (dev->getkeycode) {
- /*
- * Support for legacy drivers, that don't implement the new
- * ioctls
- */
- u32 scancode = ke->index;
-
- memcpy(ke->scancode, &scancode, sizeof(scancode));
- ke->len = sizeof(scancode);
- retval = dev->getkeycode(dev, scancode, &ke->keycode);
- } else {
- retval = dev->getkeycode_new(dev, ke);
- }
-
+ retval = dev->getkeycode(dev, ke);
spin_unlock_irqrestore(&dev->event_lock, flags);
+
return retval;
}
EXPORT_SYMBOL(input_get_keycode);
@@ -831,35 +818,7 @@ int input_set_keycode(struct input_dev *dev,
spin_lock_irqsave(&dev->event_lock, flags);
- if (dev->setkeycode) {
- /*
- * Support for legacy drivers, that don't implement the new
- * ioctls
- */
- unsigned int scancode;
-
- retval = input_scancode_to_scalar(ke, &scancode);
- if (retval)
- goto out;
-
- /*
- * We need to know the old scancode, in order to generate a
- * keyup effect, if the set operation happens successfully
- */
- if (!dev->getkeycode) {
- retval = -EINVAL;
- goto out;
- }
-
- retval = dev->getkeycode(dev, scancode, &old_keycode);
- if (retval)
- goto out;
-
- retval = dev->setkeycode(dev, scancode, ke->keycode);
- } else {
- retval = dev->setkeycode_new(dev, ke, &old_keycode);
- }
-
+ retval = dev->setkeycode(dev, ke, &old_keycode);
if (retval)
goto out;
@@ -1846,11 +1805,11 @@ int input_register_device(struct input_dev *dev)
dev->rep[REP_PERIOD] = 33;
}
- if (!dev->getkeycode && !dev->getkeycode_new)
- dev->getkeycode_new = input_default_getkeycode;
+ if (!dev->getkeycode)
+ dev->getkeycode = input_default_getkeycode;
- if (!dev->setkeycode && !dev->setkeycode_new)
- dev->setkeycode_new = input_default_setkeycode;
+ if (!dev->setkeycode)
+ dev->setkeycode = input_default_setkeycode;
dev_set_name(&dev->dev, "input%ld",
(unsigned long) atomic_inc_return(&input_no) - 1);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index c7a92028f450..b16bed038f72 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -112,6 +112,16 @@ config KEYBOARD_ATKBD_RDI_KEYCODES
right-hand column will be interpreted as the key shown in the
left-hand column.
+config KEYBOARD_QT1070
+ tristate "Atmel AT42QT1070 Touch Sensor Chip"
+ depends on I2C
+ help
+ Say Y here if you want to use the Atmel AT42QT1070 QTouch
+ Sensor chip as an input device.
+
+ To compile this driver as a module, choose M here:
+ the module will be called qt1070.
+
config KEYBOARD_QT2160
tristate "Atmel AT42QT2160 Touch Sensor Chip"
depends on I2C && EXPERIMENTAL
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 468c627a2844..878e6c20deb0 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
+obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o
obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
obj-$(CONFIG_KEYBOARD_SAMSUNG) += samsung-keypad.o
obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index f7c2a166576b..b732870ecc89 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/leds.h>
+#include <linux/pm.h>
#include <linux/i2c/lm8323.h>
#include <linux/slab.h>
@@ -802,8 +803,9 @@ static int __devexit lm8323_remove(struct i2c_client *client)
* We don't need to explicitly suspend the chip, as it already switches off
* when there's no activity.
*/
-static int lm8323_suspend(struct i2c_client *client, pm_message_t mesg)
+static int lm8323_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
@@ -821,8 +823,9 @@ static int lm8323_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int lm8323_resume(struct i2c_client *client)
+static int lm8323_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
@@ -839,11 +842,10 @@ static int lm8323_resume(struct i2c_client *client)
return 0;
}
-#else
-#define lm8323_suspend NULL
-#define lm8323_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(lm8323_pm_ops, lm8323_suspend, lm8323_resume);
+
static const struct i2c_device_id lm8323_id[] = {
{ "lm8323", 0 },
{ }
@@ -852,11 +854,10 @@ static const struct i2c_device_id lm8323_id[] = {
static struct i2c_driver lm8323_i2c_driver = {
.driver = {
.name = "lm8323",
+ .pm = &lm8323_pm_ops,
},
.probe = lm8323_probe,
.remove = __devexit_p(lm8323_remove),
- .suspend = lm8323_suspend,
- .resume = lm8323_resume,
.id_table = lm8323_id,
};
MODULE_DEVICE_TABLE(i2c, lm8323_id);
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index 9091ff5ea808..5afe35ad24d3 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
@@ -271,8 +272,10 @@ static int __devexit max7359_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int max7359_suspend(struct i2c_client *client, pm_message_t mesg)
+static int max7359_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
+
max7359_fall_deepsleep(client);
if (device_may_wakeup(&client->dev))
@@ -281,8 +284,10 @@ static int max7359_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int max7359_resume(struct i2c_client *client)
+static int max7359_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
+
if (device_may_wakeup(&client->dev))
disable_irq_wake(client->irq);
@@ -291,11 +296,10 @@ static int max7359_resume(struct i2c_client *client)
return 0;
}
-#else
-#define max7359_suspend NULL
-#define max7359_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(max7359_pm, max7359_suspend, max7359_resume);
+
static const struct i2c_device_id max7359_ids[] = {
{ "max7359", 0 },
{ }
@@ -305,11 +309,10 @@ MODULE_DEVICE_TABLE(i2c, max7359_ids);
static struct i2c_driver max7359_i2c_driver = {
.driver = {
.name = "max7359",
+ .pm = &max7359_pm,
},
.probe = max7359_probe,
.remove = __devexit_p(max7359_remove),
- .suspend = max7359_suspend,
- .resume = max7359_resume,
.id_table = max7359_ids,
};
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 63b849d7e90b..af1aab324a4c 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -1,5 +1,5 @@
/*
- * mcs_touchkey.c - Touchkey driver for MELFAS MCS5000/5080 controller
+ * Touchkey driver for MELFAS MCS5000/5080 controller
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
* Author: HeungJun Kim <riverful.kim@samsung.com>
@@ -19,6 +19,7 @@
#include <linux/input.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/pm.h>
/* MCS5000 Touchkey */
#define MCS5000_TOUCHKEY_STATUS 0x04
@@ -45,6 +46,8 @@ struct mcs_touchkey_chip {
};
struct mcs_touchkey_data {
+ void (*poweron)(bool);
+
struct i2c_client *client;
struct input_dev *input_dev;
struct mcs_touchkey_chip chip;
@@ -169,6 +172,11 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
if (pdata->cfg_pin)
pdata->cfg_pin();
+ if (pdata->poweron) {
+ data->poweron = pdata->poweron;
+ data->poweron(true);
+ }
+
error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
if (error) {
@@ -196,12 +204,57 @@ static int __devexit mcs_touchkey_remove(struct i2c_client *client)
struct mcs_touchkey_data *data = i2c_get_clientdata(client);
free_irq(client->irq, data);
+ if (data->poweron)
+ data->poweron(false);
input_unregister_device(data->input_dev);
kfree(data);
return 0;
}
+static void mcs_touchkey_shutdown(struct i2c_client *client)
+{
+ struct mcs_touchkey_data *data = i2c_get_clientdata(client);
+
+ if (data->poweron)
+ data->poweron(false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mcs_touchkey_suspend(struct device *dev)
+{
+ struct mcs_touchkey_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ /* Disable the work */
+ disable_irq(client->irq);
+
+ /* Finally turn off the power */
+ if (data->poweron)
+ data->poweron(false);
+
+ return 0;
+}
+
+static int mcs_touchkey_resume(struct device *dev)
+{
+ struct mcs_touchkey_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ /* Enable the device first */
+ if (data->poweron)
+ data->poweron(true);
+
+ /* Enable irq again */
+ enable_irq(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mcs_touchkey_pm_ops,
+ mcs_touchkey_suspend, mcs_touchkey_resume);
+
static const struct i2c_device_id mcs_touchkey_id[] = {
{ "mcs5000_touchkey", MCS5000_TOUCHKEY },
{ "mcs5080_touchkey", MCS5080_TOUCHKEY },
@@ -213,9 +266,11 @@ static struct i2c_driver mcs_touchkey_driver = {
.driver = {
.name = "mcs_touchkey",
.owner = THIS_MODULE,
+ .pm = &mcs_touchkey_pm_ops,
},
.probe = mcs_touchkey_probe,
.remove = __devexit_p(mcs_touchkey_remove),
+ .shutdown = mcs_touchkey_shutdown,
.id_table = mcs_touchkey_id,
};
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 45bd0977d006..c51a3c4a7feb 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <plat/omap4-keypad.h>
@@ -80,20 +81,6 @@ struct omap4_keypad {
unsigned short keymap[];
};
-static void __devinit omap4_keypad_config(struct omap4_keypad *keypad_data)
-{
- __raw_writel(OMAP4_VAL_FUNCTIONALCFG,
- keypad_data->base + OMAP4_KBD_CTRL);
- __raw_writel(OMAP4_VAL_DEBOUNCINGTIME,
- keypad_data->base + OMAP4_KBD_DEBOUNCINGTIME);
- __raw_writel(OMAP4_VAL_IRQDISABLE,
- keypad_data->base + OMAP4_KBD_IRQSTATUS);
- __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
- __raw_writel(OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA,
- keypad_data->base + OMAP4_KBD_WAKEUPENABLE);
-}
-
/* Interrupt handler */
static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
{
@@ -144,6 +131,49 @@ static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int omap4_keypad_open(struct input_dev *input)
+{
+ struct omap4_keypad *keypad_data = input_get_drvdata(input);
+
+ pm_runtime_get_sync(input->dev.parent);
+
+ disable_irq(keypad_data->irq);
+
+ __raw_writel(OMAP4_VAL_FUNCTIONALCFG,
+ keypad_data->base + OMAP4_KBD_CTRL);
+ __raw_writel(OMAP4_VAL_DEBOUNCINGTIME,
+ keypad_data->base + OMAP4_KBD_DEBOUNCINGTIME);
+ __raw_writel(OMAP4_VAL_IRQDISABLE,
+ keypad_data->base + OMAP4_KBD_IRQSTATUS);
+ __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
+ keypad_data->base + OMAP4_KBD_IRQENABLE);
+ __raw_writel(OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA,
+ keypad_data->base + OMAP4_KBD_WAKEUPENABLE);
+
+ enable_irq(keypad_data->irq);
+
+ return 0;
+}
+
+static void omap4_keypad_close(struct input_dev *input)
+{
+ struct omap4_keypad *keypad_data = input_get_drvdata(input);
+
+ disable_irq(keypad_data->irq);
+
+ /* Disable interrupts */
+ __raw_writel(OMAP4_VAL_IRQDISABLE,
+ keypad_data->base + OMAP4_KBD_IRQENABLE);
+
+ /* clear pending interrupts */
+ __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS),
+ keypad_data->base + OMAP4_KBD_IRQSTATUS);
+
+ enable_irq(keypad_data->irq);
+
+ pm_runtime_put_sync(input->dev.parent);
+}
+
static int __devinit omap4_keypad_probe(struct platform_device *pdev)
{
const struct omap4_keypad_platform_data *pdata;
@@ -225,6 +255,9 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
input_dev->id.product = 0x0001;
input_dev->id.version = 0x0001;
+ input_dev->open = omap4_keypad_open;
+ input_dev->close = omap4_keypad_close;
+
input_dev->keycode = keypad_data->keymap;
input_dev->keycodesize = sizeof(keypad_data->keymap[0]);
input_dev->keycodemax = max_keys;
@@ -239,8 +272,6 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
input_dev->keycode, input_dev->keybit);
- omap4_keypad_config(keypad_data);
-
error = request_irq(keypad_data->irq, omap4_keypad_interrupt,
IRQF_TRIGGER_RISING,
"omap4-keypad", keypad_data);
@@ -249,17 +280,19 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
goto err_free_input;
}
+ pm_runtime_enable(&pdev->dev);
+
error = input_register_device(keypad_data->input);
if (error < 0) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto err_free_irq;
+ goto err_pm_disable;
}
-
platform_set_drvdata(pdev, keypad_data);
return 0;
-err_free_irq:
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
free_irq(keypad_data->irq, keypad_data);
err_free_input:
input_free_device(input_dev);
@@ -278,6 +311,9 @@ static int __devexit omap4_keypad_remove(struct platform_device *pdev)
struct resource *res;
free_irq(keypad_data->irq, keypad_data);
+
+ pm_runtime_disable(&pdev->dev);
+
input_unregister_device(keypad_data->input);
iounmap(keypad_data->base);
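
In the omap4-keypad diff, the register setup that used to live in probe() moves into the input core's open()/close() handlers, bracketed by runtime-PM gets and puts so the keypad block is powered only while someone actually holds the event device open. A stripped-down sketch of that shape (placeholder names, register programming elided):

#include <linux/input.h>
#include <linux/pm_runtime.h>

static int foo_keypad_open(struct input_dev *input)
{
	pm_runtime_get_sync(input->dev.parent);	/* clock/power the block */

	/* ... program control, debounce and IRQ-enable registers ... */

	return 0;
}

static void foo_keypad_close(struct input_dev *input)
{
	/* ... mask the block's interrupts and clear pending status ... */

	pm_runtime_put_sync(input->dev.parent);	/* allow it to power down */
}

probe() then only assigns input->open/input->close and calls pm_runtime_enable() before registering the device, which is exactly what the hunks above add.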
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
new file mode 100644
index 000000000000..fba8404c7297
--- /dev/null
+++ b/drivers/input/keyboard/qt1070.c
@@ -0,0 +1,276 @@
+/*
+ * Atmel AT42QT1070 QTouch Sensor Controller
+ *
+ * Copyright (C) 2011 Atmel
+ *
+ * Authors: Bo Shen <voice.shen@atmel.com>
+ *
+ * Based on the AT42QT2160 driver by:
+ * Raphael Derosso Pereira <raphaelpereira@gmail.com>
+ * Copyright (C) 2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+/* Address for each register */
+#define CHIP_ID 0x00
+#define QT1070_CHIP_ID 0x2E
+
+#define FW_VERSION 0x01
+#define QT1070_FW_VERSION 0x15
+
+#define DET_STATUS 0x02
+
+#define KEY_STATUS 0x03
+
+/* Calibrate */
+#define CALIBRATE_CMD 0x38
+#define QT1070_CAL_TIME 200
+
+/* Reset */
+#define RESET 0x39
+#define QT1070_RESET_TIME 255
+
+/* The AT42QT1070 supports up to 7 keys */
+static const unsigned short qt1070_key2code[] = {
+ KEY_0, KEY_1, KEY_2, KEY_3,
+ KEY_4, KEY_5, KEY_6,
+};
+
+struct qt1070_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ unsigned int irq;
+ unsigned short keycodes[ARRAY_SIZE(qt1070_key2code)];
+ u8 last_keys;
+};
+
+static int qt1070_read(struct i2c_client *client, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ dev_err(&client->dev,
+ "can not read register, returned %d\n", ret);
+
+ return ret;
+}
+
+static int qt1070_write(struct i2c_client *client, u8 reg, u8 data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ if (ret < 0)
+ dev_err(&client->dev,
+ "can not write register, returned %d\n", ret);
+
+ return ret;
+}
+
+static bool __devinit qt1070_identify(struct i2c_client *client)
+{
+ int id, ver;
+
+ /* Read Chip ID */
+ id = qt1070_read(client, CHIP_ID);
+ if (id != QT1070_CHIP_ID) {
+ dev_err(&client->dev, "ID %d not supported\n", id);
+ return false;
+ }
+
+ /* Read firmware version */
+ ver = qt1070_read(client, FW_VERSION);
+ if (ver < 0) {
+ dev_err(&client->dev, "could not read the firmware version\n");
+ return false;
+ }
+
+ dev_info(&client->dev, "AT42QT1070 firmware version %x\n", ver);
+
+ return true;
+}
+
+static irqreturn_t qt1070_interrupt(int irq, void *dev_id)
+{
+ struct qt1070_data *data = dev_id;
+ struct i2c_client *client = data->client;
+ struct input_dev *input = data->input;
+ int i;
+ u8 new_keys, keyval, mask = 0x01;
+
+ /* Read the detected status register, thus clearing interrupt */
+ qt1070_read(client, DET_STATUS);
+
+ /* Read which key changed */
+ new_keys = qt1070_read(client, KEY_STATUS);
+
+ for (i = 0; i < ARRAY_SIZE(qt1070_key2code); i++) {
+ keyval = new_keys & mask;
+ if ((data->last_keys & mask) != keyval)
+ input_report_key(input, data->keycodes[i], keyval);
+ mask <<= 1;
+ }
+ input_sync(input);
+
+ data->last_keys = new_keys;
+ return IRQ_HANDLED;
+}
+
+static int __devinit qt1070_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct qt1070_data *data;
+ struct input_dev *input;
+ int i;
+ int err;
+
+ err = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE);
+ if (!err) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ if (!client->irq) {
+ dev_err(&client->dev, "please assign the irq to this device\n");
+ return -EINVAL;
+ }
+
+ /* Identify the qt1070 chip */
+ if (!qt1070_identify(client))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct qt1070_data), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!data || !input) {
+ dev_err(&client->dev, "insufficient memory\n");
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ data->client = client;
+ data->input = input;
+ data->irq = client->irq;
+
+ input->name = "AT42QT1070 QTouch Sensor";
+ input->dev.parent = &client->dev;
+ input->id.bustype = BUS_I2C;
+
+ /* Add the keycode */
+ input->keycode = data->keycodes;
+ input->keycodesize = sizeof(data->keycodes[0]);
+ input->keycodemax = ARRAY_SIZE(qt1070_key2code);
+
+ __set_bit(EV_KEY, input->evbit);
+
+ for (i = 0; i < ARRAY_SIZE(qt1070_key2code); i++) {
+ data->keycodes[i] = qt1070_key2code[i];
+ __set_bit(qt1070_key2code[i], input->keybit);
+ }
+
+ /* Calibrate device */
+ qt1070_write(client, CALIBRATE_CMD, 1);
+ msleep(QT1070_CAL_TIME);
+
+ /* Soft reset */
+ qt1070_write(client, RESET, 1);
+ msleep(QT1070_RESET_TIME);
+
+ err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
+ IRQF_TRIGGER_NONE, client->dev.driver->name, data);
+ if (err) {
+ dev_err(&client->dev, "fail to request irq\n");
+ goto err_free_mem;
+ }
+
+ /* Register the input device */
+ err = input_register_device(data->input);
+ if (err) {
+ dev_err(&client->dev, "Failed to register input device\n");
+ goto err_free_irq;
+ }
+
+ i2c_set_clientdata(client, data);
+
+ /* Read to clear the change line */
+ qt1070_read(client, DET_STATUS);
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, data);
+err_free_mem:
+ input_free_device(input);
+ kfree(data);
+ return err;
+}
+
+static int __devexit qt1070_remove(struct i2c_client *client)
+{
+ struct qt1070_data *data = i2c_get_clientdata(client);
+
+ /* Release IRQ */
+ free_irq(client->irq, data);
+
+ input_unregister_device(data->input);
+ kfree(data);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id qt1070_id[] = {
+ { "qt1070", 0 },
+ { },
+};
+
+static struct i2c_driver qt1070_driver = {
+ .driver = {
+ .name = "qt1070",
+ .owner = THIS_MODULE,
+ },
+ .id_table = qt1070_id,
+ .probe = qt1070_probe,
+ .remove = __devexit_p(qt1070_remove),
+};
+
+static int __init qt1070_init(void)
+{
+ return i2c_add_driver(&qt1070_driver);
+}
+module_init(qt1070_init);
+
+static void __exit qt1070_exit(void)
+{
+ i2c_del_driver(&qt1070_driver);
+}
+module_exit(qt1070_exit);
+
+MODULE_AUTHOR("Bo Shen <voice.shen@atmel.com>");
+MODULE_DESCRIPTION("Driver for AT42QT1070 QTouch sensor");
+MODULE_LICENSE("GPL");
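
The new qt1070 driver is instantiated from board code as an ordinary I2C device whose interrupt line is wired to the chip's CHANGE pin. A hypothetical registration snippet (bus number, slave address and IRQ are illustrative assumptions, not values from this patch):

#include <linux/i2c.h>
#include <linux/init.h>

#define BOARD_QT1070_IRQ	17	/* placeholder IRQ number */

static struct i2c_board_info board_i2c1_devices[] __initdata = {
	{
		I2C_BOARD_INFO("qt1070", 0x1b),	/* assumed 7-bit address */
		.irq = BOARD_QT1070_IRQ,
	},
};

/*
 * Registered from the machine init code, before the adapter probes:
 *	i2c_register_board_info(1, board_i2c1_devices,
 *				ARRAY_SIZE(board_i2c1_devices));
 */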
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index dbbe761778d2..99122f59e988 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -402,7 +402,7 @@ static int __devexit tc3589x_keypad_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int tc3589x_keypad_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -439,19 +439,19 @@ static int tc3589x_keypad_resume(struct device *dev)
return 0;
}
-
-static const SIMPLE_DEV_PM_OPS(tc3589x_keypad_dev_pm_ops,
- tc3589x_keypad_suspend, tc3589x_keypad_resume);
#endif
+static SIMPLE_DEV_PM_OPS(tc3589x_keypad_dev_pm_ops,
+ tc3589x_keypad_suspend, tc3589x_keypad_resume);
+
static struct platform_driver tc3589x_keypad_driver = {
- .driver.name = "tc3589x-keypad",
- .driver.owner = THIS_MODULE,
-#ifdef CONFIG_PM
- .driver.pm = &tc3589x_keypad_dev_pm_ops,
-#endif
- .probe = tc3589x_keypad_probe,
- .remove = __devexit_p(tc3589x_keypad_remove),
+ .driver = {
+ .name = "tc3589x-keypad",
+ .owner = THIS_MODULE,
+ .pm = &tc3589x_keypad_dev_pm_ops,
+ },
+ .probe = tc3589x_keypad_probe,
+ .remove = __devexit_p(tc3589x_keypad_remove),
};
static int __init tc3589x_keypad_init(void)
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 800fbccf1f0f..3afea3f89718 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -297,6 +297,7 @@ static int __devinit tca6416_keypad_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, chip);
+ device_init_wakeup(&client->dev, 1);
return 0;
@@ -326,10 +327,37 @@ static int __devexit tca6416_keypad_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int tca6416_keypad_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(chip->irqnum);
+
+ return 0;
+}
+
+static int tca6416_keypad_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(chip->irqnum);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tca6416_keypad_dev_pm_ops,
+ tca6416_keypad_suspend, tca6416_keypad_resume);
static struct i2c_driver tca6416_keypad_driver = {
.driver = {
.name = "tca6416-keypad",
+ .pm = &tca6416_keypad_dev_pm_ops,
},
.probe = tca6416_keypad_probe,
.remove = __devexit_p(tca6416_keypad_remove),
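
The tca6416-keypad hunks add system wakeup support: probe() declares the device wakeup-capable with device_init_wakeup(), and the new sleep callbacks arm or disarm its interrupt as a wake source only while wakeup remains enabled in sysfs. Reduced to its essentials (placeholder names, IRQ taken from the client for simplicity):

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(client->irq);	/* keep the IRQ armed in sleep */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(client->irq);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);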
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index 4cc82826ea6b..3dca3c14510e 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -74,7 +74,7 @@ static int __devinit pm860x_onkey_probe(struct platform_device *pdev)
info->chip = chip;
info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
info->dev = &pdev->dev;
- info->irq = irq + chip->irq_base;
+ info->irq = irq;
info->idev = input_allocate_device();
if (!info->idev) {
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index b0c6772851a9..f9cf0881b0e3 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -454,4 +454,17 @@ config INPUT_CMA3000_I2C
To compile this driver as a module, choose M here: the
module will be called cma3000_d0x_i2c.
+config INPUT_XEN_KBDDEV_FRONTEND
+ tristate "Xen virtual keyboard and mouse support"
+ depends on XEN_FBDEV_FRONTEND
+ default y
+ select XEN_XENBUS_FRONTEND
+ help
+ This driver implements the front-end of the Xen virtual
+ keyboard and mouse device driver. It communicates with a back-end
+ in another domain.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xen-kbdfront.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 9b4797112c9a..e3f7984e6274 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -42,5 +42,6 @@ obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o
obj-$(CONFIG_INPUT_UINPUT) += uinput.o
obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
+obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index 2bef8fa56c94..e21deb1baa8a 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -10,23 +10,23 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/pm.h>
#include "ad714x.h"
#ifdef CONFIG_PM
-static int ad714x_i2c_suspend(struct i2c_client *client, pm_message_t message)
+static int ad714x_i2c_suspend(struct device *dev)
{
- return ad714x_disable(i2c_get_clientdata(client));
+ return ad714x_disable(i2c_get_clientdata(to_i2c_client(dev)));
}
-static int ad714x_i2c_resume(struct i2c_client *client)
+static int ad714x_i2c_resume(struct device *dev)
{
- return ad714x_enable(i2c_get_clientdata(client));
+ return ad714x_enable(i2c_get_clientdata(to_i2c_client(dev)));
}
-#else
-# define ad714x_i2c_suspend NULL
-# define ad714x_i2c_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume);
+
static int ad714x_i2c_write(struct device *dev, unsigned short reg,
unsigned short data)
{
@@ -114,11 +114,10 @@ MODULE_DEVICE_TABLE(i2c, ad714x_id);
static struct i2c_driver ad714x_i2c_driver = {
.driver = {
.name = "ad714x_captouch",
+ .pm = &ad714x_i2c_pm,
},
.probe = ad714x_i2c_probe,
.remove = __devexit_p(ad714x_i2c_remove),
- .suspend = ad714x_i2c_suspend,
- .resume = ad714x_i2c_resume,
.id_table = ad714x_id,
};
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 7f8dedfd1bfe..4120dd549305 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -9,6 +9,7 @@
#include <linux/input.h> /* BUS_I2C */
#include <linux/module.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include <linux/types.h>
#include "ad714x.h"
@@ -16,20 +17,19 @@
#define AD714x_SPI_READ BIT(10)
#ifdef CONFIG_PM
-static int ad714x_spi_suspend(struct spi_device *spi, pm_message_t message)
+static int ad714x_spi_suspend(struct device *dev)
{
- return ad714x_disable(spi_get_drvdata(spi));
+ return ad714x_disable(spi_get_drvdata(to_spi_device(dev)));
}
-static int ad714x_spi_resume(struct spi_device *spi)
+static int ad714x_spi_resume(struct device *dev)
{
- return ad714x_enable(spi_get_drvdata(spi));
+ return ad714x_enable(spi_get_drvdata(to_spi_device(dev)));
}
-#else
-# define ad714x_spi_suspend NULL
-# define ad714x_spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);
+
static int ad714x_spi_read(struct device *dev, unsigned short reg,
unsigned short *data)
{
@@ -79,11 +79,10 @@ static struct spi_driver ad714x_spi_driver = {
.driver = {
.name = "ad714x_captouch",
.owner = THIS_MODULE,
+ .pm = &ad714x_spi_pm,
},
.probe = ad714x_spi_probe,
.remove = __devexit_p(ad714x_spi_remove),
- .suspend = ad714x_spi_suspend,
- .resume = ad714x_spi_resume,
};
static __init int ad714x_spi_init(void)
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index 0779724af7e7..ccacf2bb06a4 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -11,6 +11,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/pm.h>
#include "adxl34x.h"
static int adxl34x_smbus_read(struct device *dev, unsigned char reg)
@@ -105,8 +106,9 @@ static int __devexit adxl34x_i2c_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int adxl34x_i2c_suspend(struct i2c_client *client, pm_message_t message)
+static int adxl34x_i2c_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
adxl34x_suspend(ac);
@@ -114,19 +116,20 @@ static int adxl34x_i2c_suspend(struct i2c_client *client, pm_message_t message)
return 0;
}
-static int adxl34x_i2c_resume(struct i2c_client *client)
+static int adxl34x_i2c_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
adxl34x_resume(ac);
return 0;
}
-#else
-# define adxl34x_i2c_suspend NULL
-# define adxl34x_i2c_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(adxl34x_i2c_pm, adxl34x_i2c_suspend,
+ adxl34x_i2c_resume);
+
static const struct i2c_device_id adxl34x_id[] = {
{ "adxl34x", 0 },
{ }
@@ -138,11 +141,10 @@ static struct i2c_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
.owner = THIS_MODULE,
+ .pm = &adxl34x_i2c_pm,
},
.probe = adxl34x_i2c_probe,
.remove = __devexit_p(adxl34x_i2c_remove),
- .suspend = adxl34x_i2c_suspend,
- .resume = adxl34x_i2c_resume,
.id_table = adxl34x_id,
};
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 782de9e89828..f29de22fdda0 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -10,6 +10,7 @@
#include <linux/input.h> /* BUS_SPI */
#include <linux/module.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include <linux/types.h>
#include "adxl34x.h"
@@ -57,7 +58,7 @@ static int adxl34x_spi_read_block(struct device *dev,
return (status < 0) ? status : 0;
}
-static const struct adxl34x_bus_ops adx134x_spi_bops = {
+static const struct adxl34x_bus_ops adxl34x_spi_bops = {
.bustype = BUS_SPI,
.write = adxl34x_spi_write,
.read = adxl34x_spi_read,
@@ -76,7 +77,7 @@ static int __devinit adxl34x_spi_probe(struct spi_device *spi)
ac = adxl34x_probe(&spi->dev, spi->irq,
spi->max_speed_hz > MAX_FREQ_NO_FIFODELAY,
- &adx134x_spi_bops);
+ &adxl34x_spi_bops);
if (IS_ERR(ac))
return PTR_ERR(ac);
@@ -94,8 +95,9 @@ static int __devexit adxl34x_spi_remove(struct spi_device *spi)
}
#ifdef CONFIG_PM
-static int adxl34x_spi_suspend(struct spi_device *spi, pm_message_t message)
+static int adxl34x_spi_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = dev_get_drvdata(&spi->dev);
adxl34x_suspend(ac);
@@ -103,29 +105,29 @@ static int adxl34x_spi_suspend(struct spi_device *spi, pm_message_t message)
return 0;
}
-static int adxl34x_spi_resume(struct spi_device *spi)
+static int adxl34x_spi_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = dev_get_drvdata(&spi->dev);
adxl34x_resume(ac);
return 0;
}
-#else
-# define adxl34x_spi_suspend NULL
-# define adxl34x_spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
+ adxl34x_spi_resume);
+
static struct spi_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &adxl34x_spi_pm,
},
.probe = adxl34x_spi_probe,
.remove = __devexit_p(adxl34x_spi_remove),
- .suspend = adxl34x_spi_suspend,
- .resume = adxl34x_spi_resume,
};
static int __init adxl34x_spi_init(void)
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 0b0e9be63542..9ccdb82d869a 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -612,8 +612,8 @@ static int ati_remote2_input_init(struct ati_remote2 *ar2)
idev->open = ati_remote2_open;
idev->close = ati_remote2_close;
- idev->getkeycode_new = ati_remote2_getkeycode;
- idev->setkeycode_new = ati_remote2_setkeycode;
+ idev->getkeycode = ati_remote2_getkeycode;
+ idev->setkeycode = ati_remote2_setkeycode;
idev->name = ar2->name;
idev->phys = ar2->phys;
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 014dd4ad0d4f..6a11694e3fc7 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -29,6 +29,7 @@
#include <linux/workqueue.h>
#include <linux/i2c/twl.h>
#include <linux/mfd/twl4030-codec.h>
+#include <linux/mfd/core.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -196,7 +197,7 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_codec_vibra_data *pdata = pdev->dev.platform_data;
+ struct twl4030_codec_vibra_data *pdata = mfd_get_data(pdev);
struct vibra_info *info;
int ret;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 82542a1c1098..364bdf43a381 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -347,8 +347,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
{
struct uinput_user_dev *user_dev;
struct input_dev *dev;
- char *name;
- int i, size;
+ int i;
int retval;
if (count != sizeof(struct uinput_user_dev))
@@ -362,30 +361,25 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
dev = udev->dev;
- user_dev = kmalloc(sizeof(struct uinput_user_dev), GFP_KERNEL);
- if (!user_dev)
- return -ENOMEM;
-
- if (copy_from_user(user_dev, buffer, sizeof(struct uinput_user_dev))) {
- retval = -EFAULT;
- goto exit;
- }
+ user_dev = memdup_user(buffer, sizeof(struct uinput_user_dev));
+ if (IS_ERR(user_dev))
+ return PTR_ERR(user_dev);
udev->ff_effects_max = user_dev->ff_effects_max;
- size = strnlen(user_dev->name, UINPUT_MAX_NAME_SIZE) + 1;
- if (!size) {
+ /* Ensure name is filled in */
+ if (!user_dev->name[0]) {
retval = -EINVAL;
goto exit;
}
kfree(dev->name);
- dev->name = name = kmalloc(size, GFP_KERNEL);
- if (!name) {
+ dev->name = kstrndup(user_dev->name, UINPUT_MAX_NAME_SIZE,
+ GFP_KERNEL);
+ if (!dev->name) {
retval = -ENOMEM;
goto exit;
}
- strlcpy(name, user_dev->name, size);
dev->id.bustype = user_dev->id.bustype;
dev->id.vendor = user_dev->id.vendor;
@@ -622,7 +616,6 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
struct uinput_ff_upload ff_up;
struct uinput_ff_erase ff_erase;
struct uinput_request *req;
- int length;
char *phys;
retval = mutex_lock_interruptible(&udev->mutex);
@@ -689,24 +682,15 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
retval = -EINVAL;
goto out;
}
- length = strnlen_user(p, 1024);
- if (length <= 0) {
- retval = -EFAULT;
- break;
+
+ phys = strndup_user(p, 1024);
+ if (IS_ERR(phys)) {
+ retval = PTR_ERR(phys);
+ goto out;
}
+
kfree(udev->dev->phys);
- udev->dev->phys = phys = kmalloc(length, GFP_KERNEL);
- if (!phys) {
- retval = -ENOMEM;
- break;
- }
- if (copy_from_user(phys, p, length)) {
- udev->dev->phys = NULL;
- kfree(phys);
- retval = -EFAULT;
- break;
- }
- phys[length - 1] = '\0';
+ udev->dev->phys = phys;
break;
case UI_BEGIN_FF_UPLOAD:
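
The uinput rework trades the open-coded kmalloc()+copy_from_user() and strnlen_user() sequences for memdup_user(), kstrndup() and strndup_user(), all of which return ERR_PTR() values on failure. A self-contained sketch of that calling convention (the struct and size limits are invented for illustration):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct foo_setup {
	char name[80];
	int flags;
};

static int foo_copy_setup(const void __user *ubuf, const char __user *uphys)
{
	struct foo_setup *setup;
	char *phys;
	int ret = 0;

	setup = memdup_user(ubuf, sizeof(*setup));	/* kmalloc + copy_from_user */
	if (IS_ERR(setup))
		return PTR_ERR(setup);

	phys = strndup_user(uphys, 1024);	/* bounded, NUL-terminated copy */
	if (IS_ERR(phys)) {
		ret = PTR_ERR(phys);
		goto out_free_setup;
	}

	/* ... consume setup->name, setup->flags and phys ... */

	kfree(phys);
out_free_setup:
	kfree(setup);
	return ret;
}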
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 7f85a862ad11..7077f9bf5ead 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -11,12 +11,6 @@
* more details.
*/
-/*
- * TODO:
- *
- * Switch to grant tables together with xen-fbfront.c.
- */
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
@@ -30,6 +24,8 @@
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/page.h>
+#include <xen/grant_table.h>
+#include <xen/interface/grant_table.h>
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/kbdif.h>
#include <xen/xenbus.h>
@@ -38,6 +34,7 @@ struct xenkbd_info {
struct input_dev *kbd;
struct input_dev *ptr;
struct xenkbd_page *page;
+ int gref;
int irq;
struct xenbus_device *xbdev;
char phys[32];
@@ -110,7 +107,7 @@ static irqreturn_t input_handler(int rq, void *dev_id)
static int __devinit xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
- int ret, i;
+ int ret, i, abs;
struct xenkbd_info *info;
struct input_dev *kbd, *ptr;
@@ -122,12 +119,18 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
dev_set_drvdata(&dev->dev, info);
info->xbdev = dev;
info->irq = -1;
+ info->gref = -1;
snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!info->page)
goto error_nomem;
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
+ abs = 0;
+ if (abs)
+ xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1");
+
/* keyboard */
kbd = input_allocate_device();
if (!kbd)
@@ -137,11 +140,12 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
kbd->id.bustype = BUS_PCI;
kbd->id.vendor = 0x5853;
kbd->id.product = 0xffff;
- kbd->evbit[0] = BIT(EV_KEY);
+
+ __set_bit(EV_KEY, kbd->evbit);
for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
- set_bit(i, kbd->keybit);
+ __set_bit(i, kbd->keybit);
for (i = KEY_OK; i < KEY_MAX; i++)
- set_bit(i, kbd->keybit);
+ __set_bit(i, kbd->keybit);
ret = input_register_device(kbd);
if (ret) {
@@ -160,12 +164,20 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
ptr->id.bustype = BUS_PCI;
ptr->id.vendor = 0x5853;
ptr->id.product = 0xfffe;
- ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
+
+ if (abs) {
+ __set_bit(EV_ABS, ptr->evbit);
+ input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
+ input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
+ } else {
+ input_set_capability(ptr, EV_REL, REL_X);
+ input_set_capability(ptr, EV_REL, REL_Y);
+ }
+ input_set_capability(ptr, EV_REL, REL_WHEEL);
+
+ __set_bit(EV_KEY, ptr->evbit);
for (i = BTN_LEFT; i <= BTN_TASK; i++)
- set_bit(i, ptr->keybit);
- ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
- input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
- input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
+ __set_bit(i, ptr->keybit);
ret = input_register_device(ptr);
if (ret) {
@@ -218,15 +230,20 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
int ret, evtchn;
struct xenbus_transaction xbt;
+ ret = gnttab_grant_foreign_access(dev->otherend_id,
+ virt_to_mfn(info->page), 0);
+ if (ret < 0)
+ return ret;
+ info->gref = ret;
+
ret = xenbus_alloc_evtchn(dev, &evtchn);
if (ret)
- return ret;
+ goto error_grant;
ret = bind_evtchn_to_irqhandler(evtchn, input_handler,
0, dev->devicetype, info);
if (ret < 0) {
- xenbus_free_evtchn(dev, evtchn);
xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
- return ret;
+ goto error_evtchan;
}
info->irq = ret;
@@ -234,12 +251,15 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
ret = xenbus_transaction_start(&xbt);
if (ret) {
xenbus_dev_fatal(dev, ret, "starting transaction");
- return ret;
+ goto error_irqh;
}
ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
virt_to_mfn(info->page));
if (ret)
goto error_xenbus;
+ ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
+ if (ret)
+ goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
evtchn);
if (ret)
@@ -249,7 +269,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
if (ret == -EAGAIN)
goto again;
xenbus_dev_fatal(dev, ret, "completing transaction");
- return ret;
+ goto error_irqh;
}
xenbus_switch_state(dev, XenbusStateInitialised);
@@ -258,6 +278,14 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
error_xenbus:
xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, ret, "writing xenstore");
+ error_irqh:
+ unbind_from_irqhandler(info->irq, info);
+ info->irq = -1;
+ error_evtchan:
+ xenbus_free_evtchn(dev, evtchn);
+ error_grant:
+ gnttab_end_foreign_access_ref(info->gref, 0);
+ info->gref = -1;
return ret;
}
@@ -266,13 +294,16 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *info)
if (info->irq >= 0)
unbind_from_irqhandler(info->irq, info);
info->irq = -1;
+ if (info->gref >= 0)
+ gnttab_end_foreign_access_ref(info->gref, 0);
+ info->gref = -1;
}
static void xenkbd_backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
- int ret, val;
+ int val;
switch (backend_state) {
case XenbusStateInitialising:
@@ -285,16 +316,6 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
case XenbusStateInitWait:
InitWait:
- ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-abs-pointer", "%d", &val);
- if (ret < 0)
- val = 0;
- if (val) {
- ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
- "request-abs-pointer", "1");
- if (ret)
- pr_warning("can't request abs-pointer\n");
- }
xenbus_switch_state(dev, XenbusStateConnected);
break;
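
Besides moving the abs-pointer negotiation into probe(), the xen-kbdfront changes grant the backend explicit access to the shared page and publish the grant reference as "page-gref" next to the legacy "page-ref" key. Condensed into one helper purely for illustration (error unwinding and the surrounding xenbus transaction are left out):

#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <asm/xen/page.h>	/* virt_to_mfn() */

static int foo_share_page(struct xenbus_device *dev, void *page,
			  grant_ref_t *gref)
{
	int ret;

	ret = gnttab_grant_foreign_access(dev->otherend_id,
					  virt_to_mfn(page), 0 /* read-write */);
	if (ret < 0)
		return ret;
	*gref = ret;

	/* tell the backend which grant reference to map */
	return xenbus_printf(XBT_NIL, dev->nodename, "page-gref", "%u", *gref);
}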
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ee82851afe3e..3aead91bacc8 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -63,6 +63,10 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
+/* MacbookPro8 (unibody, March 2011) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
+#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
+#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -96,6 +100,10 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
+ /* MacbookPro8 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
/* Terminating entry */
{}
};
@@ -274,6 +282,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
{ DIM_X, DIM_X / SN_COORD, -4616, 5112 },
{ DIM_Y, DIM_Y / SN_COORD, -142, 5234 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING5_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4415, 5050 },
+ { DIM_Y, DIM_Y / SN_COORD, -55, 6680 }
+ },
{}
};
@@ -430,10 +450,6 @@ static int report_tp_state(struct bcm5974 *dev, int size)
ptest = int2bound(&c->p, raw_p);
origin = raw2int(f->origin);
- /* set the integrated button if applicable */
- if (c->tp_type == TYPE2)
- ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
-
/* while tracking finger still valid, count all fingers */
if (ptest > PRESSURE_LOW && origin) {
abs_p = ptest;
@@ -452,6 +468,10 @@ static int report_tp_state(struct bcm5974 *dev, int size)
}
}
+ /* set the integrated button if applicable */
+ if (c->tp_type == TYPE2)
+ ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
+
if (dev->fingers < nmin)
dev->fingers = nmin;
if (dev->fingers > nmax)
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 0ae62f0bcb32..f6aa26d305ed 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/pm.h>
#define DRIVER_NAME "synaptics_i2c"
/* maximum product id is 15 characters */
@@ -619,8 +620,9 @@ static int __devexit synaptics_i2c_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int synaptics_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
+static int synaptics_i2c_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct synaptics_i2c *touch = i2c_get_clientdata(client);
cancel_delayed_work_sync(&touch->dwork);
@@ -631,9 +633,10 @@ static int synaptics_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int synaptics_i2c_resume(struct i2c_client *client)
+static int synaptics_i2c_resume(struct device *dev)
{
int ret;
+ struct i2c_client *client = to_i2c_client(dev);
struct synaptics_i2c *touch = i2c_get_clientdata(client);
ret = synaptics_i2c_reset_config(client);
@@ -645,11 +648,11 @@ static int synaptics_i2c_resume(struct i2c_client *client)
return 0;
}
-#else
-#define synaptics_i2c_suspend NULL
-#define synaptics_i2c_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(synaptics_i2c_pm, synaptics_i2c_suspend,
+ synaptics_i2c_resume);
+
static const struct i2c_device_id synaptics_i2c_id_table[] = {
{ "synaptics_i2c", 0 },
{ },
@@ -660,13 +663,12 @@ static struct i2c_driver synaptics_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .pm = &synaptics_i2c_pm,
},
.probe = synaptics_i2c_probe,
.remove = __devexit_p(synaptics_i2c_remove),
- .suspend = synaptics_i2c_suspend,
- .resume = synaptics_i2c_resume,
.id_table = synaptics_i2c_id_table,
};
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 92563a681d65..12abc50508e5 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,8 @@ static void amba_kmi_close(struct serio *io)
clk_disable(kmi->clk);
}
-static int __devinit amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit amba_kmi_probe(struct amba_device *dev,
+ const struct amba_id *id)
{
struct amba_kmi_port *kmi;
struct serio *io;
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 7729e547ba65..337bf51bc984 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -210,8 +210,8 @@ int sparse_keymap_setup(struct input_dev *dev,
dev->keycode = map;
dev->keycodemax = map_size;
- dev->getkeycode_new = sparse_keymap_getkeycode;
- dev->setkeycode_new = sparse_keymap_setkeycode;
+ dev->getkeycode = sparse_keymap_getkeycode;
+ dev->setkeycode = sparse_keymap_setkeycode;
return 0;
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index cf8fb9f5d4a8..449c0a46dbac 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -193,16 +193,16 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
case HID_USAGE_X:
if (usage == WCM_DESKTOP) {
if (finger) {
- features->device_type = BTN_TOOL_DOUBLETAP;
+ features->device_type = BTN_TOOL_FINGER;
if (features->type == TABLETPC2FG) {
/* need to reset back */
features->pktlen = WACOM_PKGLEN_TPC2FG;
- features->device_type = BTN_TOOL_TRIPLETAP;
+ features->device_type = BTN_TOOL_DOUBLETAP;
}
if (features->type == BAMBOO_PT) {
/* need to reset back */
features->pktlen = WACOM_PKGLEN_BBTOUCH;
- features->device_type = BTN_TOOL_TRIPLETAP;
+ features->device_type = BTN_TOOL_DOUBLETAP;
features->x_phy =
get_unaligned_le16(&report[i + 5]);
features->x_max =
@@ -241,11 +241,11 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
case HID_USAGE_Y:
if (usage == WCM_DESKTOP) {
if (finger) {
- features->device_type = BTN_TOOL_DOUBLETAP;
+ features->device_type = BTN_TOOL_FINGER;
if (features->type == TABLETPC2FG) {
/* need to reset back */
features->pktlen = WACOM_PKGLEN_TPC2FG;
- features->device_type = BTN_TOOL_TRIPLETAP;
+ features->device_type = BTN_TOOL_DOUBLETAP;
features->y_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
@@ -254,7 +254,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
} else if (features->type == BAMBOO_PT) {
/* need to reset back */
features->pktlen = WACOM_PKGLEN_BBTOUCH;
- features->device_type = BTN_TOOL_TRIPLETAP;
+ features->device_type = BTN_TOOL_DOUBLETAP;
features->y_phy =
get_unaligned_le16(&report[i + 3]);
features->y_max =
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 367fa82a607e..5597637cfd41 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -675,169 +675,87 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
return 1;
}
-
-static void wacom_tpc_finger_in(struct wacom_wac *wacom, char *data, int idx)
+static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
- int finger = idx + 1;
- int x = le16_to_cpup((__le16 *)&data[finger * 2]) & 0x7fff;
- int y = le16_to_cpup((__le16 *)&data[4 + finger * 2]) & 0x7fff;
+ unsigned char *data = wacom->data;
+ int contact_with_no_pen_down_count = 0;
+ int i;
- /*
- * Work around input core suppressing "duplicate" events since
- * we are abusing ABS_X/ABS_Y to transmit multi-finger data.
- * This should go away once we switch to true multitouch
- * protocol.
- */
- if (wacom->last_finger != finger) {
- if (x == input_abs_get_val(input, ABS_X))
- x++;
+ for (i = 0; i < 2; i++) {
+ int p = data[1] & (1 << i);
+ bool touch = p && !wacom->shared->stylus_in_proximity;
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+ if (touch) {
+ int x = le16_to_cpup((__le16 *)&data[i * 2 + 2]) & 0x7fff;
+ int y = le16_to_cpup((__le16 *)&data[i * 2 + 6]) & 0x7fff;
- if (y == input_abs_get_val(input, ABS_Y))
- y++;
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ contact_with_no_pen_down_count++;
+ }
}
- input_report_abs(input, ABS_X, x);
- input_report_abs(input, ABS_Y, y);
- input_report_abs(input, ABS_MISC, wacom->id[0]);
- input_report_key(input, wacom->tool[finger], 1);
- if (!idx)
- input_report_key(input, BTN_TOUCH, 1);
- input_event(input, EV_MSC, MSC_SERIAL, finger);
- input_sync(input);
+ /* keep touch state for pen event */
+ wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
- wacom->last_finger = finger;
-}
+ input_mt_report_pointer_emulation(input, true);
-static void wacom_tpc_touch_out(struct wacom_wac *wacom, int idx)
-{
- struct input_dev *input = wacom->input;
- int finger = idx + 1;
-
- input_report_abs(input, ABS_X, 0);
- input_report_abs(input, ABS_Y, 0);
- input_report_abs(input, ABS_MISC, 0);
- input_report_key(input, wacom->tool[finger], 0);
- if (!idx)
- input_report_key(input, BTN_TOUCH, 0);
- input_event(input, EV_MSC, MSC_SERIAL, finger);
- input_sync(input);
+ return 1;
}
-static void wacom_tpc_touch_in(struct wacom_wac *wacom, size_t len)
+static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
{
char *data = wacom->data;
struct input_dev *input = wacom->input;
+ bool prox;
+ int x = 0, y = 0;
- wacom->tool[1] = BTN_TOOL_DOUBLETAP;
- wacom->id[0] = TOUCH_DEVICE_ID;
- wacom->tool[2] = BTN_TOOL_TRIPLETAP;
-
- if (len != WACOM_PKGLEN_TPC1FG) {
-
- switch (data[0]) {
+ if (!wacom->shared->stylus_in_proximity) {
+ if (len == WACOM_PKGLEN_TPC1FG) {
+ prox = data[0] & 0x01;
+ x = get_unaligned_le16(&data[1]);
+ y = get_unaligned_le16(&data[3]);
+ } else { /* with capacity */
+ prox = data[1] & 0x01;
+ x = le16_to_cpup((__le16 *)&data[2]);
+ y = le16_to_cpup((__le16 *)&data[4]);
+ }
+ } else
+ /* force touch out when pen is in prox */
+ prox = 0;
- case WACOM_REPORT_TPC1FG:
- input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
- input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
- input_report_abs(input, ABS_PRESSURE, le16_to_cpup((__le16 *)&data[6]));
- input_report_key(input, BTN_TOUCH, le16_to_cpup((__le16 *)&data[6]));
- input_report_abs(input, ABS_MISC, wacom->id[0]);
- input_report_key(input, wacom->tool[1], 1);
- input_sync(input);
- break;
+ if (prox) {
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ }
+ input_report_key(input, BTN_TOUCH, prox);
- case WACOM_REPORT_TPC2FG:
- if (data[1] & 0x01)
- wacom_tpc_finger_in(wacom, data, 0);
- else if (wacom->id[1] & 0x01)
- wacom_tpc_touch_out(wacom, 0);
+ /* keep touch state for pen events */
+ wacom->shared->touch_down = prox;
- if (data[1] & 0x02)
- wacom_tpc_finger_in(wacom, data, 1);
- else if (wacom->id[1] & 0x02)
- wacom_tpc_touch_out(wacom, 1);
- break;
- }
- } else {
- input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
- input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
- input_report_key(input, BTN_TOUCH, 1);
- input_report_abs(input, ABS_MISC, wacom->id[1]);
- input_report_key(input, wacom->tool[1], 1);
- input_sync(input);
- }
+ return 1;
}
-static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
+static int wacom_tpc_pen(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
char *data = wacom->data;
struct input_dev *input = wacom->input;
- int prox = 0, pressure;
- int retval = 0;
+ int pressure;
+ bool prox = data[1] & 0x20;
- dbg("wacom_tpc_irq: received report #%d", data[0]);
-
- if (len == WACOM_PKGLEN_TPC1FG || /* single touch */
- data[0] == WACOM_REPORT_TPC1FG || /* single touch */
- data[0] == WACOM_REPORT_TPC2FG) { /* 2FG touch */
-
- if (wacom->shared->stylus_in_proximity) {
- if (wacom->id[1] & 0x01)
- wacom_tpc_touch_out(wacom, 0);
-
- if (wacom->id[1] & 0x02)
- wacom_tpc_touch_out(wacom, 1);
-
- wacom->id[1] = 0;
- return 0;
- }
-
- if (len == WACOM_PKGLEN_TPC1FG) { /* with touch */
- prox = data[0] & 0x01;
- } else { /* with capacity */
- if (data[0] == WACOM_REPORT_TPC1FG)
- /* single touch */
- prox = data[1] & 0x01;
- else
- /* 2FG touch data */
- prox = data[1] & 0x03;
- }
-
- if (prox) {
- if (!wacom->id[1])
- wacom->last_finger = 1;
- wacom_tpc_touch_in(wacom, len);
- } else {
- if (data[0] == WACOM_REPORT_TPC2FG) {
- /* 2FGT out-prox */
- if (wacom->id[1] & 0x01)
- wacom_tpc_touch_out(wacom, 0);
+ if (!wacom->shared->stylus_in_proximity) /* first in prox */
+ /* Going into proximity select tool */
+ wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- if (wacom->id[1] & 0x02)
- wacom_tpc_touch_out(wacom, 1);
- } else
- /* one finger touch */
- wacom_tpc_touch_out(wacom, 0);
+ /* keep pen state for touch events */
+ wacom->shared->stylus_in_proximity = prox;
- wacom->id[0] = 0;
- }
- /* keep prox bit to send proper out-prox event */
- wacom->id[1] = prox;
- } else if (data[0] == WACOM_REPORT_PENABLED) { /* Penabled */
- prox = data[1] & 0x20;
-
- if (!wacom->shared->stylus_in_proximity) { /* first in prox */
- /* Going into proximity select tool */
- wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- if (wacom->tool[0] == BTN_TOOL_PEN)
- wacom->id[0] = STYLUS_DEVICE_ID;
- else
- wacom->id[0] = ERASER_DEVICE_ID;
-
- wacom->shared->stylus_in_proximity = true;
- }
+ /* send pen events only when touch is up or forced out */
+ if (!wacom->shared->touch_down) {
input_report_key(input, BTN_STYLUS, data[1] & 0x02);
input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
@@ -847,15 +765,27 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
pressure = features->pressure_max + pressure + 1;
input_report_abs(input, ABS_PRESSURE, pressure);
input_report_key(input, BTN_TOUCH, data[1] & 0x05);
- if (!prox) { /* out-prox */
- wacom->id[0] = 0;
- wacom->shared->stylus_in_proximity = false;
- }
input_report_key(input, wacom->tool[0], prox);
- input_report_abs(input, ABS_MISC, wacom->id[0]);
- retval = 1;
+ return 1;
}
- return retval;
+
+ return 0;
+}
+
+static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
+{
+ char *data = wacom->data;
+
+ dbg("wacom_tpc_irq: received report #%d", data[0]);
+
+ if (len == WACOM_PKGLEN_TPC1FG || data[0] == WACOM_REPORT_TPC1FG)
+ return wacom_tpc_single_touch(wacom, len);
+ else if (data[0] == WACOM_REPORT_TPC2FG)
+ return wacom_tpc_mt_touch(wacom);
+ else if (data[0] == WACOM_REPORT_PENABLED)
+ return wacom_tpc_pen(wacom);
+
+ return 0;
}
static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -1078,7 +1008,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
{
/* touch device found but size is not defined. use default */
- if (features->device_type == BTN_TOOL_DOUBLETAP && !features->x_max) {
+ if (features->device_type == BTN_TOOL_FINGER && !features->x_max) {
features->x_max = 1023;
features->y_max = 1023;
}
@@ -1090,7 +1020,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
/* quirks for bamboo touch */
if (features->type == BAMBOO_PT &&
- features->device_type == BTN_TOOL_TRIPLETAP) {
+ features->device_type == BTN_TOOL_DOUBLETAP) {
features->x_max <<= 5;
features->y_max <<= 5;
features->x_fuzz <<= 5;
@@ -1226,27 +1156,30 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
break;
case TABLETPC2FG:
- if (features->device_type == BTN_TOOL_TRIPLETAP) {
- __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
- input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ if (features->device_type == BTN_TOOL_DOUBLETAP) {
+
+ input_mt_init_slots(input_dev, 2);
+ input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, features->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, features->y_max, 0, 0);
}
/* fall through */
case TABLETPC:
- if (features->device_type == BTN_TOOL_DOUBLETAP ||
- features->device_type == BTN_TOOL_TRIPLETAP) {
+ __clear_bit(ABS_MISC, input_dev->absbit);
+
+ if (features->device_type != BTN_TOOL_PEN) {
input_abs_set_res(input_dev, ABS_X,
wacom_calculate_touch_res(features->x_max,
features->x_phy));
input_abs_set_res(input_dev, ABS_Y,
wacom_calculate_touch_res(features->y_max,
features->y_phy));
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- }
-
- if (features->device_type != BTN_TOOL_PEN)
break; /* no need to process stylus stuff */
-
+ }
/* fall through */
case PL:
@@ -1264,7 +1197,7 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
case BAMBOO_PT:
__clear_bit(ABS_MISC, input_dev->absbit);
- if (features->device_type == BTN_TOOL_TRIPLETAP) {
+ if (features->device_type == BTN_TOOL_DOUBLETAP) {
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_FORWARD, input_dev->keybit);
__set_bit(BTN_BACK, input_dev->keybit);
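
The wacom_wac.c rewrite drops the old ABS_X/ABS_Y finger-multiplexing workaround and reports the two TabletPC contacts through the slotted multitouch protocol, with pointer emulation keeping single-touch userspace working. The reporting loop boils down to the following (a placeholder helper; two slots assumed, as in the hunk above):

#include <linux/input/mt.h>

static void foo_report_two_fingers(struct input_dev *input,
				   const bool touching[2],
				   const int x[2], const int y[2])
{
	int i;

	for (i = 0; i < 2; i++) {
		input_mt_slot(input, i);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, touching[i]);
		if (touching[i]) {
			input_report_abs(input, ABS_MT_POSITION_X, x[i]);
			input_report_abs(input, ABS_MT_POSITION_Y, y[i]);
		}
	}
	/* derive BTN_TOUCH/BTN_TOOL_* and single-touch axes for old userspace */
	input_mt_report_pointer_emulation(input, true);
	input_sync(input);
}

The matching setup is the input_mt_init_slots(input_dev, 2) call plus the ABS_MT_POSITION_X/Y ranges that the capabilities hunk above registers.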
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index b1310ec9720c..835f756b150c 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -88,15 +88,15 @@ struct wacom_features {
struct wacom_shared {
bool stylus_in_proximity;
+ bool touch_down;
};
struct wacom_wac {
char name[64];
unsigned char *data;
- int tool[3];
- int id[3];
+ int tool[2];
+ int id[2];
__u32 serial[2];
- int last_finger;
struct wacom_features features;
struct wacom_shared *shared;
struct input_dev *input;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 61834ae282e1..112ec55f2939 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -86,6 +86,18 @@ config TOUCHSCREEN_AD7879_SPI
To compile this driver as a module, choose M here: the
module will be called ad7879-spi.
+config TOUCHSCREEN_ATMEL_MXT
+ tristate "Atmel mXT I2C Touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have an Atmel mXT series I2C touchscreen,
+ such as AT42QT602240/ATMXT224, connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atmel_mxt_ts.
+
config TOUCHSCREEN_BITSY
tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
depends on SA1100_BITSY
@@ -339,18 +351,6 @@ config TOUCHSCREEN_PENMOUNT
To compile this driver as a module, choose M here: the
module will be called penmount.
-config TOUCHSCREEN_QT602240
- tristate "QT602240 I2C Touchscreen"
- depends on I2C
- help
- Say Y here if you have the AT42QT602240/ATMXT224 I2C touchscreen
- connected to your system.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called qt602240_ts.
-
config TOUCHSCREEN_MIGOR
tristate "Renesas MIGO-R touchscreen"
depends on SH_MIGOR && I2C
@@ -423,6 +423,16 @@ config TOUCHSCREEN_UCB1400
To compile this driver as a module, choose M here: the
module will be called ucb1400_ts.
+config TOUCHSCREEN_WM831X
+ tristate "Support for WM831x touchscreen controllers"
+ depends on MFD_WM831X
+ help
+ This enables support for the touchscreen controller on the WM831x
+ series of PMICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wm831x-ts.
+
config TOUCHSCREEN_WM97XX
tristate "Support for WM97xx AC97 touchscreen controllers"
depends on AC97_BUS
@@ -629,6 +639,17 @@ config TOUCHSCREEN_TOUCHIT213
To compile this driver as a module, choose M here: the
module will be called touchit213.
+config TOUCHSCREEN_TSC2005
+ tristate "TSC2005 based touchscreens"
+ depends on SPI_MASTER
+ help
+ Say Y here if you have a TSC2005 based touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tsc2005.
+
config TOUCHSCREEN_TSC2007
tristate "TSC2007 based touchscreens"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 718bcc814952..ca94098d4c92 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C) += ad7879-i2c.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
+obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
@@ -37,7 +38,6 @@ obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
-obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
@@ -45,9 +45,11 @@ obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
+obj-$(CONFIG_TOUCHSCREEN_TSC2005) += tsc2005.o
obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
+obj-$(CONFIG_TOUCHSCREEN_WM831X) += wm831x-ts.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX) += wm97xx-ts.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index a1952fcc083e..714d4e0f9f95 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -41,6 +41,7 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/ad7877.h>
@@ -826,39 +827,37 @@ static int __devexit ad7877_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM
-static int ad7877_suspend(struct spi_device *spi, pm_message_t message)
+#ifdef CONFIG_PM_SLEEP
+static int ad7877_suspend(struct device *dev)
{
- struct ad7877 *ts = dev_get_drvdata(&spi->dev);
+ struct ad7877 *ts = dev_get_drvdata(dev);
ad7877_disable(ts);
return 0;
}
-static int ad7877_resume(struct spi_device *spi)
+static int ad7877_resume(struct device *dev)
{
- struct ad7877 *ts = dev_get_drvdata(&spi->dev);
+ struct ad7877 *ts = dev_get_drvdata(dev);
ad7877_enable(ts);
return 0;
}
-#else
-#define ad7877_suspend NULL
-#define ad7877_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
+
static struct spi_driver ad7877_driver = {
.driver = {
.name = "ad7877",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &ad7877_pm,
},
.probe = ad7877_probe,
.remove = __devexit_p(ad7877_remove),
- .suspend = ad7877_suspend,
- .resume = ad7877_resume,
};
static int __init ad7877_init(void)
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index 59c6e68c4325..ddf732f3cafc 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -7,6 +7,7 @@
*/
#include <linux/input.h> /* BUS_SPI */
+#include <linux/pm.h>
#include <linux/spi/spi.h>
#include "ad7879.h"
@@ -20,9 +21,10 @@
#define AD7879_WRITECMD(reg) (AD7879_CMD(reg))
#define AD7879_READCMD(reg) (AD7879_CMD(reg) | AD7879_CMD_READ)
-#ifdef CONFIG_PM
-static int ad7879_spi_suspend(struct spi_device *spi, pm_message_t message)
+#ifdef CONFIG_PM_SLEEP
+static int ad7879_spi_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct ad7879 *ts = spi_get_drvdata(spi);
ad7879_suspend(ts);
@@ -30,19 +32,19 @@ static int ad7879_spi_suspend(struct spi_device *spi, pm_message_t message)
return 0;
}
-static int ad7879_spi_resume(struct spi_device *spi)
+static int ad7879_spi_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct ad7879 *ts = spi_get_drvdata(spi);
ad7879_resume(ts);
return 0;
}
-#else
-# define ad7879_spi_suspend NULL
-# define ad7879_spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ad7879_spi_pm, ad7879_spi_suspend, ad7879_spi_resume);
+
/*
* ad7879_read/write are only used for initial setup and for sysfs controls.
* The main traffic is done in ad7879_collect().
@@ -173,11 +175,10 @@ static struct spi_driver ad7879_spi_driver = {
.name = "ad7879",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &ad7879_spi_pm,
},
.probe = ad7879_spi_probe,
.remove = __devexit_p(ad7879_spi_remove),
- .suspend = ad7879_spi_suspend,
- .resume = ad7879_spi_resume,
};
static int __init ad7879_spi_init(void)
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 4bf2316e3284..c24946f51256 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -26,6 +26,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/pm.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -892,9 +893,10 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int ads7846_suspend(struct spi_device *spi, pm_message_t message)
+#ifdef CONFIG_PM_SLEEP
+static int ads7846_suspend(struct device *dev)
{
- struct ads7846 *ts = dev_get_drvdata(&spi->dev);
+ struct ads7846 *ts = dev_get_drvdata(dev);
mutex_lock(&ts->lock);
@@ -914,9 +916,9 @@ static int ads7846_suspend(struct spi_device *spi, pm_message_t message)
return 0;
}
-static int ads7846_resume(struct spi_device *spi)
+static int ads7846_resume(struct device *dev)
{
- struct ads7846 *ts = dev_get_drvdata(&spi->dev);
+ struct ads7846 *ts = dev_get_drvdata(dev);
mutex_lock(&ts->lock);
@@ -935,6 +937,9 @@ static int ads7846_resume(struct spi_device *spi)
return 0;
}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts)
{
@@ -1408,11 +1413,10 @@ static struct spi_driver ads7846_driver = {
.name = "ads7846",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &ads7846_pm,
},
.probe = ads7846_probe,
.remove = __devexit_p(ads7846_remove),
- .suspend = ads7846_suspend,
- .resume = ads7846_resume,
};
static int __init ads7846_init(void)
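
The ad7877, ad7879-spi and ads7846 hunks above all apply the same conversion: the legacy SPI-level suspend/resume callbacks, which took a struct spi_device plus a pm_message_t, become CONFIG_PM_SLEEP-guarded dev_pm_ops handlers wired up through SIMPLE_DEV_PM_OPS, and the .suspend/.resume members of struct spi_driver go away. A minimal sketch of the pattern, for a hypothetical SPI driver "foo", follows; the foo_* names are placeholders and not part of the patch.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

/* Placeholder device state and helpers for the sketch */
struct foo {
	bool enabled;
};

static void foo_enable(struct foo *chip)  { chip->enabled = true;  }
static void foo_disable(struct foo *chip) { chip->enabled = false; }

#ifdef CONFIG_PM_SLEEP
/* dev_pm_ops callbacks take a struct device, not a struct spi_device */
static int foo_suspend(struct device *dev)
{
	struct foo *chip = dev_get_drvdata(dev);

	foo_disable(chip);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo *chip = dev_get_drvdata(dev);

	foo_enable(chip);
	return 0;
}
#endif

/*
 * With CONFIG_PM_SLEEP disabled, SIMPLE_DEV_PM_OPS still defines foo_pm
 * but leaves the sleep callbacks out, so the old "#define foo_suspend NULL"
 * fallbacks are no longer needed.
 */
static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

static struct spi_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm,	/* replaces .suspend/.resume in struct spi_driver */
	},
	/* .probe and .remove are untouched by this conversion and omitted here */
};

The ad7877 hunk reads dev_get_drvdata(dev) directly, while ad7879-spi goes through to_spi_device(dev) and spi_get_drvdata(); both are equivalent here because the SPI core stores the driver data on the underlying struct device.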
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
new file mode 100644
index 000000000000..4012436633b1
--- /dev/null
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -0,0 +1,1211 @@
+/*
+ * Atmel maXTouch Touchscreen driver
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+/* Version */
+#define MXT_VER_20 20
+#define MXT_VER_21 21
+#define MXT_VER_22 22
+
+/* Slave addresses */
+#define MXT_APP_LOW 0x4a
+#define MXT_APP_HIGH 0x4b
+#define MXT_BOOT_LOW 0x24
+#define MXT_BOOT_HIGH 0x25
+
+/* Firmware */
+#define MXT_FW_NAME "maxtouch.fw"
+
+/* Registers */
+#define MXT_FAMILY_ID 0x00
+#define MXT_VARIANT_ID 0x01
+#define MXT_VERSION 0x02
+#define MXT_BUILD 0x03
+#define MXT_MATRIX_X_SIZE 0x04
+#define MXT_MATRIX_Y_SIZE 0x05
+#define MXT_OBJECT_NUM 0x06
+#define MXT_OBJECT_START 0x07
+
+#define MXT_OBJECT_SIZE 6
+
+/* Object types */
+#define MXT_DEBUG_DIAGNOSTIC 37
+#define MXT_GEN_MESSAGE 5
+#define MXT_GEN_COMMAND 6
+#define MXT_GEN_POWER 7
+#define MXT_GEN_ACQUIRE 8
+#define MXT_TOUCH_MULTI 9
+#define MXT_TOUCH_KEYARRAY 15
+#define MXT_TOUCH_PROXIMITY 23
+#define MXT_PROCI_GRIPFACE 20
+#define MXT_PROCG_NOISE 22
+#define MXT_PROCI_ONETOUCH 24
+#define MXT_PROCI_TWOTOUCH 27
+#define MXT_PROCI_GRIP 40
+#define MXT_PROCI_PALM 41
+#define MXT_SPT_COMMSCONFIG 18
+#define MXT_SPT_GPIOPWM 19
+#define MXT_SPT_SELFTEST 25
+#define MXT_SPT_CTECONFIG 28
+#define MXT_SPT_USERDATA 38
+#define MXT_SPT_DIGITIZER 43
+#define MXT_SPT_MESSAGECOUNT 44
+
+/* MXT_GEN_COMMAND field */
+#define MXT_COMMAND_RESET 0
+#define MXT_COMMAND_BACKUPNV 1
+#define MXT_COMMAND_CALIBRATE 2
+#define MXT_COMMAND_REPORTALL 3
+#define MXT_COMMAND_DIAGNOSTIC 5
+
+/* MXT_GEN_POWER field */
+#define MXT_POWER_IDLEACQINT 0
+#define MXT_POWER_ACTVACQINT 1
+#define MXT_POWER_ACTV2IDLETO 2
+
+/* MXT_GEN_ACQUIRE field */
+#define MXT_ACQUIRE_CHRGTIME 0
+#define MXT_ACQUIRE_TCHDRIFT 2
+#define MXT_ACQUIRE_DRIFTST 3
+#define MXT_ACQUIRE_TCHAUTOCAL 4
+#define MXT_ACQUIRE_SYNC 5
+#define MXT_ACQUIRE_ATCHCALST 6
+#define MXT_ACQUIRE_ATCHCALSTHR 7
+
+/* MXT_TOUCH_MULTI field */
+#define MXT_TOUCH_CTRL 0
+#define MXT_TOUCH_XORIGIN 1
+#define MXT_TOUCH_YORIGIN 2
+#define MXT_TOUCH_XSIZE 3
+#define MXT_TOUCH_YSIZE 4
+#define MXT_TOUCH_BLEN 6
+#define MXT_TOUCH_TCHTHR 7
+#define MXT_TOUCH_TCHDI 8
+#define MXT_TOUCH_ORIENT 9
+#define MXT_TOUCH_MOVHYSTI 11
+#define MXT_TOUCH_MOVHYSTN 12
+#define MXT_TOUCH_NUMTOUCH 14
+#define MXT_TOUCH_MRGHYST 15
+#define MXT_TOUCH_MRGTHR 16
+#define MXT_TOUCH_AMPHYST 17
+#define MXT_TOUCH_XRANGE_LSB 18
+#define MXT_TOUCH_XRANGE_MSB 19
+#define MXT_TOUCH_YRANGE_LSB 20
+#define MXT_TOUCH_YRANGE_MSB 21
+#define MXT_TOUCH_XLOCLIP 22
+#define MXT_TOUCH_XHICLIP 23
+#define MXT_TOUCH_YLOCLIP 24
+#define MXT_TOUCH_YHICLIP 25
+#define MXT_TOUCH_XEDGECTRL 26
+#define MXT_TOUCH_XEDGEDIST 27
+#define MXT_TOUCH_YEDGECTRL 28
+#define MXT_TOUCH_YEDGEDIST 29
+#define MXT_TOUCH_JUMPLIMIT 30
+
+/* MXT_PROCI_GRIPFACE field */
+#define MXT_GRIPFACE_CTRL 0
+#define MXT_GRIPFACE_XLOGRIP 1
+#define MXT_GRIPFACE_XHIGRIP 2
+#define MXT_GRIPFACE_YLOGRIP 3
+#define MXT_GRIPFACE_YHIGRIP 4
+#define MXT_GRIPFACE_MAXTCHS 5
+#define MXT_GRIPFACE_SZTHR1 7
+#define MXT_GRIPFACE_SZTHR2 8
+#define MXT_GRIPFACE_SHPTHR1 9
+#define MXT_GRIPFACE_SHPTHR2 10
+#define MXT_GRIPFACE_SUPEXTTO 11
+
+/* MXT_PROCI_NOISE field */
+#define MXT_NOISE_CTRL 0
+#define MXT_NOISE_OUTFLEN 1
+#define MXT_NOISE_GCAFUL_LSB 3
+#define MXT_NOISE_GCAFUL_MSB 4
+#define MXT_NOISE_GCAFLL_LSB 5
+#define MXT_NOISE_GCAFLL_MSB 6
+#define MXT_NOISE_ACTVGCAFVALID 7
+#define MXT_NOISE_NOISETHR 8
+#define MXT_NOISE_FREQHOPSCALE 10
+#define MXT_NOISE_FREQ0 11
+#define MXT_NOISE_FREQ1 12
+#define MXT_NOISE_FREQ2 13
+#define MXT_NOISE_FREQ3 14
+#define MXT_NOISE_FREQ4 15
+#define MXT_NOISE_IDLEGCAFVALID 16
+
+/* MXT_SPT_COMMSCONFIG */
+#define MXT_COMMS_CTRL 0
+#define MXT_COMMS_CMD 1
+
+/* MXT_SPT_CTECONFIG field */
+#define MXT_CTE_CTRL 0
+#define MXT_CTE_CMD 1
+#define MXT_CTE_MODE 2
+#define MXT_CTE_IDLEGCAFDEPTH 3
+#define MXT_CTE_ACTVGCAFDEPTH 4
+#define MXT_CTE_VOLTAGE 5
+
+#define MXT_VOLTAGE_DEFAULT 2700000
+#define MXT_VOLTAGE_STEP 10000
+
+/* Define for MXT_GEN_COMMAND */
+#define MXT_BOOT_VALUE 0xa5
+#define MXT_BACKUP_VALUE 0x55
+#define MXT_BACKUP_TIME 25 /* msec */
+#define MXT_RESET_TIME 65 /* msec */
+
+#define MXT_FWRESET_TIME 175 /* msec */
+
+/* Command to unlock bootloader */
+#define MXT_UNLOCK_CMD_MSB 0xaa
+#define MXT_UNLOCK_CMD_LSB 0xdc
+
+/* Bootloader mode status */
+#define MXT_WAITING_BOOTLOAD_CMD 0xc0 /* only bits 7 and 6 are valid */
+#define MXT_WAITING_FRAME_DATA 0x80 /* only bits 7 and 6 are valid */
+#define MXT_FRAME_CRC_CHECK 0x02
+#define MXT_FRAME_CRC_FAIL 0x03
+#define MXT_FRAME_CRC_PASS 0x04
+#define MXT_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */
+#define MXT_BOOT_STATUS_MASK 0x3f
+
+/* Touch status */
+#define MXT_SUPPRESS (1 << 1)
+#define MXT_AMP (1 << 2)
+#define MXT_VECTOR (1 << 3)
+#define MXT_MOVE (1 << 4)
+#define MXT_RELEASE (1 << 5)
+#define MXT_PRESS (1 << 6)
+#define MXT_DETECT (1 << 7)
+
+/* Touchscreen absolute values */
+#define MXT_MAX_XC 0x3ff
+#define MXT_MAX_YC 0x3ff
+#define MXT_MAX_AREA 0xff
+
+#define MXT_MAX_FINGER 10
+
+struct mxt_info {
+ u8 family_id;
+ u8 variant_id;
+ u8 version;
+ u8 build;
+ u8 matrix_xsize;
+ u8 matrix_ysize;
+ u8 object_num;
+};
+
+struct mxt_object {
+ u8 type;
+ u16 start_address;
+ u8 size;
+ u8 instances;
+ u8 num_report_ids;
+
+ /* to map object and message */
+ u8 max_reportid;
+};
+
+struct mxt_message {
+ u8 reportid;
+ u8 message[7];
+ u8 checksum;
+};
+
+struct mxt_finger {
+ int status;
+ int x;
+ int y;
+ int area;
+};
+
+/* Each client has this additional data */
+struct mxt_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ const struct mxt_platform_data *pdata;
+ struct mxt_object *object_table;
+ struct mxt_info info;
+ struct mxt_finger finger[MXT_MAX_FINGER];
+ unsigned int irq;
+};
+
+static bool mxt_object_readable(unsigned int type)
+{
+ switch (type) {
+ case MXT_GEN_MESSAGE:
+ case MXT_GEN_COMMAND:
+ case MXT_GEN_POWER:
+ case MXT_GEN_ACQUIRE:
+ case MXT_TOUCH_MULTI:
+ case MXT_TOUCH_KEYARRAY:
+ case MXT_TOUCH_PROXIMITY:
+ case MXT_PROCI_GRIPFACE:
+ case MXT_PROCG_NOISE:
+ case MXT_PROCI_ONETOUCH:
+ case MXT_PROCI_TWOTOUCH:
+ case MXT_PROCI_GRIP:
+ case MXT_PROCI_PALM:
+ case MXT_SPT_COMMSCONFIG:
+ case MXT_SPT_GPIOPWM:
+ case MXT_SPT_SELFTEST:
+ case MXT_SPT_CTECONFIG:
+ case MXT_SPT_USERDATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool mxt_object_writable(unsigned int type)
+{
+ switch (type) {
+ case MXT_GEN_COMMAND:
+ case MXT_GEN_POWER:
+ case MXT_GEN_ACQUIRE:
+ case MXT_TOUCH_MULTI:
+ case MXT_TOUCH_KEYARRAY:
+ case MXT_TOUCH_PROXIMITY:
+ case MXT_PROCI_GRIPFACE:
+ case MXT_PROCG_NOISE:
+ case MXT_PROCI_ONETOUCH:
+ case MXT_PROCI_TWOTOUCH:
+ case MXT_PROCI_GRIP:
+ case MXT_PROCI_PALM:
+ case MXT_SPT_GPIOPWM:
+ case MXT_SPT_SELFTEST:
+ case MXT_SPT_CTECONFIG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void mxt_dump_message(struct device *dev,
+ struct mxt_message *message)
+{
+ dev_dbg(dev, "reportid:\t0x%x\n", message->reportid);
+ dev_dbg(dev, "message1:\t0x%x\n", message->message[0]);
+ dev_dbg(dev, "message2:\t0x%x\n", message->message[1]);
+ dev_dbg(dev, "message3:\t0x%x\n", message->message[2]);
+ dev_dbg(dev, "message4:\t0x%x\n", message->message[3]);
+ dev_dbg(dev, "message5:\t0x%x\n", message->message[4]);
+ dev_dbg(dev, "message6:\t0x%x\n", message->message[5]);
+ dev_dbg(dev, "message7:\t0x%x\n", message->message[6]);
+ dev_dbg(dev, "checksum:\t0x%x\n", message->checksum);
+}
+
+static int mxt_check_bootloader(struct i2c_client *client,
+ unsigned int state)
+{
+ u8 val;
+
+recheck:
+ if (i2c_master_recv(client, &val, 1) != 1) {
+ dev_err(&client->dev, "%s: i2c recv failed\n", __func__);
+ return -EIO;
+ }
+
+ switch (state) {
+ case MXT_WAITING_BOOTLOAD_CMD:
+ case MXT_WAITING_FRAME_DATA:
+ val &= ~MXT_BOOT_STATUS_MASK;
+ break;
+ case MXT_FRAME_CRC_PASS:
+ if (val == MXT_FRAME_CRC_CHECK)
+ goto recheck;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (val != state) {
+ dev_err(&client->dev, "Unvalid bootloader mode state\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mxt_unlock_bootloader(struct i2c_client *client)
+{
+ u8 buf[2];
+
+ buf[0] = MXT_UNLOCK_CMD_LSB;
+ buf[1] = MXT_UNLOCK_CMD_MSB;
+
+ if (i2c_master_send(client, buf, 2) != 2) {
+ dev_err(&client->dev, "%s: i2c send failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mxt_fw_write(struct i2c_client *client,
+ const u8 *data, unsigned int frame_size)
+{
+ if (i2c_master_send(client, data, frame_size) != frame_size) {
+ dev_err(&client->dev, "%s: i2c send failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int __mxt_read_reg(struct i2c_client *client,
+ u16 reg, u16 len, void *val)
+{
+ struct i2c_msg xfer[2];
+ u8 buf[2];
+
+ buf[0] = reg & 0xff;
+ buf[1] = (reg >> 8) & 0xff;
+
+ /* Write register */
+ xfer[0].addr = client->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 2;
+ xfer[0].buf = buf;
+
+ /* Read data */
+ xfer[1].addr = client->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = len;
+ xfer[1].buf = val;
+
+ if (i2c_transfer(client->adapter, xfer, 2) != 2) {
+ dev_err(&client->dev, "%s: i2c transfer failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mxt_read_reg(struct i2c_client *client, u16 reg, u8 *val)
+{
+ return __mxt_read_reg(client, reg, 1, val);
+}
+
+static int mxt_write_reg(struct i2c_client *client, u16 reg, u8 val)
+{
+ u8 buf[3];
+
+ buf[0] = reg & 0xff;
+ buf[1] = (reg >> 8) & 0xff;
+ buf[2] = val;
+
+ if (i2c_master_send(client, buf, 3) != 3) {
+ dev_err(&client->dev, "%s: i2c send failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mxt_read_object_table(struct i2c_client *client,
+ u16 reg, u8 *object_buf)
+{
+ return __mxt_read_reg(client, reg, MXT_OBJECT_SIZE,
+ object_buf);
+}
+
+static struct mxt_object *
+mxt_get_object(struct mxt_data *data, u8 type)
+{
+ struct mxt_object *object;
+ int i;
+
+ for (i = 0; i < data->info.object_num; i++) {
+ object = data->object_table + i;
+ if (object->type == type)
+ return object;
+ }
+
+ dev_err(&data->client->dev, "Invalid object type\n");
+ return NULL;
+}
+
+static int mxt_read_message(struct mxt_data *data,
+ struct mxt_message *message)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, MXT_GEN_MESSAGE);
+ if (!object)
+ return -EINVAL;
+
+ reg = object->start_address;
+ return __mxt_read_reg(data->client, reg,
+ sizeof(struct mxt_message), message);
+}
+
+static int mxt_read_object(struct mxt_data *data,
+ u8 type, u8 offset, u8 *val)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, type);
+ if (!object)
+ return -EINVAL;
+
+ reg = object->start_address;
+ return __mxt_read_reg(data->client, reg + offset, 1, val);
+}
+
+static int mxt_write_object(struct mxt_data *data,
+ u8 type, u8 offset, u8 val)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, type);
+ if (!object)
+ return -EINVAL;
+
+ reg = object->start_address;
+ return mxt_write_reg(data->client, reg + offset, val);
+}
+
+static void mxt_input_report(struct mxt_data *data, int single_id)
+{
+ struct mxt_finger *finger = data->finger;
+ struct input_dev *input_dev = data->input_dev;
+ int status = finger[single_id].status;
+ int finger_num = 0;
+ int id;
+
+ for (id = 0; id < MXT_MAX_FINGER; id++) {
+ if (!finger[id].status)
+ continue;
+
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
+ finger[id].status != MXT_RELEASE ?
+ finger[id].area : 0);
+ input_report_abs(input_dev, ABS_MT_POSITION_X,
+ finger[id].x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y,
+ finger[id].y);
+ input_mt_sync(input_dev);
+
+ if (finger[id].status == MXT_RELEASE)
+ finger[id].status = 0;
+ else
+ finger_num++;
+ }
+
+ input_report_key(input_dev, BTN_TOUCH, finger_num > 0);
+
+ if (status != MXT_RELEASE) {
+ input_report_abs(input_dev, ABS_X, finger[single_id].x);
+ input_report_abs(input_dev, ABS_Y, finger[single_id].y);
+ }
+
+ input_sync(input_dev);
+}
+
+static void mxt_input_touchevent(struct mxt_data *data,
+ struct mxt_message *message, int id)
+{
+ struct mxt_finger *finger = data->finger;
+ struct device *dev = &data->client->dev;
+ u8 status = message->message[0];
+ int x;
+ int y;
+ int area;
+
+ /* Check whether a touch is present on the screen */
+ if (!(status & MXT_DETECT)) {
+ if (status & MXT_RELEASE) {
+ dev_dbg(dev, "[%d] released\n", id);
+
+ finger[id].status = MXT_RELEASE;
+ mxt_input_report(data, id);
+ }
+ return;
+ }
+
+ /* Check only AMP detection */
+ if (!(status & (MXT_PRESS | MXT_MOVE)))
+ return;
+
+ x = (message->message[1] << 2) | ((message->message[3] & ~0x3f) >> 6);
+ y = (message->message[2] << 2) | ((message->message[3] & ~0xf3) >> 2);
+ area = message->message[4];
+
+ dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
+ status & MXT_MOVE ? "moved" : "pressed",
+ x, y, area);
+
+ finger[id].status = status & MXT_MOVE ?
+ MXT_MOVE : MXT_PRESS;
+ finger[id].x = x;
+ finger[id].y = y;
+ finger[id].area = area;
+
+ mxt_input_report(data, id);
+}
+
+static irqreturn_t mxt_interrupt(int irq, void *dev_id)
+{
+ struct mxt_data *data = dev_id;
+ struct mxt_message message;
+ struct mxt_object *object;
+ struct device *dev = &data->client->dev;
+ int id;
+ u8 reportid;
+ u8 max_reportid;
+ u8 min_reportid;
+
+ do {
+ if (mxt_read_message(data, &message)) {
+ dev_err(dev, "Failed to read message\n");
+ goto end;
+ }
+
+ reportid = message.reportid;
+
+ /* Check whether this report ID belongs to MXT_TOUCH_MULTI */
+ object = mxt_get_object(data, MXT_TOUCH_MULTI);
+ if (!object)
+ goto end;
+
+ max_reportid = object->max_reportid;
+ min_reportid = max_reportid - object->num_report_ids + 1;
+ id = reportid - min_reportid;
+
+ if (reportid >= min_reportid && reportid <= max_reportid)
+ mxt_input_touchevent(data, &message, id);
+ else
+ mxt_dump_message(dev, &message);
+ } while (reportid != 0xff);
+
+end:
+ return IRQ_HANDLED;
+}
+
+static int mxt_check_reg_init(struct mxt_data *data)
+{
+ const struct mxt_platform_data *pdata = data->pdata;
+ struct mxt_object *object;
+ struct device *dev = &data->client->dev;
+ int index = 0;
+ int i, j, config_offset;
+
+ if (!pdata->config) {
+ dev_dbg(dev, "No cfg data defined, skipping reg init\n");
+ return 0;
+ }
+
+ for (i = 0; i < data->info.object_num; i++) {
+ object = data->object_table + i;
+
+ if (!mxt_object_writable(object->type))
+ continue;
+
+ for (j = 0; j < object->size + 1; j++) {
+ config_offset = index + j;
+ if (config_offset > pdata->config_length) {
+ dev_err(dev, "Not enough config data!\n");
+ return -EINVAL;
+ }
+ mxt_write_object(data, object->type, j,
+ pdata->config[config_offset]);
+ }
+ index += object->size + 1;
+ }
+
+ return 0;
+}
+
+static int mxt_make_highchg(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct mxt_message message;
+ int count = 10;
+ int error;
+
+ /* Read dummy messages to drive the CHG pin high */
+ do {
+ error = mxt_read_message(data, &message);
+ if (error)
+ return error;
+ } while (message.reportid != 0xff && --count);
+
+ if (!count) {
+ dev_err(dev, "CHG pin isn't cleared\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void mxt_handle_pdata(struct mxt_data *data)
+{
+ const struct mxt_platform_data *pdata = data->pdata;
+ u8 voltage;
+
+ /* Set touchscreen lines */
+ mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_XSIZE,
+ pdata->x_line);
+ mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_YSIZE,
+ pdata->y_line);
+
+ /* Set touchscreen orient */
+ mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_ORIENT,
+ pdata->orient);
+
+ /* Set touchscreen burst length */
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_BLEN, pdata->blen);
+
+ /* Set touchscreen threshold */
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_TCHTHR, pdata->threshold);
+
+ /* Set touchscreen resolution */
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff);
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8);
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff);
+ mxt_write_object(data, MXT_TOUCH_MULTI,
+ MXT_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8);
+
+ /* Set touchscreen voltage */
+ if (pdata->voltage) {
+ if (pdata->voltage < MXT_VOLTAGE_DEFAULT) {
+ voltage = (MXT_VOLTAGE_DEFAULT - pdata->voltage) /
+ MXT_VOLTAGE_STEP;
+ voltage = 0xff - voltage + 1;
+ } else
+ voltage = (pdata->voltage - MXT_VOLTAGE_DEFAULT) /
+ MXT_VOLTAGE_STEP;
+
+ mxt_write_object(data, MXT_SPT_CTECONFIG,
+ MXT_CTE_VOLTAGE, voltage);
+ }
+}
+
+static int mxt_get_info(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ struct mxt_info *info = &data->info;
+ int error;
+ u8 val;
+
+ error = mxt_read_reg(client, MXT_FAMILY_ID, &val);
+ if (error)
+ return error;
+ info->family_id = val;
+
+ error = mxt_read_reg(client, MXT_VARIANT_ID, &val);
+ if (error)
+ return error;
+ info->variant_id = val;
+
+ error = mxt_read_reg(client, MXT_VERSION, &val);
+ if (error)
+ return error;
+ info->version = val;
+
+ error = mxt_read_reg(client, MXT_BUILD, &val);
+ if (error)
+ return error;
+ info->build = val;
+
+ error = mxt_read_reg(client, MXT_OBJECT_NUM, &val);
+ if (error)
+ return error;
+ info->object_num = val;
+
+ return 0;
+}
+
+static int mxt_get_object_table(struct mxt_data *data)
+{
+ int error;
+ int i;
+ u16 reg;
+ u8 reportid = 0;
+ u8 buf[MXT_OBJECT_SIZE];
+
+ for (i = 0; i < data->info.object_num; i++) {
+ struct mxt_object *object = data->object_table + i;
+
+ reg = MXT_OBJECT_START + MXT_OBJECT_SIZE * i;
+ error = mxt_read_object_table(data->client, reg, buf);
+ if (error)
+ return error;
+
+ object->type = buf[0];
+ object->start_address = (buf[2] << 8) | buf[1];
+ object->size = buf[3];
+ object->instances = buf[4];
+ object->num_report_ids = buf[5];
+
+ if (object->num_report_ids) {
+ reportid += object->num_report_ids *
+ (object->instances + 1);
+ object->max_reportid = reportid;
+ }
+ }
+
+ return 0;
+}
+
+static int mxt_initialize(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ struct mxt_info *info = &data->info;
+ int error;
+ u8 val;
+
+ error = mxt_get_info(data);
+ if (error)
+ return error;
+
+ data->object_table = kcalloc(info->object_num,
+ sizeof(struct mxt_object),
+ GFP_KERNEL);
+ if (!data->object_table) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Get object table information */
+ error = mxt_get_object_table(data);
+ if (error)
+ return error;
+
+ /* Check register init values */
+ error = mxt_check_reg_init(data);
+ if (error)
+ return error;
+
+ error = mxt_make_highchg(data);
+ if (error)
+ return error;
+
+ mxt_handle_pdata(data);
+
+ /* Backup to memory */
+ mxt_write_object(data, MXT_GEN_COMMAND,
+ MXT_COMMAND_BACKUPNV,
+ MXT_BACKUP_VALUE);
+ msleep(MXT_BACKUP_TIME);
+
+ /* Soft reset */
+ mxt_write_object(data, MXT_GEN_COMMAND,
+ MXT_COMMAND_RESET, 1);
+ msleep(MXT_RESET_TIME);
+
+ /* Update matrix size at info struct */
+ error = mxt_read_reg(client, MXT_MATRIX_X_SIZE, &val);
+ if (error)
+ return error;
+ info->matrix_xsize = val;
+
+ error = mxt_read_reg(client, MXT_MATRIX_Y_SIZE, &val);
+ if (error)
+ return error;
+ info->matrix_ysize = val;
+
+ dev_info(&client->dev,
+ "Family ID: %d Variant ID: %d Version: %d Build: %d\n",
+ info->family_id, info->variant_id, info->version,
+ info->build);
+
+ dev_info(&client->dev,
+ "Matrix X Size: %d Matrix Y Size: %d Object Num: %d\n",
+ info->matrix_xsize, info->matrix_ysize,
+ info->object_num);
+
+ return 0;
+}
+
+static ssize_t mxt_object_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ struct mxt_object *object;
+ int count = 0;
+ int i, j;
+ int error;
+ u8 val;
+
+ for (i = 0; i < data->info.object_num; i++) {
+ object = data->object_table + i;
+
+ count += sprintf(buf + count,
+ "Object Table Element %d(Type %d)\n",
+ i + 1, object->type);
+
+ if (!mxt_object_readable(object->type)) {
+ count += sprintf(buf + count, "\n");
+ continue;
+ }
+
+ for (j = 0; j < object->size + 1; j++) {
+ error = mxt_read_object(data,
+ object->type, j, &val);
+ if (error)
+ return error;
+
+ count += sprintf(buf + count,
+ " Byte %d: 0x%x (%d)\n", j, val, val);
+ }
+
+ count += sprintf(buf + count, "\n");
+ }
+
+ return count;
+}
+
+static int mxt_load_fw(struct device *dev, const char *fn)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ const struct firmware *fw = NULL;
+ unsigned int frame_size;
+ unsigned int pos = 0;
+ int ret;
+
+ ret = request_firmware(&fw, fn, dev);
+ if (ret) {
+ dev_err(dev, "Unable to open firmware %s\n", fn);
+ return ret;
+ }
+
+ /* Change to the bootloader mode */
+ mxt_write_object(data, MXT_GEN_COMMAND,
+ MXT_COMMAND_RESET, MXT_BOOT_VALUE);
+ msleep(MXT_RESET_TIME);
+
+ /* Change to slave address of bootloader */
+ if (client->addr == MXT_APP_LOW)
+ client->addr = MXT_BOOT_LOW;
+ else
+ client->addr = MXT_BOOT_HIGH;
+
+ ret = mxt_check_bootloader(client, MXT_WAITING_BOOTLOAD_CMD);
+ if (ret)
+ goto out;
+
+ /* Unlock bootloader */
+ mxt_unlock_bootloader(client);
+
+ while (pos < fw->size) {
+ ret = mxt_check_bootloader(client,
+ MXT_WAITING_FRAME_DATA);
+ if (ret)
+ goto out;
+
+ frame_size = ((*(fw->data + pos) << 8) | *(fw->data + pos + 1));
+
+ /* The firmware data does not include the CRC bytes,
+ * so add 2 to the frame size.
+ */
+ frame_size += 2;
+
+ /* Write one frame to device */
+ mxt_fw_write(client, fw->data + pos, frame_size);
+
+ ret = mxt_check_bootloader(client,
+ MXT_FRAME_CRC_PASS);
+ if (ret)
+ goto out;
+
+ pos += frame_size;
+
+ dev_dbg(dev, "Updated %d bytes / %zd bytes\n", pos, fw->size);
+ }
+
+out:
+ release_firmware(fw);
+
+ /* Change to slave address of application */
+ if (client->addr == MXT_BOOT_LOW)
+ client->addr = MXT_APP_LOW;
+ else
+ client->addr = MXT_APP_HIGH;
+
+ return ret;
+}
+
+static ssize_t mxt_update_fw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int error;
+
+ disable_irq(data->irq);
+
+ error = mxt_load_fw(dev, MXT_FW_NAME);
+ if (error) {
+ dev_err(dev, "The firmware update failed(%d)\n", error);
+ count = error;
+ } else {
+ dev_dbg(dev, "The firmware update succeeded\n");
+
+ /* Wait for reset */
+ msleep(MXT_FWRESET_TIME);
+
+ kfree(data->object_table);
+ data->object_table = NULL;
+
+ mxt_initialize(data);
+ }
+
+ enable_irq(data->irq);
+
+ return count;
+}
+
+static DEVICE_ATTR(object, 0444, mxt_object_show, NULL);
+static DEVICE_ATTR(update_fw, 0664, NULL, mxt_update_fw_store);
+
+static struct attribute *mxt_attrs[] = {
+ &dev_attr_object.attr,
+ &dev_attr_update_fw.attr,
+ NULL
+};
+
+static const struct attribute_group mxt_attr_group = {
+ .attrs = mxt_attrs,
+};
+
+static void mxt_start(struct mxt_data *data)
+{
+ /* Touch enable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0x83);
+}
+
+static void mxt_stop(struct mxt_data *data)
+{
+ /* Touch disable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0);
+}
+
+static int mxt_input_open(struct input_dev *dev)
+{
+ struct mxt_data *data = input_get_drvdata(dev);
+
+ mxt_start(data);
+
+ return 0;
+}
+
+static void mxt_input_close(struct input_dev *dev)
+{
+ struct mxt_data *data = input_get_drvdata(dev);
+
+ mxt_stop(data);
+}
+
+static int __devinit mxt_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct mxt_platform_data *pdata = client->dev.platform_data;
+ struct mxt_data *data;
+ struct input_dev *input_dev;
+ int error;
+
+ if (!pdata)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!data || !input_dev) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ input_dev->name = "Atmel maXTouch Touchscreen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+ input_dev->open = mxt_input_open;
+ input_dev->close = mxt_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ /* For single touch */
+ input_set_abs_params(input_dev, ABS_X,
+ 0, MXT_MAX_XC, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ 0, MXT_MAX_YC, 0, 0);
+
+ /* For multi touch */
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, MXT_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, MXT_MAX_XC, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, MXT_MAX_YC, 0, 0);
+
+ input_set_drvdata(input_dev, data);
+
+ data->client = client;
+ data->input_dev = input_dev;
+ data->pdata = pdata;
+ data->irq = client->irq;
+
+ i2c_set_clientdata(client, data);
+
+ error = mxt_initialize(data);
+ if (error)
+ goto err_free_object;
+
+ error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
+ pdata->irqflags, client->dev.driver->name, data);
+ if (error) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ goto err_free_object;
+ }
+
+ error = input_register_device(input_dev);
+ if (error)
+ goto err_free_irq;
+
+ error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group);
+ if (error)
+ goto err_unregister_device;
+
+ return 0;
+
+err_unregister_device:
+ input_unregister_device(input_dev);
+ input_dev = NULL;
+err_free_irq:
+ free_irq(client->irq, data);
+err_free_object:
+ kfree(data->object_table);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(data);
+ return error;
+}
+
+static int __devexit mxt_remove(struct i2c_client *client)
+{
+ struct mxt_data *data = i2c_get_clientdata(client);
+
+ sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
+ free_irq(data->irq, data);
+ input_unregister_device(data->input_dev);
+ kfree(data->object_table);
+ kfree(data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mxt_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mxt_data *data = i2c_get_clientdata(client);
+ struct input_dev *input_dev = data->input_dev;
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ mxt_stop(data);
+
+ mutex_unlock(&input_dev->mutex);
+
+ return 0;
+}
+
+static int mxt_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mxt_data *data = i2c_get_clientdata(client);
+ struct input_dev *input_dev = data->input_dev;
+
+ /* Soft reset */
+ mxt_write_object(data, MXT_GEN_COMMAND,
+ MXT_COMMAND_RESET, 1);
+
+ msleep(MXT_RESET_TIME);
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ mxt_start(data);
+
+ mutex_unlock(&input_dev->mutex);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mxt_pm_ops = {
+ .suspend = mxt_suspend,
+ .resume = mxt_resume,
+};
+#endif
+
+static const struct i2c_device_id mxt_id[] = {
+ { "qt602240_ts", 0 },
+ { "atmel_mxt_ts", 0 },
+ { "mXT224", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mxt_id);
+
+static struct i2c_driver mxt_driver = {
+ .driver = {
+ .name = "atmel_mxt_ts",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &mxt_pm_ops,
+#endif
+ },
+ .probe = mxt_probe,
+ .remove = __devexit_p(mxt_remove),
+ .id_table = mxt_id,
+};
+
+static int __init mxt_init(void)
+{
+ return i2c_add_driver(&mxt_driver);
+}
+
+static void __exit mxt_exit(void)
+{
+ i2c_del_driver(&mxt_driver);
+}
+
+module_init(mxt_init);
+module_exit(mxt_exit);
+
+/* Module information */
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Atmel maXTouch Touchscreen driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/qt602240_ts.c b/drivers/input/touchscreen/qt602240_ts.c
deleted file mode 100644
index 4dcb0e872f6a..000000000000
--- a/drivers/input/touchscreen/qt602240_ts.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-/*
- * AT42QT602240/ATMXT224 Touchscreen driver
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/i2c.h>
-#include <linux/i2c/qt602240_ts.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-/* Version */
-#define QT602240_VER_20 20
-#define QT602240_VER_21 21
-#define QT602240_VER_22 22
-
-/* Slave addresses */
-#define QT602240_APP_LOW 0x4a
-#define QT602240_APP_HIGH 0x4b
-#define QT602240_BOOT_LOW 0x24
-#define QT602240_BOOT_HIGH 0x25
-
-/* Firmware */
-#define QT602240_FW_NAME "qt602240.fw"
-
-/* Registers */
-#define QT602240_FAMILY_ID 0x00
-#define QT602240_VARIANT_ID 0x01
-#define QT602240_VERSION 0x02
-#define QT602240_BUILD 0x03
-#define QT602240_MATRIX_X_SIZE 0x04
-#define QT602240_MATRIX_Y_SIZE 0x05
-#define QT602240_OBJECT_NUM 0x06
-#define QT602240_OBJECT_START 0x07
-
-#define QT602240_OBJECT_SIZE 6
-
-/* Object types */
-#define QT602240_DEBUG_DIAGNOSTIC 37
-#define QT602240_GEN_MESSAGE 5
-#define QT602240_GEN_COMMAND 6
-#define QT602240_GEN_POWER 7
-#define QT602240_GEN_ACQUIRE 8
-#define QT602240_TOUCH_MULTI 9
-#define QT602240_TOUCH_KEYARRAY 15
-#define QT602240_TOUCH_PROXIMITY 23
-#define QT602240_PROCI_GRIPFACE 20
-#define QT602240_PROCG_NOISE 22
-#define QT602240_PROCI_ONETOUCH 24
-#define QT602240_PROCI_TWOTOUCH 27
-#define QT602240_SPT_COMMSCONFIG 18 /* firmware ver 21 over */
-#define QT602240_SPT_GPIOPWM 19
-#define QT602240_SPT_SELFTEST 25
-#define QT602240_SPT_CTECONFIG 28
-#define QT602240_SPT_USERDATA 38 /* firmware ver 21 over */
-
-/* QT602240_GEN_COMMAND field */
-#define QT602240_COMMAND_RESET 0
-#define QT602240_COMMAND_BACKUPNV 1
-#define QT602240_COMMAND_CALIBRATE 2
-#define QT602240_COMMAND_REPORTALL 3
-#define QT602240_COMMAND_DIAGNOSTIC 5
-
-/* QT602240_GEN_POWER field */
-#define QT602240_POWER_IDLEACQINT 0
-#define QT602240_POWER_ACTVACQINT 1
-#define QT602240_POWER_ACTV2IDLETO 2
-
-/* QT602240_GEN_ACQUIRE field */
-#define QT602240_ACQUIRE_CHRGTIME 0
-#define QT602240_ACQUIRE_TCHDRIFT 2
-#define QT602240_ACQUIRE_DRIFTST 3
-#define QT602240_ACQUIRE_TCHAUTOCAL 4
-#define QT602240_ACQUIRE_SYNC 5
-#define QT602240_ACQUIRE_ATCHCALST 6
-#define QT602240_ACQUIRE_ATCHCALSTHR 7
-
-/* QT602240_TOUCH_MULTI field */
-#define QT602240_TOUCH_CTRL 0
-#define QT602240_TOUCH_XORIGIN 1
-#define QT602240_TOUCH_YORIGIN 2
-#define QT602240_TOUCH_XSIZE 3
-#define QT602240_TOUCH_YSIZE 4
-#define QT602240_TOUCH_BLEN 6
-#define QT602240_TOUCH_TCHTHR 7
-#define QT602240_TOUCH_TCHDI 8
-#define QT602240_TOUCH_ORIENT 9
-#define QT602240_TOUCH_MOVHYSTI 11
-#define QT602240_TOUCH_MOVHYSTN 12
-#define QT602240_TOUCH_NUMTOUCH 14
-#define QT602240_TOUCH_MRGHYST 15
-#define QT602240_TOUCH_MRGTHR 16
-#define QT602240_TOUCH_AMPHYST 17
-#define QT602240_TOUCH_XRANGE_LSB 18
-#define QT602240_TOUCH_XRANGE_MSB 19
-#define QT602240_TOUCH_YRANGE_LSB 20
-#define QT602240_TOUCH_YRANGE_MSB 21
-#define QT602240_TOUCH_XLOCLIP 22
-#define QT602240_TOUCH_XHICLIP 23
-#define QT602240_TOUCH_YLOCLIP 24
-#define QT602240_TOUCH_YHICLIP 25
-#define QT602240_TOUCH_XEDGECTRL 26
-#define QT602240_TOUCH_XEDGEDIST 27
-#define QT602240_TOUCH_YEDGECTRL 28
-#define QT602240_TOUCH_YEDGEDIST 29
-#define QT602240_TOUCH_JUMPLIMIT 30 /* firmware ver 22 over */
-
-/* QT602240_PROCI_GRIPFACE field */
-#define QT602240_GRIPFACE_CTRL 0
-#define QT602240_GRIPFACE_XLOGRIP 1
-#define QT602240_GRIPFACE_XHIGRIP 2
-#define QT602240_GRIPFACE_YLOGRIP 3
-#define QT602240_GRIPFACE_YHIGRIP 4
-#define QT602240_GRIPFACE_MAXTCHS 5
-#define QT602240_GRIPFACE_SZTHR1 7
-#define QT602240_GRIPFACE_SZTHR2 8
-#define QT602240_GRIPFACE_SHPTHR1 9
-#define QT602240_GRIPFACE_SHPTHR2 10
-#define QT602240_GRIPFACE_SUPEXTTO 11
-
-/* QT602240_PROCI_NOISE field */
-#define QT602240_NOISE_CTRL 0
-#define QT602240_NOISE_OUTFLEN 1
-#define QT602240_NOISE_GCAFUL_LSB 3
-#define QT602240_NOISE_GCAFUL_MSB 4
-#define QT602240_NOISE_GCAFLL_LSB 5
-#define QT602240_NOISE_GCAFLL_MSB 6
-#define QT602240_NOISE_ACTVGCAFVALID 7
-#define QT602240_NOISE_NOISETHR 8
-#define QT602240_NOISE_FREQHOPSCALE 10
-#define QT602240_NOISE_FREQ0 11
-#define QT602240_NOISE_FREQ1 12
-#define QT602240_NOISE_FREQ2 13
-#define QT602240_NOISE_FREQ3 14
-#define QT602240_NOISE_FREQ4 15
-#define QT602240_NOISE_IDLEGCAFVALID 16
-
-/* QT602240_SPT_COMMSCONFIG */
-#define QT602240_COMMS_CTRL 0
-#define QT602240_COMMS_CMD 1
-
-/* QT602240_SPT_CTECONFIG field */
-#define QT602240_CTE_CTRL 0
-#define QT602240_CTE_CMD 1
-#define QT602240_CTE_MODE 2
-#define QT602240_CTE_IDLEGCAFDEPTH 3
-#define QT602240_CTE_ACTVGCAFDEPTH 4
-#define QT602240_CTE_VOLTAGE 5 /* firmware ver 21 over */
-
-#define QT602240_VOLTAGE_DEFAULT 2700000
-#define QT602240_VOLTAGE_STEP 10000
-
-/* Define for QT602240_GEN_COMMAND */
-#define QT602240_BOOT_VALUE 0xa5
-#define QT602240_BACKUP_VALUE 0x55
-#define QT602240_BACKUP_TIME 25 /* msec */
-#define QT602240_RESET_TIME 65 /* msec */
-
-#define QT602240_FWRESET_TIME 175 /* msec */
-
-/* Command to unlock bootloader */
-#define QT602240_UNLOCK_CMD_MSB 0xaa
-#define QT602240_UNLOCK_CMD_LSB 0xdc
-
-/* Bootloader mode status */
-#define QT602240_WAITING_BOOTLOAD_CMD 0xc0 /* valid 7 6 bit only */
-#define QT602240_WAITING_FRAME_DATA 0x80 /* valid 7 6 bit only */
-#define QT602240_FRAME_CRC_CHECK 0x02
-#define QT602240_FRAME_CRC_FAIL 0x03
-#define QT602240_FRAME_CRC_PASS 0x04
-#define QT602240_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */
-#define QT602240_BOOT_STATUS_MASK 0x3f
-
-/* Touch status */
-#define QT602240_SUPPRESS (1 << 1)
-#define QT602240_AMP (1 << 2)
-#define QT602240_VECTOR (1 << 3)
-#define QT602240_MOVE (1 << 4)
-#define QT602240_RELEASE (1 << 5)
-#define QT602240_PRESS (1 << 6)
-#define QT602240_DETECT (1 << 7)
-
-/* Touchscreen absolute values */
-#define QT602240_MAX_XC 0x3ff
-#define QT602240_MAX_YC 0x3ff
-#define QT602240_MAX_AREA 0xff
-
-#define QT602240_MAX_FINGER 10
-
-/* Initial register values recommended from chip vendor */
-static const u8 init_vals_ver_20[] = {
- /* QT602240_GEN_COMMAND(6) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_GEN_POWER(7) */
- 0x20, 0xff, 0x32,
- /* QT602240_GEN_ACQUIRE(8) */
- 0x08, 0x05, 0x05, 0x00, 0x00, 0x00, 0x05, 0x14,
- /* QT602240_TOUCH_MULTI(9) */
- 0x00, 0x00, 0x00, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x64,
- /* QT602240_TOUCH_KEYARRAY(15) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00,
- /* QT602240_SPT_GPIOPWM(19) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00,
- /* QT602240_PROCI_GRIPFACE(20) */
- 0x00, 0x64, 0x64, 0x64, 0x64, 0x00, 0x00, 0x1e, 0x14, 0x04,
- 0x1e, 0x00,
- /* QT602240_PROCG_NOISE(22) */
- 0x05, 0x00, 0x00, 0x19, 0x00, 0xe7, 0xff, 0x04, 0x32, 0x00,
- 0x01, 0x0a, 0x0f, 0x14, 0x00, 0x00, 0xe8,
- /* QT602240_TOUCH_PROXIMITY(23) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00,
- /* QT602240_PROCI_ONETOUCH(24) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_SELFTEST(25) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_TWOTOUCH(27) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_CTECONFIG(28) */
- 0x00, 0x00, 0x00, 0x04, 0x08,
-};
-
-static const u8 init_vals_ver_21[] = {
- /* QT602240_GEN_COMMAND(6) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_GEN_POWER(7) */
- 0x20, 0xff, 0x32,
- /* QT602240_GEN_ACQUIRE(8) */
- 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
- /* QT602240_TOUCH_MULTI(9) */
- 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_TOUCH_KEYARRAY(15) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00,
- /* QT602240_SPT_GPIOPWM(19) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_GRIPFACE(20) */
- 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
- 0x0f, 0x0a,
- /* QT602240_PROCG_NOISE(22) */
- 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
- 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
- /* QT602240_TOUCH_PROXIMITY(23) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00,
- /* QT602240_PROCI_ONETOUCH(24) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_SELFTEST(25) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_TWOTOUCH(27) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_CTECONFIG(28) */
- 0x00, 0x00, 0x00, 0x08, 0x10, 0x00,
-};
-
-static const u8 init_vals_ver_22[] = {
- /* QT602240_GEN_COMMAND(6) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_GEN_POWER(7) */
- 0x20, 0xff, 0x32,
- /* QT602240_GEN_ACQUIRE(8) */
- 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
- /* QT602240_TOUCH_MULTI(9) */
- 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00,
- /* QT602240_TOUCH_KEYARRAY(15) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00,
- /* QT602240_SPT_GPIOPWM(19) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_GRIPFACE(20) */
- 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
- 0x0f, 0x0a,
- /* QT602240_PROCG_NOISE(22) */
- 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
- 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
- /* QT602240_TOUCH_PROXIMITY(23) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_ONETOUCH(24) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_SELFTEST(25) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- /* QT602240_PROCI_TWOTOUCH(27) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* QT602240_SPT_CTECONFIG(28) */
- 0x00, 0x00, 0x00, 0x08, 0x10, 0x00,
-};
-
-struct qt602240_info {
- u8 family_id;
- u8 variant_id;
- u8 version;
- u8 build;
- u8 matrix_xsize;
- u8 matrix_ysize;
- u8 object_num;
-};
-
-struct qt602240_object {
- u8 type;
- u16 start_address;
- u8 size;
- u8 instances;
- u8 num_report_ids;
-
- /* to map object and message */
- u8 max_reportid;
-};
-
-struct qt602240_message {
- u8 reportid;
- u8 message[7];
- u8 checksum;
-};
-
-struct qt602240_finger {
- int status;
- int x;
- int y;
- int area;
-};
-
-/* Each client has this additional data */
-struct qt602240_data {
- struct i2c_client *client;
- struct input_dev *input_dev;
- const struct qt602240_platform_data *pdata;
- struct qt602240_object *object_table;
- struct qt602240_info info;
- struct qt602240_finger finger[QT602240_MAX_FINGER];
- unsigned int irq;
-};
-
-static bool qt602240_object_readable(unsigned int type)
-{
- switch (type) {
- case QT602240_GEN_MESSAGE:
- case QT602240_GEN_COMMAND:
- case QT602240_GEN_POWER:
- case QT602240_GEN_ACQUIRE:
- case QT602240_TOUCH_MULTI:
- case QT602240_TOUCH_KEYARRAY:
- case QT602240_TOUCH_PROXIMITY:
- case QT602240_PROCI_GRIPFACE:
- case QT602240_PROCG_NOISE:
- case QT602240_PROCI_ONETOUCH:
- case QT602240_PROCI_TWOTOUCH:
- case QT602240_SPT_COMMSCONFIG:
- case QT602240_SPT_GPIOPWM:
- case QT602240_SPT_SELFTEST:
- case QT602240_SPT_CTECONFIG:
- case QT602240_SPT_USERDATA:
- return true;
- default:
- return false;
- }
-}
-
-static bool qt602240_object_writable(unsigned int type)
-{
- switch (type) {
- case QT602240_GEN_COMMAND:
- case QT602240_GEN_POWER:
- case QT602240_GEN_ACQUIRE:
- case QT602240_TOUCH_MULTI:
- case QT602240_TOUCH_KEYARRAY:
- case QT602240_TOUCH_PROXIMITY:
- case QT602240_PROCI_GRIPFACE:
- case QT602240_PROCG_NOISE:
- case QT602240_PROCI_ONETOUCH:
- case QT602240_PROCI_TWOTOUCH:
- case QT602240_SPT_GPIOPWM:
- case QT602240_SPT_SELFTEST:
- case QT602240_SPT_CTECONFIG:
- return true;
- default:
- return false;
- }
-}
-
-static void qt602240_dump_message(struct device *dev,
- struct qt602240_message *message)
-{
- dev_dbg(dev, "reportid:\t0x%x\n", message->reportid);
- dev_dbg(dev, "message1:\t0x%x\n", message->message[0]);
- dev_dbg(dev, "message2:\t0x%x\n", message->message[1]);
- dev_dbg(dev, "message3:\t0x%x\n", message->message[2]);
- dev_dbg(dev, "message4:\t0x%x\n", message->message[3]);
- dev_dbg(dev, "message5:\t0x%x\n", message->message[4]);
- dev_dbg(dev, "message6:\t0x%x\n", message->message[5]);
- dev_dbg(dev, "message7:\t0x%x\n", message->message[6]);
- dev_dbg(dev, "checksum:\t0x%x\n", message->checksum);
-}
-
-static int qt602240_check_bootloader(struct i2c_client *client,
- unsigned int state)
-{
- u8 val;
-
-recheck:
- if (i2c_master_recv(client, &val, 1) != 1) {
- dev_err(&client->dev, "%s: i2c recv failed\n", __func__);
- return -EIO;
- }
-
- switch (state) {
- case QT602240_WAITING_BOOTLOAD_CMD:
- case QT602240_WAITING_FRAME_DATA:
- val &= ~QT602240_BOOT_STATUS_MASK;
- break;
- case QT602240_FRAME_CRC_PASS:
- if (val == QT602240_FRAME_CRC_CHECK)
- goto recheck;
- break;
- default:
- return -EINVAL;
- }
-
- if (val != state) {
- dev_err(&client->dev, "Unvalid bootloader mode state\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int qt602240_unlock_bootloader(struct i2c_client *client)
-{
- u8 buf[2];
-
- buf[0] = QT602240_UNLOCK_CMD_LSB;
- buf[1] = QT602240_UNLOCK_CMD_MSB;
-
- if (i2c_master_send(client, buf, 2) != 2) {
- dev_err(&client->dev, "%s: i2c send failed\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int qt602240_fw_write(struct i2c_client *client,
- const u8 *data, unsigned int frame_size)
-{
- if (i2c_master_send(client, data, frame_size) != frame_size) {
- dev_err(&client->dev, "%s: i2c send failed\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int __qt602240_read_reg(struct i2c_client *client,
- u16 reg, u16 len, void *val)
-{
- struct i2c_msg xfer[2];
- u8 buf[2];
-
- buf[0] = reg & 0xff;
- buf[1] = (reg >> 8) & 0xff;
-
- /* Write register */
- xfer[0].addr = client->addr;
- xfer[0].flags = 0;
- xfer[0].len = 2;
- xfer[0].buf = buf;
-
- /* Read data */
- xfer[1].addr = client->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = len;
- xfer[1].buf = val;
-
- if (i2c_transfer(client->adapter, xfer, 2) != 2) {
- dev_err(&client->dev, "%s: i2c transfer failed\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int qt602240_read_reg(struct i2c_client *client, u16 reg, u8 *val)
-{
- return __qt602240_read_reg(client, reg, 1, val);
-}
-
-static int qt602240_write_reg(struct i2c_client *client, u16 reg, u8 val)
-{
- u8 buf[3];
-
- buf[0] = reg & 0xff;
- buf[1] = (reg >> 8) & 0xff;
- buf[2] = val;
-
- if (i2c_master_send(client, buf, 3) != 3) {
- dev_err(&client->dev, "%s: i2c send failed\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int qt602240_read_object_table(struct i2c_client *client,
- u16 reg, u8 *object_buf)
-{
- return __qt602240_read_reg(client, reg, QT602240_OBJECT_SIZE,
- object_buf);
-}
-
-static struct qt602240_object *
-qt602240_get_object(struct qt602240_data *data, u8 type)
-{
- struct qt602240_object *object;
- int i;
-
- for (i = 0; i < data->info.object_num; i++) {
- object = data->object_table + i;
- if (object->type == type)
- return object;
- }
-
- dev_err(&data->client->dev, "Invalid object type\n");
- return NULL;
-}
-
-static int qt602240_read_message(struct qt602240_data *data,
- struct qt602240_message *message)
-{
- struct qt602240_object *object;
- u16 reg;
-
- object = qt602240_get_object(data, QT602240_GEN_MESSAGE);
- if (!object)
- return -EINVAL;
-
- reg = object->start_address;
- return __qt602240_read_reg(data->client, reg,
- sizeof(struct qt602240_message), message);
-}
-
-static int qt602240_read_object(struct qt602240_data *data,
- u8 type, u8 offset, u8 *val)
-{
- struct qt602240_object *object;
- u16 reg;
-
- object = qt602240_get_object(data, type);
- if (!object)
- return -EINVAL;
-
- reg = object->start_address;
- return __qt602240_read_reg(data->client, reg + offset, 1, val);
-}
-
-static int qt602240_write_object(struct qt602240_data *data,
- u8 type, u8 offset, u8 val)
-{
- struct qt602240_object *object;
- u16 reg;
-
- object = qt602240_get_object(data, type);
- if (!object)
- return -EINVAL;
-
- reg = object->start_address;
- return qt602240_write_reg(data->client, reg + offset, val);
-}
-
-static void qt602240_input_report(struct qt602240_data *data, int single_id)
-{
- struct qt602240_finger *finger = data->finger;
- struct input_dev *input_dev = data->input_dev;
- int status = finger[single_id].status;
- int finger_num = 0;
- int id;
-
- for (id = 0; id < QT602240_MAX_FINGER; id++) {
- if (!finger[id].status)
- continue;
-
- input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
- finger[id].status != QT602240_RELEASE ?
- finger[id].area : 0);
- input_report_abs(input_dev, ABS_MT_POSITION_X,
- finger[id].x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y,
- finger[id].y);
- input_mt_sync(input_dev);
-
- if (finger[id].status == QT602240_RELEASE)
- finger[id].status = 0;
- else
- finger_num++;
- }
-
- input_report_key(input_dev, BTN_TOUCH, finger_num > 0);
-
- if (status != QT602240_RELEASE) {
- input_report_abs(input_dev, ABS_X, finger[single_id].x);
- input_report_abs(input_dev, ABS_Y, finger[single_id].y);
- }
-
- input_sync(input_dev);
-}
-
-static void qt602240_input_touchevent(struct qt602240_data *data,
- struct qt602240_message *message, int id)
-{
- struct qt602240_finger *finger = data->finger;
- struct device *dev = &data->client->dev;
- u8 status = message->message[0];
- int x;
- int y;
- int area;
-
- /* Check the touch is present on the screen */
- if (!(status & QT602240_DETECT)) {
- if (status & QT602240_RELEASE) {
- dev_dbg(dev, "[%d] released\n", id);
-
- finger[id].status = QT602240_RELEASE;
- qt602240_input_report(data, id);
- }
- return;
- }
-
- /* Check only AMP detection */
- if (!(status & (QT602240_PRESS | QT602240_MOVE)))
- return;
-
- x = (message->message[1] << 2) | ((message->message[3] & ~0x3f) >> 6);
- y = (message->message[2] << 2) | ((message->message[3] & ~0xf3) >> 2);
- area = message->message[4];
-
- dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
- status & QT602240_MOVE ? "moved" : "pressed",
- x, y, area);
-
- finger[id].status = status & QT602240_MOVE ?
- QT602240_MOVE : QT602240_PRESS;
- finger[id].x = x;
- finger[id].y = y;
- finger[id].area = area;
-
- qt602240_input_report(data, id);
-}
-
-static irqreturn_t qt602240_interrupt(int irq, void *dev_id)
-{
- struct qt602240_data *data = dev_id;
- struct qt602240_message message;
- struct qt602240_object *object;
- struct device *dev = &data->client->dev;
- int id;
- u8 reportid;
- u8 max_reportid;
- u8 min_reportid;
-
- do {
- if (qt602240_read_message(data, &message)) {
- dev_err(dev, "Failed to read message\n");
- goto end;
- }
-
- reportid = message.reportid;
-
- /* whether reportid is thing of QT602240_TOUCH_MULTI */
- object = qt602240_get_object(data, QT602240_TOUCH_MULTI);
- if (!object)
- goto end;
-
- max_reportid = object->max_reportid;
- min_reportid = max_reportid - object->num_report_ids + 1;
- id = reportid - min_reportid;
-
- if (reportid >= min_reportid && reportid <= max_reportid)
- qt602240_input_touchevent(data, &message, id);
- else
- qt602240_dump_message(dev, &message);
- } while (reportid != 0xff);
-
-end:
- return IRQ_HANDLED;
-}
-
-static int qt602240_check_reg_init(struct qt602240_data *data)
-{
- struct qt602240_object *object;
- struct device *dev = &data->client->dev;
- int index = 0;
- int i, j;
- u8 version = data->info.version;
- u8 *init_vals;
-
- switch (version) {
- case QT602240_VER_20:
- init_vals = (u8 *)init_vals_ver_20;
- break;
- case QT602240_VER_21:
- init_vals = (u8 *)init_vals_ver_21;
- break;
- case QT602240_VER_22:
- init_vals = (u8 *)init_vals_ver_22;
- break;
- default:
- dev_err(dev, "Firmware version %d doesn't support\n", version);
- return -EINVAL;
- }
-
- for (i = 0; i < data->info.object_num; i++) {
- object = data->object_table + i;
-
- if (!qt602240_object_writable(object->type))
- continue;
-
- for (j = 0; j < object->size + 1; j++)
- qt602240_write_object(data, object->type, j,
- init_vals[index + j]);
-
- index += object->size + 1;
- }
-
- return 0;
-}
-
-static int qt602240_check_matrix_size(struct qt602240_data *data)
-{
- const struct qt602240_platform_data *pdata = data->pdata;
- struct device *dev = &data->client->dev;
- int mode = -1;
- int error;
- u8 val;
-
- dev_dbg(dev, "Number of X lines: %d\n", pdata->x_line);
- dev_dbg(dev, "Number of Y lines: %d\n", pdata->y_line);
-
- switch (pdata->x_line) {
- case 0 ... 15:
- if (pdata->y_line <= 14)
- mode = 0;
- break;
- case 16:
- if (pdata->y_line <= 12)
- mode = 1;
- if (pdata->y_line == 13 || pdata->y_line == 14)
- mode = 0;
- break;
- case 17:
- if (pdata->y_line <= 11)
- mode = 2;
- if (pdata->y_line == 12 || pdata->y_line == 13)
- mode = 1;
- break;
- case 18:
- if (pdata->y_line <= 10)
- mode = 3;
- if (pdata->y_line == 11 || pdata->y_line == 12)
- mode = 2;
- break;
- case 19:
- if (pdata->y_line <= 9)
- mode = 4;
- if (pdata->y_line == 10 || pdata->y_line == 11)
- mode = 3;
- break;
- case 20:
- mode = 4;
- }
-
- if (mode < 0) {
- dev_err(dev, "Invalid X/Y lines\n");
- return -EINVAL;
- }
-
- error = qt602240_read_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_MODE, &val);
- if (error)
- return error;
-
- if (mode == val)
- return 0;
-
- /* Change the CTE configuration */
- qt602240_write_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_CTRL, 1);
- qt602240_write_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_MODE, mode);
- qt602240_write_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_CTRL, 0);
-
- return 0;
-}
-
-static int qt602240_make_highchg(struct qt602240_data *data)
-{
- struct device *dev = &data->client->dev;
- int count = 10;
- int error;
- u8 val;
-
- /* Read dummy message to make high CHG pin */
- do {
- error = qt602240_read_object(data, QT602240_GEN_MESSAGE, 0, &val);
- if (error)
- return error;
- } while ((val != 0xff) && --count);
-
- if (!count) {
- dev_err(dev, "CHG pin isn't cleared\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static void qt602240_handle_pdata(struct qt602240_data *data)
-{
- const struct qt602240_platform_data *pdata = data->pdata;
- u8 voltage;
-
- /* Set touchscreen lines */
- qt602240_write_object(data, QT602240_TOUCH_MULTI, QT602240_TOUCH_XSIZE,
- pdata->x_line);
- qt602240_write_object(data, QT602240_TOUCH_MULTI, QT602240_TOUCH_YSIZE,
- pdata->y_line);
-
- /* Set touchscreen orient */
- qt602240_write_object(data, QT602240_TOUCH_MULTI, QT602240_TOUCH_ORIENT,
- pdata->orient);
-
- /* Set touchscreen burst length */
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_BLEN, pdata->blen);
-
- /* Set touchscreen threshold */
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_TCHTHR, pdata->threshold);
-
- /* Set touchscreen resolution */
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff);
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8);
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff);
- qt602240_write_object(data, QT602240_TOUCH_MULTI,
- QT602240_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8);
-
- /* Set touchscreen voltage */
- if (data->info.version >= QT602240_VER_21 && pdata->voltage) {
- if (pdata->voltage < QT602240_VOLTAGE_DEFAULT) {
- voltage = (QT602240_VOLTAGE_DEFAULT - pdata->voltage) /
- QT602240_VOLTAGE_STEP;
- voltage = 0xff - voltage + 1;
- } else
- voltage = (pdata->voltage - QT602240_VOLTAGE_DEFAULT) /
- QT602240_VOLTAGE_STEP;
-
- qt602240_write_object(data, QT602240_SPT_CTECONFIG,
- QT602240_CTE_VOLTAGE, voltage);
- }
-}
-
-static int qt602240_get_info(struct qt602240_data *data)
-{
- struct i2c_client *client = data->client;
- struct qt602240_info *info = &data->info;
- int error;
- u8 val;
-
- error = qt602240_read_reg(client, QT602240_FAMILY_ID, &val);
- if (error)
- return error;
- info->family_id = val;
-
- error = qt602240_read_reg(client, QT602240_VARIANT_ID, &val);
- if (error)
- return error;
- info->variant_id = val;
-
- error = qt602240_read_reg(client, QT602240_VERSION, &val);
- if (error)
- return error;
- info->version = val;
-
- error = qt602240_read_reg(client, QT602240_BUILD, &val);
- if (error)
- return error;
- info->build = val;
-
- error = qt602240_read_reg(client, QT602240_OBJECT_NUM, &val);
- if (error)
- return error;
- info->object_num = val;
-
- return 0;
-}
-
-static int qt602240_get_object_table(struct qt602240_data *data)
-{
- int error;
- int i;
- u16 reg;
- u8 reportid = 0;
- u8 buf[QT602240_OBJECT_SIZE];
-
- for (i = 0; i < data->info.object_num; i++) {
- struct qt602240_object *object = data->object_table + i;
-
- reg = QT602240_OBJECT_START + QT602240_OBJECT_SIZE * i;
- error = qt602240_read_object_table(data->client, reg, buf);
- if (error)
- return error;
-
- object->type = buf[0];
- object->start_address = (buf[2] << 8) | buf[1];
- object->size = buf[3];
- object->instances = buf[4];
- object->num_report_ids = buf[5];
-
- if (object->num_report_ids) {
- reportid += object->num_report_ids *
- (object->instances + 1);
- object->max_reportid = reportid;
- }
- }
-
- return 0;
-}
-
-static int qt602240_initialize(struct qt602240_data *data)
-{
- struct i2c_client *client = data->client;
- struct qt602240_info *info = &data->info;
- int error;
- u8 val;
-
- error = qt602240_get_info(data);
- if (error)
- return error;
-
- data->object_table = kcalloc(info->object_num,
- sizeof(struct qt602240_object),
- GFP_KERNEL);
- if (!data->object_table) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- return -ENOMEM;
- }
-
- /* Get object table information */
- error = qt602240_get_object_table(data);
- if (error)
- return error;
-
- /* Check register init values */
- error = qt602240_check_reg_init(data);
- if (error)
- return error;
-
- /* Check X/Y matrix size */
- error = qt602240_check_matrix_size(data);
- if (error)
- return error;
-
- error = qt602240_make_highchg(data);
- if (error)
- return error;
-
- qt602240_handle_pdata(data);
-
- /* Backup to memory */
- qt602240_write_object(data, QT602240_GEN_COMMAND,
- QT602240_COMMAND_BACKUPNV,
- QT602240_BACKUP_VALUE);
- msleep(QT602240_BACKUP_TIME);
-
- /* Soft reset */
- qt602240_write_object(data, QT602240_GEN_COMMAND,
- QT602240_COMMAND_RESET, 1);
- msleep(QT602240_RESET_TIME);
-
- /* Update matrix size at info struct */
- error = qt602240_read_reg(client, QT602240_MATRIX_X_SIZE, &val);
- if (error)
- return error;
- info->matrix_xsize = val;
-
- error = qt602240_read_reg(client, QT602240_MATRIX_Y_SIZE, &val);
- if (error)
- return error;
- info->matrix_ysize = val;
-
- dev_info(&client->dev,
- "Family ID: %d Variant ID: %d Version: %d Build: %d\n",
- info->family_id, info->variant_id, info->version,
- info->build);
-
- dev_info(&client->dev,
- "Matrix X Size: %d Matrix Y Size: %d Object Num: %d\n",
- info->matrix_xsize, info->matrix_ysize,
- info->object_num);
-
- return 0;
-}
-
-static ssize_t qt602240_object_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct qt602240_data *data = dev_get_drvdata(dev);
- struct qt602240_object *object;
- int count = 0;
- int i, j;
- int error;
- u8 val;
-
- for (i = 0; i < data->info.object_num; i++) {
- object = data->object_table + i;
-
- count += sprintf(buf + count,
- "Object Table Element %d(Type %d)\n",
- i + 1, object->type);
-
- if (!qt602240_object_readable(object->type)) {
- count += sprintf(buf + count, "\n");
- continue;
- }
-
- for (j = 0; j < object->size + 1; j++) {
- error = qt602240_read_object(data,
- object->type, j, &val);
- if (error)
- return error;
-
- count += sprintf(buf + count,
- " Byte %d: 0x%x (%d)\n", j, val, val);
- }
-
- count += sprintf(buf + count, "\n");
- }
-
- return count;
-}
-
-static int qt602240_load_fw(struct device *dev, const char *fn)
-{
- struct qt602240_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- const struct firmware *fw = NULL;
- unsigned int frame_size;
- unsigned int pos = 0;
- int ret;
-
- ret = request_firmware(&fw, fn, dev);
- if (ret) {
- dev_err(dev, "Unable to open firmware %s\n", fn);
- return ret;
- }
-
- /* Change to the bootloader mode */
- qt602240_write_object(data, QT602240_GEN_COMMAND,
- QT602240_COMMAND_RESET, QT602240_BOOT_VALUE);
- msleep(QT602240_RESET_TIME);
-
- /* Change to slave address of bootloader */
- if (client->addr == QT602240_APP_LOW)
- client->addr = QT602240_BOOT_LOW;
- else
- client->addr = QT602240_BOOT_HIGH;
-
- ret = qt602240_check_bootloader(client, QT602240_WAITING_BOOTLOAD_CMD);
- if (ret)
- goto out;
-
- /* Unlock bootloader */
- qt602240_unlock_bootloader(client);
-
- while (pos < fw->size) {
- ret = qt602240_check_bootloader(client,
- QT602240_WAITING_FRAME_DATA);
- if (ret)
- goto out;
-
- frame_size = ((*(fw->data + pos) << 8) | *(fw->data + pos + 1));
-
-	/* We should add 2 to the frame size as the firmware data does not
-	 * include the CRC bytes.
- */
- frame_size += 2;
-
- /* Write one frame to device */
- qt602240_fw_write(client, fw->data + pos, frame_size);
-
- ret = qt602240_check_bootloader(client,
- QT602240_FRAME_CRC_PASS);
- if (ret)
- goto out;
-
- pos += frame_size;
-
- dev_dbg(dev, "Updated %d bytes / %zd bytes\n", pos, fw->size);
- }
-
-out:
- release_firmware(fw);
-
- /* Change to slave address of application */
- if (client->addr == QT602240_BOOT_LOW)
- client->addr = QT602240_APP_LOW;
- else
- client->addr = QT602240_APP_HIGH;
-
- return ret;
-}
-
-static ssize_t qt602240_update_fw_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct qt602240_data *data = dev_get_drvdata(dev);
- unsigned int version;
- int error;
-
- if (sscanf(buf, "%u", &version) != 1) {
- dev_err(dev, "Invalid values\n");
- return -EINVAL;
- }
-
- if (data->info.version < QT602240_VER_21 || version < QT602240_VER_21) {
- dev_err(dev, "FW update supported starting with version 21\n");
- return -EINVAL;
- }
-
- disable_irq(data->irq);
-
- error = qt602240_load_fw(dev, QT602240_FW_NAME);
- if (error) {
- dev_err(dev, "The firmware update failed(%d)\n", error);
- count = error;
- } else {
- dev_dbg(dev, "The firmware update succeeded\n");
-
- /* Wait for reset */
- msleep(QT602240_FWRESET_TIME);
-
- kfree(data->object_table);
- data->object_table = NULL;
-
- qt602240_initialize(data);
- }
-
- enable_irq(data->irq);
-
- return count;
-}
-
-static DEVICE_ATTR(object, 0444, qt602240_object_show, NULL);
-static DEVICE_ATTR(update_fw, 0664, NULL, qt602240_update_fw_store);
-
-static struct attribute *qt602240_attrs[] = {
- &dev_attr_object.attr,
- &dev_attr_update_fw.attr,
- NULL
-};
-
-static const struct attribute_group qt602240_attr_group = {
- .attrs = qt602240_attrs,
-};
-
-static void qt602240_start(struct qt602240_data *data)
-{
- /* Touch enable */
- qt602240_write_object(data,
- QT602240_TOUCH_MULTI, QT602240_TOUCH_CTRL, 0x83);
-}
-
-static void qt602240_stop(struct qt602240_data *data)
-{
- /* Touch disable */
- qt602240_write_object(data,
- QT602240_TOUCH_MULTI, QT602240_TOUCH_CTRL, 0);
-}
-
-static int qt602240_input_open(struct input_dev *dev)
-{
- struct qt602240_data *data = input_get_drvdata(dev);
-
- qt602240_start(data);
-
- return 0;
-}
-
-static void qt602240_input_close(struct input_dev *dev)
-{
- struct qt602240_data *data = input_get_drvdata(dev);
-
- qt602240_stop(data);
-}
-
-static int __devinit qt602240_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct qt602240_data *data;
- struct input_dev *input_dev;
- int error;
-
- if (!client->dev.platform_data)
- return -EINVAL;
-
- data = kzalloc(sizeof(struct qt602240_data), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!data || !input_dev) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
-
- input_dev->name = "AT42QT602240/ATMXT224 Touchscreen";
- input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
- input_dev->open = qt602240_input_open;
- input_dev->close = qt602240_input_close;
-
- __set_bit(EV_ABS, input_dev->evbit);
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(BTN_TOUCH, input_dev->keybit);
-
- /* For single touch */
- input_set_abs_params(input_dev, ABS_X,
- 0, QT602240_MAX_XC, 0, 0);
- input_set_abs_params(input_dev, ABS_Y,
- 0, QT602240_MAX_YC, 0, 0);
-
- /* For multi touch */
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
- 0, QT602240_MAX_AREA, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, QT602240_MAX_XC, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, QT602240_MAX_YC, 0, 0);
-
- input_set_drvdata(input_dev, data);
-
- data->client = client;
- data->input_dev = input_dev;
- data->pdata = client->dev.platform_data;
- data->irq = client->irq;
-
- i2c_set_clientdata(client, data);
-
- error = qt602240_initialize(data);
- if (error)
- goto err_free_object;
-
- error = request_threaded_irq(client->irq, NULL, qt602240_interrupt,
- IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
- if (error) {
- dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_object;
- }
-
- error = input_register_device(input_dev);
- if (error)
- goto err_free_irq;
-
- error = sysfs_create_group(&client->dev.kobj, &qt602240_attr_group);
- if (error)
- goto err_unregister_device;
-
- return 0;
-
-err_unregister_device:
- input_unregister_device(input_dev);
- input_dev = NULL;
-err_free_irq:
- free_irq(client->irq, data);
-err_free_object:
- kfree(data->object_table);
-err_free_mem:
- input_free_device(input_dev);
- kfree(data);
- return error;
-}
-
-static int __devexit qt602240_remove(struct i2c_client *client)
-{
- struct qt602240_data *data = i2c_get_clientdata(client);
-
- sysfs_remove_group(&client->dev.kobj, &qt602240_attr_group);
- free_irq(data->irq, data);
- input_unregister_device(data->input_dev);
- kfree(data->object_table);
- kfree(data);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int qt602240_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct qt602240_data *data = i2c_get_clientdata(client);
- struct input_dev *input_dev = data->input_dev;
-
- mutex_lock(&input_dev->mutex);
-
- if (input_dev->users)
- qt602240_stop(data);
-
- mutex_unlock(&input_dev->mutex);
-
- return 0;
-}
-
-static int qt602240_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct qt602240_data *data = i2c_get_clientdata(client);
- struct input_dev *input_dev = data->input_dev;
-
- /* Soft reset */
- qt602240_write_object(data, QT602240_GEN_COMMAND,
- QT602240_COMMAND_RESET, 1);
-
- msleep(QT602240_RESET_TIME);
-
- mutex_lock(&input_dev->mutex);
-
- if (input_dev->users)
- qt602240_start(data);
-
- mutex_unlock(&input_dev->mutex);
-
- return 0;
-}
-
-static const struct dev_pm_ops qt602240_pm_ops = {
- .suspend = qt602240_suspend,
- .resume = qt602240_resume,
-};
-#endif
-
-static const struct i2c_device_id qt602240_id[] = {
- { "qt602240_ts", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, qt602240_id);
-
-static struct i2c_driver qt602240_driver = {
- .driver = {
- .name = "qt602240_ts",
- .owner = THIS_MODULE,
-#ifdef CONFIG_PM
- .pm = &qt602240_pm_ops,
-#endif
- },
- .probe = qt602240_probe,
- .remove = __devexit_p(qt602240_remove),
- .id_table = qt602240_id,
-};
-
-static int __init qt602240_init(void)
-{
- return i2c_add_driver(&qt602240_driver);
-}
-
-static void __exit qt602240_exit(void)
-{
- i2c_del_driver(&qt602240_driver);
-}
-
-module_init(qt602240_init);
-module_exit(qt602240_exit);
-
-/* Module information */
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_DESCRIPTION("AT42QT602240/ATMXT224 Touchscreen driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
new file mode 100644
index 000000000000..87420616efa4
--- /dev/null
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -0,0 +1,756 @@
+/*
+ * TSC2005 touchscreen driver
+ *
+ * Copyright (C) 2006-2010 Nokia Corporation
+ *
+ * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+ * based on TSC2301 driver by Klaus K. Pedersen <klaus.k.pedersen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/tsc2005.h>
+
+/*
+ * The touchscreen interface operates as follows:
+ *
+ * 1) Pen is pressed against the touchscreen.
+ * 2) TSC2005 performs AD conversion.
+ * 3) After the conversion is done TSC2005 drives DAV line down.
+ * 4) GPIO IRQ is received and tsc2005_irq_thread() is scheduled.
+ * 5) tsc2005_irq_thread() queues up an spi transfer to fetch the x, y, z1, z2
+ * values.
+ * 6) tsc2005_irq_thread() reports coordinates to input layer and sets up
+ * tsc2005_penup_timer() to be called after TSC2005_PENUP_TIME_MS (40ms).
+ * 7) When the penup timer expires, there have not been touch or DAV interrupts
+ * during the last 40ms which means the pen has been lifted.
+ *
+ * ESD recovery via a hardware reset is done if the TSC2005 doesn't respond
+ * after a configurable period (in ms) of activity. If esd_timeout is 0, the
+ * watchdog is disabled.
+ */
+
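The flow above boils down to one pattern: every valid DAV interrupt re-arms a 40 ms deadline, and pen-up is reported only when that deadline lapses with no new sample. A minimal standalone sketch of that logic (illustrative only, not part of the patch; the sample timestamps are made up):

#include <stdbool.h>
#include <stdio.h>

#define PENUP_TIME_MS	40

int main(void)
{
	/* hypothetical DAV interrupt times in ms; the 20 -> 150 gap exceeds 40 ms */
	const unsigned long samples[] = { 0, 10, 20, 150 };
	unsigned long deadline = 0;
	bool pen_down = false;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long now = samples[i];

		if (pen_down && now > deadline) {
			printf("%lu ms: pen up (no sample for over %d ms)\n",
			       now, PENUP_TIME_MS);
			pen_down = false;
		}

		printf("%lu ms: valid sample, pen down\n", now);
		pen_down = true;
		deadline = now + PENUP_TIME_MS;	/* like mod_timer() in step 6 */
	}

	return 0;
}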
+/* control byte 1 */
+#define TSC2005_CMD 0x80
+#define TSC2005_CMD_NORMAL 0x00
+#define TSC2005_CMD_STOP 0x01
+#define TSC2005_CMD_12BIT 0x04
+
+/* control byte 0 */
+#define TSC2005_REG_READ 0x0001
+#define TSC2005_REG_PND0 0x0002
+#define TSC2005_REG_X 0x0000
+#define TSC2005_REG_Y 0x0008
+#define TSC2005_REG_Z1 0x0010
+#define TSC2005_REG_Z2 0x0018
+#define TSC2005_REG_TEMP_HIGH 0x0050
+#define TSC2005_REG_CFR0 0x0060
+#define TSC2005_REG_CFR1 0x0068
+#define TSC2005_REG_CFR2 0x0070
+
+/* configuration register 0 */
+#define TSC2005_CFR0_PRECHARGE_276US 0x0040
+#define TSC2005_CFR0_STABTIME_1MS 0x0300
+#define TSC2005_CFR0_CLOCK_1MHZ 0x1000
+#define TSC2005_CFR0_RESOLUTION12 0x2000
+#define TSC2005_CFR0_PENMODE 0x8000
+#define TSC2005_CFR0_INITVALUE (TSC2005_CFR0_STABTIME_1MS | \
+ TSC2005_CFR0_CLOCK_1MHZ | \
+ TSC2005_CFR0_RESOLUTION12 | \
+ TSC2005_CFR0_PRECHARGE_276US | \
+ TSC2005_CFR0_PENMODE)
+
+/* bits common to both read and write of configuration register 0 */
+#define TSC2005_CFR0_RW_MASK 0x3fff
+
+/* configuration register 1 */
+#define TSC2005_CFR1_BATCHDELAY_4MS 0x0003
+#define TSC2005_CFR1_INITVALUE TSC2005_CFR1_BATCHDELAY_4MS
+
+/* configuration register 2 */
+#define TSC2005_CFR2_MAVE_Z 0x0004
+#define TSC2005_CFR2_MAVE_Y 0x0008
+#define TSC2005_CFR2_MAVE_X 0x0010
+#define TSC2005_CFR2_AVG_7 0x0800
+#define TSC2005_CFR2_MEDIUM_15 0x3000
+#define TSC2005_CFR2_INITVALUE (TSC2005_CFR2_MAVE_X | \
+ TSC2005_CFR2_MAVE_Y | \
+ TSC2005_CFR2_MAVE_Z | \
+ TSC2005_CFR2_MEDIUM_15 | \
+ TSC2005_CFR2_AVG_7)
+
+#define MAX_12BIT 0xfff
+#define TSC2005_SPI_MAX_SPEED_HZ 10000000
+#define TSC2005_PENUP_TIME_MS 40
+
+struct tsc2005_spi_rd {
+ struct spi_transfer spi_xfer;
+ u32 spi_tx;
+ u32 spi_rx;
+};
+
+struct tsc2005 {
+ struct spi_device *spi;
+
+ struct spi_message spi_read_msg;
+ struct tsc2005_spi_rd spi_x;
+ struct tsc2005_spi_rd spi_y;
+ struct tsc2005_spi_rd spi_z1;
+ struct tsc2005_spi_rd spi_z2;
+
+ struct input_dev *idev;
+ char phys[32];
+
+ struct mutex mutex;
+
+ /* raw copy of previous x,y,z */
+ int in_x;
+ int in_y;
+ int in_z1;
+ int in_z2;
+
+ spinlock_t lock;
+ struct timer_list penup_timer;
+
+ unsigned int esd_timeout;
+ struct delayed_work esd_work;
+ unsigned long last_valid_interrupt;
+
+ unsigned int x_plate_ohm;
+
+ bool opened;
+ bool suspended;
+
+ bool pen_down;
+
+ void (*set_reset)(bool enable);
+};
+
+static int tsc2005_cmd(struct tsc2005 *ts, u8 cmd)
+{
+ u8 tx = TSC2005_CMD | TSC2005_CMD_12BIT | cmd;
+ struct spi_transfer xfer = {
+ .tx_buf = &tx,
+ .len = 1,
+ .bits_per_word = 8,
+ };
+ struct spi_message msg;
+ int error;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ error = spi_sync(ts->spi, &msg);
+ if (error) {
+ dev_err(&ts->spi->dev, "%s: failed, command: %x, error: %d\n",
+ __func__, cmd, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int tsc2005_write(struct tsc2005 *ts, u8 reg, u16 value)
+{
+ u32 tx = ((reg | TSC2005_REG_PND0) << 16) | value;
+ struct spi_transfer xfer = {
+ .tx_buf = &tx,
+ .len = 4,
+ .bits_per_word = 24,
+ };
+ struct spi_message msg;
+ int error;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ error = spi_sync(ts->spi, &msg);
+ if (error) {
+ dev_err(&ts->spi->dev,
+ "%s: failed, register: %x, value: %x, error: %d\n",
+ __func__, reg, value, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void tsc2005_setup_read(struct tsc2005_spi_rd *rd, u8 reg, bool last)
+{
+ memset(rd, 0, sizeof(*rd));
+
+ rd->spi_tx = (reg | TSC2005_REG_READ) << 16;
+ rd->spi_xfer.tx_buf = &rd->spi_tx;
+ rd->spi_xfer.rx_buf = &rd->spi_rx;
+ rd->spi_xfer.len = 4;
+ rd->spi_xfer.bits_per_word = 24;
+ rd->spi_xfer.cs_change = !last;
+}
+
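The three word layouts used by tsc2005_cmd(), tsc2005_write() and tsc2005_setup_read() can be sanity-checked offline by composing them from the #defines above; an illustrative sketch, not part of the patch, with the Y register and the CFR0 init value picked as arbitrary examples:

#include <stdio.h>

#define TSC2005_CMD		0x80
#define TSC2005_CMD_NORMAL	0x00
#define TSC2005_CMD_12BIT	0x04

#define TSC2005_REG_READ	0x0001
#define TSC2005_REG_PND0	0x0002
#define TSC2005_REG_Y		0x0008
#define TSC2005_REG_CFR0	0x0060

int main(void)
{
	/* control byte, as built in tsc2005_cmd() */
	unsigned int cmd = TSC2005_CMD | TSC2005_CMD_12BIT | TSC2005_CMD_NORMAL;

	/* 24-bit write word, as built in tsc2005_write(); 0xb340 is the
	 * CFR0 init value composed from the configuration defines above */
	unsigned int wr = ((TSC2005_REG_CFR0 | TSC2005_REG_PND0) << 16) | 0xb340;

	/* 24-bit read word, as built in tsc2005_setup_read() */
	unsigned int rd = (TSC2005_REG_Y | TSC2005_REG_READ) << 16;

	printf("cmd   = 0x%02x\n", cmd);	/* 0x84 */
	printf("write = 0x%06x\n", wr);		/* 0x62b340 */
	printf("read  = 0x%06x\n", rd);		/* 0x090000 */
	return 0;
}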
+static int tsc2005_read(struct tsc2005 *ts, u8 reg, u16 *value)
+{
+ struct tsc2005_spi_rd spi_rd;
+ struct spi_message msg;
+ int error;
+
+ tsc2005_setup_read(&spi_rd, reg, true);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&spi_rd.spi_xfer, &msg);
+
+ error = spi_sync(ts->spi, &msg);
+ if (error)
+ return error;
+
+ *value = spi_rd.spi_rx;
+ return 0;
+}
+
+static void tsc2005_update_pen_state(struct tsc2005 *ts,
+ int x, int y, int pressure)
+{
+ if (pressure) {
+ input_report_abs(ts->idev, ABS_X, x);
+ input_report_abs(ts->idev, ABS_Y, y);
+ input_report_abs(ts->idev, ABS_PRESSURE, pressure);
+ if (!ts->pen_down) {
+ input_report_key(ts->idev, BTN_TOUCH, !!pressure);
+ ts->pen_down = true;
+ }
+ } else {
+ input_report_abs(ts->idev, ABS_PRESSURE, 0);
+ if (ts->pen_down) {
+ input_report_key(ts->idev, BTN_TOUCH, 0);
+ ts->pen_down = false;
+ }
+ }
+ input_sync(ts->idev);
+ dev_dbg(&ts->spi->dev, "point(%4d,%4d), pressure (%4d)\n", x, y,
+ pressure);
+}
+
+static irqreturn_t tsc2005_irq_thread(int irq, void *_ts)
+{
+ struct tsc2005 *ts = _ts;
+ unsigned long flags;
+ unsigned int pressure;
+ u32 x, y;
+ u32 z1, z2;
+ int error;
+
+ /* read the coordinates */
+ error = spi_sync(ts->spi, &ts->spi_read_msg);
+ if (unlikely(error))
+ goto out;
+
+ x = ts->spi_x.spi_rx;
+ y = ts->spi_y.spi_rx;
+ z1 = ts->spi_z1.spi_rx;
+ z2 = ts->spi_z2.spi_rx;
+
+ /* validate position */
+ if (unlikely(x > MAX_12BIT || y > MAX_12BIT))
+ goto out;
+
+ /* Skip reading if the pressure components are out of range */
+ if (unlikely(z1 == 0 || z2 > MAX_12BIT || z1 >= z2))
+ goto out;
+
+ /*
+ * Skip point if this is a pen down with the exact same values as
+ * the value before pen-up - that implies SPI fed us stale data
+ */
+ if (!ts->pen_down &&
+ ts->in_x == x && ts->in_y == y &&
+ ts->in_z1 == z1 && ts->in_z2 == z2) {
+ goto out;
+ }
+
+ /*
+ * At this point we are happy we have a valid and useful reading.
+ * Remember it for later comparisons. We may now begin downsampling.
+ */
+ ts->in_x = x;
+ ts->in_y = y;
+ ts->in_z1 = z1;
+ ts->in_z2 = z2;
+
+ /* Compute touch pressure resistance using equation #1 */
+ pressure = x * (z2 - z1) / z1;
+ pressure = pressure * ts->x_plate_ohm / 4096;
+ if (unlikely(pressure > MAX_12BIT))
+ goto out;
+
+ spin_lock_irqsave(&ts->lock, flags);
+
+ tsc2005_update_pen_state(ts, x, y, pressure);
+ mod_timer(&ts->penup_timer,
+ jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));
+
+ spin_unlock_irqrestore(&ts->lock, flags);
+
+ ts->last_valid_interrupt = jiffies;
+out:
+ return IRQ_HANDLED;
+}
+
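The pressure computation ("equation #1" in the handler above) is plain integer arithmetic, so a worked example is easy to check by hand; the 12-bit sample values below are hypothetical and 280 ohm is the default x-plate resistance used later in probe():

#include <stdio.h>

int main(void)
{
	/* hypothetical 12-bit conversion results */
	unsigned int x = 2048, z1 = 400, z2 = 600;
	unsigned int x_plate_ohm = 280;			/* driver default */
	unsigned int pressure;

	pressure = x * (z2 - z1) / z1;			/* 2048 * 200 / 400 = 1024 */
	pressure = pressure * x_plate_ohm / 4096;	/* 1024 * 280 / 4096 = 70 */

	printf("pressure = %u\n", pressure);	/* well below MAX_12BIT (0xfff) */
	return 0;
}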
+static void tsc2005_penup_timer(unsigned long data)
+{
+ struct tsc2005 *ts = (struct tsc2005 *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ts->lock, flags);
+ tsc2005_update_pen_state(ts, 0, 0, 0);
+ spin_unlock_irqrestore(&ts->lock, flags);
+}
+
+static void tsc2005_start_scan(struct tsc2005 *ts)
+{
+ tsc2005_write(ts, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
+ tsc2005_write(ts, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
+ tsc2005_write(ts, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
+ tsc2005_cmd(ts, TSC2005_CMD_NORMAL);
+}
+
+static void tsc2005_stop_scan(struct tsc2005 *ts)
+{
+ tsc2005_cmd(ts, TSC2005_CMD_STOP);
+}
+
+/* must be called with ts->mutex held */
+static void __tsc2005_disable(struct tsc2005 *ts)
+{
+ tsc2005_stop_scan(ts);
+
+ disable_irq(ts->spi->irq);
+ del_timer_sync(&ts->penup_timer);
+
+ cancel_delayed_work_sync(&ts->esd_work);
+
+ enable_irq(ts->spi->irq);
+}
+
+/* must be called with ts->mutex held */
+static void __tsc2005_enable(struct tsc2005 *ts)
+{
+ tsc2005_start_scan(ts);
+
+ if (ts->esd_timeout && ts->set_reset) {
+ ts->last_valid_interrupt = jiffies;
+ schedule_delayed_work(&ts->esd_work,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(ts->esd_timeout)));
+ }
+
+}
+
+static ssize_t tsc2005_selftest_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct tsc2005 *ts = spi_get_drvdata(spi);
+ u16 temp_high;
+ u16 temp_high_orig;
+ u16 temp_high_test;
+ bool success = true;
+ int error;
+
+ mutex_lock(&ts->mutex);
+
+ /*
+ * Test TSC2005 communications via temp high register.
+ */
+ __tsc2005_disable(ts);
+
+ error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
+ if (error) {
+ dev_warn(dev, "selftest failed: read error %d\n", error);
+ success = false;
+ goto out;
+ }
+
+ temp_high_test = (temp_high_orig - 1) & MAX_12BIT;
+
+ error = tsc2005_write(ts, TSC2005_REG_TEMP_HIGH, temp_high_test);
+ if (error) {
+ dev_warn(dev, "selftest failed: write error %d\n", error);
+ success = false;
+ goto out;
+ }
+
+ error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+ if (error) {
+ dev_warn(dev, "selftest failed: read error %d after write\n",
+ error);
+ success = false;
+ goto out;
+ }
+
+ if (temp_high != temp_high_test) {
+ dev_warn(dev, "selftest failed: %d != %d\n",
+ temp_high, temp_high_test);
+ success = false;
+ }
+
+ /* hardware reset */
+ ts->set_reset(false);
+ usleep_range(100, 500); /* only 10us required */
+ ts->set_reset(true);
+
+ if (!success)
+ goto out;
+
+ /* test that the reset really happened */
+ error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+ if (error) {
+ dev_warn(dev, "selftest failed: read error %d after reset\n",
+ error);
+ success = false;
+ goto out;
+ }
+
+ if (temp_high != temp_high_orig) {
+ dev_warn(dev, "selftest failed after reset: %d != %d\n",
+ temp_high, temp_high_orig);
+ success = false;
+ }
+
+out:
+ __tsc2005_enable(ts);
+ mutex_unlock(&ts->mutex);
+
+ return sprintf(buf, "%d\n", success);
+}
+
+static DEVICE_ATTR(selftest, S_IRUGO, tsc2005_selftest_show, NULL);
+
+static struct attribute *tsc2005_attrs[] = {
+ &dev_attr_selftest.attr,
+ NULL
+};
+
+static mode_t tsc2005_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct spi_device *spi = to_spi_device(dev);
+ struct tsc2005 *ts = spi_get_drvdata(spi);
+ mode_t mode = attr->mode;
+
+ if (attr == &dev_attr_selftest.attr) {
+ if (!ts->set_reset)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static const struct attribute_group tsc2005_attr_group = {
+ .is_visible = tsc2005_attr_is_visible,
+ .attrs = tsc2005_attrs,
+};
+
+static void tsc2005_esd_work(struct work_struct *work)
+{
+ struct tsc2005 *ts = container_of(work, struct tsc2005, esd_work.work);
+ int error;
+ u16 r;
+
+ mutex_lock(&ts->mutex);
+
+ if (time_is_after_jiffies(ts->last_valid_interrupt +
+ msecs_to_jiffies(ts->esd_timeout)))
+ goto out;
+
+	/* We should be able to read the register without disabling interrupts. */
+ error = tsc2005_read(ts, TSC2005_REG_CFR0, &r);
+ if (!error &&
+ !((r ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK)) {
+ goto out;
+ }
+
+ /*
+ * If we could not read our known value from configuration register 0
+ * then we should reset the controller as if from power-up and start
+ * scanning again.
+ */
+ dev_info(&ts->spi->dev, "TSC2005 not responding - resetting\n");
+
+ disable_irq(ts->spi->irq);
+ del_timer_sync(&ts->penup_timer);
+
+ tsc2005_update_pen_state(ts, 0, 0, 0);
+
+ ts->set_reset(false);
+ usleep_range(100, 500); /* only 10us required */
+ ts->set_reset(true);
+
+ enable_irq(ts->spi->irq);
+ tsc2005_start_scan(ts);
+
+out:
+ /* re-arm the watchdog */
+ schedule_delayed_work(&ts->esd_work,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(ts->esd_timeout)));
+ mutex_unlock(&ts->mutex);
+}
+
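The watchdog above decides whether to reset by comparing the CFR0 read-back with the expected init value over TSC2005_CFR0_RW_MASK only, i.e. over the bits that read back the same as they were written. A small offline check of that masking (the corrupted read-back value is hypothetical):

#include <stdio.h>

#define TSC2005_CFR0_INITVALUE	0xb340	/* composed from the CFR0 defines */
#define TSC2005_CFR0_RW_MASK	0x3fff

static int cfr0_needs_reset(unsigned int readback)
{
	return (readback ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK;
}

int main(void)
{
	/* healthy part: only the non-RW bits may differ */
	printf("0x3340 -> %s\n", cfr0_needs_reset(0x3340) ? "reset" : "ok");
	/* hypothetical ESD-corrupted read-back */
	printf("0x0000 -> %s\n", cfr0_needs_reset(0x0000) ? "reset" : "ok");
	return 0;
}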
+static int tsc2005_open(struct input_dev *input)
+{
+ struct tsc2005 *ts = input_get_drvdata(input);
+
+ mutex_lock(&ts->mutex);
+
+ if (!ts->suspended)
+ __tsc2005_enable(ts);
+
+ ts->opened = true;
+
+ mutex_unlock(&ts->mutex);
+
+ return 0;
+}
+
+static void tsc2005_close(struct input_dev *input)
+{
+ struct tsc2005 *ts = input_get_drvdata(input);
+
+ mutex_lock(&ts->mutex);
+
+ if (!ts->suspended)
+ __tsc2005_disable(ts);
+
+ ts->opened = false;
+
+ mutex_unlock(&ts->mutex);
+}
+
+static void __devinit tsc2005_setup_spi_xfer(struct tsc2005 *ts)
+{
+ tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, false);
+ tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, false);
+ tsc2005_setup_read(&ts->spi_z1, TSC2005_REG_Z1, false);
+ tsc2005_setup_read(&ts->spi_z2, TSC2005_REG_Z2, true);
+
+ spi_message_init(&ts->spi_read_msg);
+ spi_message_add_tail(&ts->spi_x.spi_xfer, &ts->spi_read_msg);
+ spi_message_add_tail(&ts->spi_y.spi_xfer, &ts->spi_read_msg);
+ spi_message_add_tail(&ts->spi_z1.spi_xfer, &ts->spi_read_msg);
+ spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
+}
+
+static int __devinit tsc2005_probe(struct spi_device *spi)
+{
+ const struct tsc2005_platform_data *pdata = spi->dev.platform_data;
+ struct tsc2005 *ts;
+ struct input_dev *input_dev;
+ unsigned int max_x, max_y, max_p;
+ unsigned int fudge_x, fudge_y, fudge_p;
+ int error;
+
+ if (!pdata) {
+ dev_dbg(&spi->dev, "no platform data\n");
+ return -ENODEV;
+ }
+
+ fudge_x = pdata->ts_x_fudge ? : 4;
+ fudge_y = pdata->ts_y_fudge ? : 8;
+ fudge_p = pdata->ts_pressure_fudge ? : 2;
+ max_x = pdata->ts_x_max ? : MAX_12BIT;
+ max_y = pdata->ts_y_max ? : MAX_12BIT;
+ max_p = pdata->ts_pressure_max ? : MAX_12BIT;
+
+ if (spi->irq <= 0) {
+ dev_dbg(&spi->dev, "no irq\n");
+ return -ENODEV;
+ }
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = TSC2005_SPI_MAX_SPEED_HZ;
+
+ error = spi_setup(spi);
+ if (error)
+ return error;
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!ts || !input_dev) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ ts->spi = spi;
+ ts->idev = input_dev;
+
+ ts->x_plate_ohm = pdata->ts_x_plate_ohm ? : 280;
+ ts->esd_timeout = pdata->esd_timeout_ms;
+ ts->set_reset = pdata->set_reset;
+
+ mutex_init(&ts->mutex);
+
+ spin_lock_init(&ts->lock);
+ setup_timer(&ts->penup_timer, tsc2005_penup_timer, (unsigned long)ts);
+
+ INIT_DELAYED_WORK(&ts->esd_work, tsc2005_esd_work);
+
+ tsc2005_setup_spi_xfer(ts);
+
+ snprintf(ts->phys, sizeof(ts->phys),
+ "%s/input-ts", dev_name(&spi->dev));
+
+ input_dev->name = "TSC2005 touchscreen";
+ input_dev->phys = ts->phys;
+ input_dev->id.bustype = BUS_SPI;
+ input_dev->dev.parent = &spi->dev;
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, max_x, fudge_x, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, max_y, fudge_y, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0);
+
+ input_dev->open = tsc2005_open;
+ input_dev->close = tsc2005_close;
+
+ input_set_drvdata(input_dev, ts);
+
+ /* Ensure the touchscreen is off */
+ tsc2005_stop_scan(ts);
+
+ error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
+ IRQF_TRIGGER_RISING, "tsc2005", ts);
+ if (error) {
+ dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
+ goto err_free_mem;
+ }
+
+ spi_set_drvdata(spi, ts);
+ error = sysfs_create_group(&spi->dev.kobj, &tsc2005_attr_group);
+ if (error) {
+ dev_err(&spi->dev,
+ "Failed to create sysfs attributes, err: %d\n", error);
+ goto err_clear_drvdata;
+ }
+
+ error = input_register_device(ts->idev);
+ if (error) {
+ dev_err(&spi->dev,
+ "Failed to register input device, err: %d\n", error);
+ goto err_remove_sysfs;
+ }
+
+ set_irq_wake(spi->irq, 1);
+ return 0;
+
+err_remove_sysfs:
+ sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
+err_clear_drvdata:
+ spi_set_drvdata(spi, NULL);
+ free_irq(spi->irq, ts);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(ts);
+ return error;
+}
+
+static int __devexit tsc2005_remove(struct spi_device *spi)
+{
+ struct tsc2005 *ts = spi_get_drvdata(spi);
+
+ sysfs_remove_group(&ts->spi->dev.kobj, &tsc2005_attr_group);
+
+ free_irq(ts->spi->irq, ts);
+ input_unregister_device(ts->idev);
+ kfree(ts);
+
+ spi_set_drvdata(spi, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tsc2005_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct tsc2005 *ts = spi_get_drvdata(spi);
+
+ mutex_lock(&ts->mutex);
+
+ if (!ts->suspended && ts->opened)
+ __tsc2005_disable(ts);
+
+ ts->suspended = true;
+
+ mutex_unlock(&ts->mutex);
+
+ return 0;
+}
+
+static int tsc2005_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct tsc2005 *ts = spi_get_drvdata(spi);
+
+ mutex_lock(&ts->mutex);
+
+ if (ts->suspended && ts->opened)
+ __tsc2005_enable(ts);
+
+ ts->suspended = false;
+
+ mutex_unlock(&ts->mutex);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tsc2005_pm_ops, tsc2005_suspend, tsc2005_resume);
+
+static struct spi_driver tsc2005_driver = {
+ .driver = {
+ .name = "tsc2005",
+ .owner = THIS_MODULE,
+ .pm = &tsc2005_pm_ops,
+ },
+ .probe = tsc2005_probe,
+ .remove = __devexit_p(tsc2005_remove),
+};
+
+static int __init tsc2005_init(void)
+{
+ return spi_register_driver(&tsc2005_driver);
+}
+module_init(tsc2005_init);
+
+static void __exit tsc2005_exit(void)
+{
+ spi_unregister_driver(&tsc2005_driver);
+}
+module_exit(tsc2005_exit);
+
+MODULE_AUTHOR("Lauri Leukkunen <lauri.leukkunen@nokia.com>");
+MODULE_DESCRIPTION("TSC2005 Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
new file mode 100644
index 000000000000..6ae054f8e0aa
--- /dev/null
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -0,0 +1,368 @@
+/*
+ * Touchscreen driver for WM831x PMICs
+ *
+ * Copyright 2011 Wolfson Microelectronics plc.
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/pm.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/irq.h>
+#include <linux/mfd/wm831x/pdata.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/*
+ * R16424 (0x4028) - Touch Control 1
+ */
+#define WM831X_TCH_ENA 0x8000 /* TCH_ENA */
+#define WM831X_TCH_CVT_ENA 0x4000 /* TCH_CVT_ENA */
+#define WM831X_TCH_SLPENA 0x1000 /* TCH_SLPENA */
+#define WM831X_TCH_Z_ENA 0x0400 /* TCH_Z_ENA */
+#define WM831X_TCH_Y_ENA 0x0200 /* TCH_Y_ENA */
+#define WM831X_TCH_X_ENA 0x0100 /* TCH_X_ENA */
+#define WM831X_TCH_DELAY_MASK 0x00E0 /* TCH_DELAY - [7:5] */
+#define WM831X_TCH_DELAY_SHIFT 5 /* TCH_DELAY - [7:5] */
+#define WM831X_TCH_DELAY_WIDTH 3 /* TCH_DELAY - [7:5] */
+#define WM831X_TCH_RATE_MASK 0x001F /* TCH_RATE - [4:0] */
+#define WM831X_TCH_RATE_SHIFT 0 /* TCH_RATE - [4:0] */
+#define WM831X_TCH_RATE_WIDTH 5 /* TCH_RATE - [4:0] */
+
+/*
+ * R16425 (0x4029) - Touch Control 2
+ */
+#define WM831X_TCH_PD_WK 0x2000 /* TCH_PD_WK */
+#define WM831X_TCH_5WIRE 0x1000 /* TCH_5WIRE */
+#define WM831X_TCH_PDONLY 0x0800 /* TCH_PDONLY */
+#define WM831X_TCH_ISEL 0x0100 /* TCH_ISEL */
+#define WM831X_TCH_RPU_MASK 0x000F /* TCH_RPU - [3:0] */
+#define WM831X_TCH_RPU_SHIFT 0 /* TCH_RPU - [3:0] */
+#define WM831X_TCH_RPU_WIDTH 4 /* TCH_RPU - [3:0] */
+
+/*
+ * R16426-8 (0x402A-C) - Touch Data X/Y/X
+ */
+#define WM831X_TCH_PD 0x8000 /* TCH_PD1 */
+#define WM831X_TCH_DATA_MASK 0x0FFF /* TCH_DATA - [11:0] */
+#define WM831X_TCH_DATA_SHIFT 0 /* TCH_DATA - [11:0] */
+#define WM831X_TCH_DATA_WIDTH 12 /* TCH_DATA - [11:0] */
+
+struct wm831x_ts {
+ struct input_dev *input_dev;
+ struct wm831x *wm831x;
+ unsigned int data_irq;
+ unsigned int pd_irq;
+ bool pressure;
+ bool pen_down;
+};
+
+static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
+{
+ struct wm831x_ts *wm831x_ts = irq_data;
+ struct wm831x *wm831x = wm831x_ts->wm831x;
+ static int data_types[] = { ABS_X, ABS_Y, ABS_PRESSURE };
+ u16 data[3];
+ int count;
+ int i, ret;
+
+ if (wm831x_ts->pressure)
+ count = 3;
+ else
+ count = 2;
+
+ wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
+ WM831X_TCHDATA_EINT, WM831X_TCHDATA_EINT);
+
+ ret = wm831x_bulk_read(wm831x, WM831X_TOUCH_DATA_X, count,
+ data);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to read touch data: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ /*
+	 * Every sample carries a pen-down flag; report pen up as soon as any
+	 * individual sample indicates the pen has been lifted.
+ */
+ wm831x_ts->pen_down = true;
+ for (i = 0; i < count; i++) {
+ if (!(data[i] & WM831X_TCH_PD)) {
+ wm831x_ts->pen_down = false;
+ continue;
+ }
+ input_report_abs(wm831x_ts->input_dev, data_types[i],
+ data[i] & WM831X_TCH_DATA_MASK);
+ }
+
+ if (!wm831x_ts->pen_down) {
+ disable_irq_nosync(wm831x_ts->data_irq);
+
+ /* Don't need data any more */
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
+ WM831X_TCH_Z_ENA, 0);
+
+ /* Flush any final samples that arrived while reading */
+ wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
+ WM831X_TCHDATA_EINT, WM831X_TCHDATA_EINT);
+
+ wm831x_bulk_read(wm831x, WM831X_TOUCH_DATA_X, count, data);
+
+ if (wm831x_ts->pressure)
+ input_report_abs(wm831x_ts->input_dev,
+ ABS_PRESSURE, 0);
+
+ input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 0);
+ }
+
+ input_sync(wm831x_ts->input_dev);
+
+ return IRQ_HANDLED;
+}
+
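Each 16-bit touch data word read above carries a pen-detect flag in bit 15 and a 12-bit conversion result in the low bits; a short decode sketch (the raw words are made up for illustration):

#include <stdio.h>

#define WM831X_TCH_PD		0x8000
#define WM831X_TCH_DATA_MASK	0x0fff

int main(void)
{
	/* hypothetical X, Y and pressure words from WM831X_TOUCH_DATA_X.. */
	const unsigned int data[] = { 0x8abc, 0x8123, 0x7050 };
	unsigned int i;

	for (i = 0; i < sizeof(data) / sizeof(data[0]); i++)
		printf("word 0x%04x: pen %s, value %u\n", data[i],
		       (data[i] & WM831X_TCH_PD) ? "down" : "up",
		       data[i] & WM831X_TCH_DATA_MASK);

	return 0;
}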
+static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data)
+{
+ struct wm831x_ts *wm831x_ts = irq_data;
+ struct wm831x *wm831x = wm831x_ts->wm831x;
+ int ena = 0;
+
+ /* Start collecting data */
+ if (wm831x_ts->pressure)
+ ena |= WM831X_TCH_Z_ENA;
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA,
+ WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | ena);
+
+ input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1);
+ input_sync(wm831x_ts->input_dev);
+
+ wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
+ WM831X_TCHPD_EINT, WM831X_TCHPD_EINT);
+
+ wm831x_ts->pen_down = true;
+ enable_irq(wm831x_ts->data_irq);
+
+ return IRQ_HANDLED;
+}
+
+static int wm831x_ts_input_open(struct input_dev *idev)
+{
+ struct wm831x_ts *wm831x_ts = input_get_drvdata(idev);
+ struct wm831x *wm831x = wm831x_ts->wm831x;
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_ENA | WM831X_TCH_CVT_ENA |
+ WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
+ WM831X_TCH_Z_ENA, WM831X_TCH_ENA);
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_CVT_ENA, WM831X_TCH_CVT_ENA);
+
+ return 0;
+}
+
+static void wm831x_ts_input_close(struct input_dev *idev)
+{
+ struct wm831x_ts *wm831x_ts = input_get_drvdata(idev);
+ struct wm831x *wm831x = wm831x_ts->wm831x;
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_ENA | WM831X_TCH_CVT_ENA |
+ WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
+ WM831X_TCH_Z_ENA, 0);
+
+ if (wm831x_ts->pen_down)
+ disable_irq(wm831x_ts->data_irq);
+}
+
+static __devinit int wm831x_ts_probe(struct platform_device *pdev)
+{
+ struct wm831x_ts *wm831x_ts;
+ struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_pdata *core_pdata = dev_get_platdata(pdev->dev.parent);
+ struct wm831x_touch_pdata *pdata = NULL;
+ struct input_dev *input_dev;
+ int error;
+
+ if (core_pdata)
+ pdata = core_pdata->touch;
+
+ wm831x_ts = kzalloc(sizeof(struct wm831x_ts), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!wm831x_ts || !input_dev) {
+ error = -ENOMEM;
+ goto err_alloc;
+ }
+
+ wm831x_ts->wm831x = wm831x;
+ wm831x_ts->input_dev = input_dev;
+
+ /*
+ * If we have a direct IRQ use it, otherwise use the interrupt
+ * from the WM831x IRQ controller.
+ */
+ if (pdata && pdata->data_irq)
+ wm831x_ts->data_irq = pdata->data_irq;
+ else
+ wm831x_ts->data_irq = platform_get_irq_byname(pdev, "TCHDATA");
+
+ if (pdata && pdata->pd_irq)
+ wm831x_ts->pd_irq = pdata->pd_irq;
+ else
+ wm831x_ts->pd_irq = platform_get_irq_byname(pdev, "TCHPD");
+
+ if (pdata)
+ wm831x_ts->pressure = pdata->pressure;
+ else
+ wm831x_ts->pressure = true;
+
+ /* Five wire touchscreens can't report pressure */
+ if (pdata && pdata->fivewire) {
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_5WIRE, WM831X_TCH_5WIRE);
+
+ /* Pressure measurements are not possible for five wire mode */
+ WARN_ON(pdata->pressure && pdata->fivewire);
+ wm831x_ts->pressure = false;
+ } else {
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_5WIRE, 0);
+ }
+
+ if (pdata) {
+ switch (pdata->isel) {
+ default:
+ dev_err(&pdev->dev, "Unsupported ISEL setting: %d\n",
+ pdata->isel);
+ /* Fall through */
+ case 200:
+ case 0:
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_ISEL, 0);
+ break;
+ case 400:
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_ISEL, WM831X_TCH_ISEL);
+ break;
+ }
+ }
+
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
+ WM831X_TCH_PDONLY, 0);
+
+ /* Default to 96 samples/sec */
+ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
+ WM831X_TCH_RATE_MASK, 6);
+
+ error = request_threaded_irq(wm831x_ts->data_irq,
+ NULL, wm831x_ts_data_irq,
+ IRQF_ONESHOT,
+ "Touchscreen data", wm831x_ts);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n",
+ wm831x_ts->data_irq, error);
+ goto err_alloc;
+ }
+ disable_irq(wm831x_ts->data_irq);
+
+ error = request_threaded_irq(wm831x_ts->pd_irq,
+ NULL, wm831x_ts_pen_down_irq,
+ IRQF_ONESHOT,
+ "Touchscreen pen down", wm831x_ts);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to request pen down IRQ %d: %d\n",
+ wm831x_ts->pd_irq, error);
+ goto err_data_irq;
+ }
+
+ /* set up touch configuration */
+ input_dev->name = "WM831x touchscreen";
+ input_dev->phys = "wm831x";
+ input_dev->open = wm831x_ts_input_open;
+ input_dev->close = wm831x_ts_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, 4095, 5, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 4095, 5, 0);
+ if (wm831x_ts->pressure)
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 4095, 5, 0);
+
+ input_set_drvdata(input_dev, wm831x_ts);
+ input_dev->dev.parent = &pdev->dev;
+
+ error = input_register_device(input_dev);
+ if (error)
+ goto err_pd_irq;
+
+ platform_set_drvdata(pdev, wm831x_ts);
+ return 0;
+
+err_pd_irq:
+ free_irq(wm831x_ts->pd_irq, wm831x_ts);
+err_data_irq:
+ free_irq(wm831x_ts->data_irq, wm831x_ts);
+err_alloc:
+ input_free_device(input_dev);
+ kfree(wm831x_ts);
+
+ return error;
+}
+
+static __devexit int wm831x_ts_remove(struct platform_device *pdev)
+{
+ struct wm831x_ts *wm831x_ts = platform_get_drvdata(pdev);
+
+ free_irq(wm831x_ts->pd_irq, wm831x_ts);
+ free_irq(wm831x_ts->data_irq, wm831x_ts);
+ input_unregister_device(wm831x_ts->input_dev);
+ kfree(wm831x_ts);
+
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver wm831x_ts_driver = {
+ .driver = {
+ .name = "wm831x-touch",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_ts_probe,
+ .remove = __devexit_p(wm831x_ts_remove),
+};
+
+static int __init wm831x_ts_init(void)
+{
+ return platform_driver_register(&wm831x_ts_driver);
+}
+module_init(wm831x_ts_init);
+
+static void __exit wm831x_ts_exit(void)
+{
+ platform_driver_unregister(&wm831x_ts_driver);
+}
+module_exit(wm831x_ts_exit);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM831x PMIC touchscreen driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm831x-touch");
diff --git a/drivers/isdn/hisax/Makefile b/drivers/isdn/hisax/Makefile
index ab638b083df9..646368fe41c9 100644
--- a/drivers/isdn/hisax/Makefile
+++ b/drivers/isdn/hisax/Makefile
@@ -4,7 +4,7 @@
# Define maximum number of cards
-EXTRA_CFLAGS += -DHISAX_MAX_CARDS=$(CONFIG_HISAX_MAX_CARDS)
+ccflags-y := -DHISAX_MAX_CARDS=$(CONFIG_HISAX_MAX_CARDS)
obj-$(CONFIG_ISDN_DRV_HISAX) += hisax.o
obj-$(CONFIG_HISAX_SEDLBAUER_CS) += sedlbauer_cs.o
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index 199f374cf9da..f6e108d0125f 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -206,7 +206,7 @@ recv_Bchannel(struct bchannel *bch, unsigned int id)
hh->id = id;
if (bch->rcount >= 64) {
printk(KERN_WARNING "B-channel %p receive queue overflow, "
- "fushing!\n", bch);
+ "flushing!\n", bch);
skb_queue_purge(&bch->rqueue);
bch->rcount = 0;
return;
@@ -231,7 +231,7 @@ recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
if (bch->rcount >= 64) {
printk(KERN_WARNING "B-channel %p receive queue overflow, "
- "fushing!\n", bch);
+ "flushing!\n", bch);
skb_queue_purge(&bch->rqueue);
bch->rcount = 0;
}
@@ -279,7 +279,7 @@ confirm_Bsend(struct bchannel *bch)
if (bch->rcount >= 64) {
printk(KERN_WARNING "B-channel %p receive queue overflow, "
- "fushing!\n", bch);
+ "flushing!\n", bch);
skb_queue_purge(&bch->rqueue);
bch->rcount = 0;
}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6f190f4cdbc0..9bec8699b8a3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -34,6 +34,16 @@ config LEDS_ATMEL_PWM
This option enables support for LEDs driven using outputs
of the dedicated PWM controller found on newer Atmel SOCs.
+config LEDS_LM3530
+ tristate "LCD Backlight driver for LM3530"
+ depends on LEDS_CLASS
+ depends on I2C
+ help
+	  This option enables support for an LCD backlight driven by the
+	  LM3530 ambient light sensor (ALS) chip. The backlight can be
+	  controlled manually, by a PWM input, or automatically from the
+	  ambient light level.
+
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index aae6989ff6b6..39c80fca84d2 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
+obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index e672b44ee172..416def84d045 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -17,6 +17,7 @@
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>
#define LED_PWM_SHIFT (3)
@@ -118,7 +119,8 @@ static void pm860x_led_work(struct work_struct *work)
struct pm860x_led *led;
struct pm860x_chip *chip;
- int mask;
+ unsigned char buf[3];
+ int mask, ret;
led = container_of(work, struct pm860x_led, work);
chip = led->chip;
@@ -128,16 +130,27 @@ static void pm860x_led_work(struct work_struct *work)
pm860x_set_bits(led->i2c, __led_off(led->port),
LED_CURRENT_MASK, led->iset);
}
+ pm860x_set_bits(led->i2c, __blink_off(led->port),
+ LED_BLINK_MASK, LED_ON_CONTINUOUS);
mask = __blink_ctl_mask(led->port);
pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, mask);
- } else if (led->brightness == 0) {
- pm860x_set_bits(led->i2c, __led_off(led->port),
- LED_CURRENT_MASK, 0);
- mask = __blink_ctl_mask(led->port);
- pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, 0);
}
pm860x_set_bits(led->i2c, __led_off(led->port), LED_PWM_MASK,
led->brightness);
+
+ if (led->brightness == 0) {
+ pm860x_bulk_read(led->i2c, __led_off(led->port), 3, buf);
+ ret = buf[0] & LED_PWM_MASK;
+ ret |= buf[1] & LED_PWM_MASK;
+ ret |= buf[2] & LED_PWM_MASK;
+ if (ret == 0) {
+			/* unset the current since no LED is lit */
+ pm860x_set_bits(led->i2c, __led_off(led->port),
+ LED_CURRENT_MASK, 0);
+ mask = __blink_ctl_mask(led->port);
+ pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, 0);
+ }
+ }
led->current_brightness = led->brightness;
dev_dbg(chip->dev, "Update LED. (reg:%d, brightness:%d)\n",
__led_off(led->port), led->brightness);
@@ -153,31 +166,12 @@ static void pm860x_led_set(struct led_classdev *cdev,
schedule_work(&data->work);
}
-static int __check_device(struct pm860x_led_pdata *pdata, char *name)
-{
- struct pm860x_led_pdata *p = pdata;
- int ret = -EINVAL;
-
- while (p && p->id) {
- if ((p->id != PM8606_ID_LED) || (p->flags < 0))
- break;
-
- if (!strncmp(name, pm860x_led_name[p->flags],
- MFD_NAME_SIZE)) {
- ret = (int)p->flags;
- break;
- }
- p++;
- }
- return ret;
-}
-
static int pm860x_led_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_platform_data *pm860x_pdata;
struct pm860x_led_pdata *pdata;
struct pm860x_led *data;
+ struct mfd_cell *cell;
struct resource *res;
int ret;
@@ -187,10 +181,11 @@ static int pm860x_led_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (pdev->dev.parent->platform_data) {
- pm860x_pdata = pdev->dev.parent->platform_data;
- pdata = pm860x_pdata->led;
- } else {
+ cell = pdev->dev.platform_data;
+ if (cell == NULL)
+ return -ENODEV;
+ pdata = cell->mfd_data;
+ if (pdata == NULL) {
dev_err(&pdev->dev, "No platform data!\n");
return -EINVAL;
}
@@ -198,12 +193,12 @@ static int pm860x_led_probe(struct platform_device *pdev)
data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
- strncpy(data->name, res->name, MFD_NAME_SIZE);
+ strncpy(data->name, res->name, MFD_NAME_SIZE - 1);
dev_set_drvdata(&pdev->dev, data);
data->chip = chip;
data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->iset = pdata->iset;
- data->port = __check_device(pdata, data->name);
+ data->port = pdata->flags;
if (data->port < 0) {
dev_err(&pdev->dev, "check device failed\n");
kfree(data);
@@ -221,6 +216,7 @@ static int pm860x_led_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
goto out;
}
+ pm860x_led_set(&data->cdev, 0);
return 0;
out:
kfree(data);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 19dc4b61a105..3ebe3824662d 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -19,7 +19,7 @@
#include <linux/leds.h>
#include <linux/leds-bd2802.h>
#include <linux/slab.h>
-
+#include <linux/pm.h>
#define LED_CTL(rgb2en, rgb1en) ((rgb2en) << 4 | ((rgb1en) << 0))
@@ -319,20 +319,6 @@ static void bd2802_turn_off(struct bd2802_led *led, enum led_ids id,
bd2802_update_state(led, id, color, BD2802_OFF);
}
-static void bd2802_restore_state(struct bd2802_led *led)
-{
- int i;
-
- for (i = 0; i < LED_NUM; i++) {
- if (led->led[i].r)
- bd2802_turn_on(led, i, RED, led->led[i].r);
- if (led->led[i].g)
- bd2802_turn_on(led, i, GREEN, led->led[i].g);
- if (led->led[i].b)
- bd2802_turn_on(led, i, BLUE, led->led[i].b);
- }
-}
-
#define BD2802_SET_REGISTER(reg_addr, reg_name) \
static ssize_t bd2802_store_reg##reg_addr(struct device *dev, \
struct device_attribute *attr, const char *buf, size_t count) \
@@ -761,8 +747,25 @@ static int __exit bd2802_remove(struct i2c_client *client)
return 0;
}
-static int bd2802_suspend(struct i2c_client *client, pm_message_t mesg)
+#ifdef CONFIG_PM
+
+static void bd2802_restore_state(struct bd2802_led *led)
{
+ int i;
+
+ for (i = 0; i < LED_NUM; i++) {
+ if (led->led[i].r)
+ bd2802_turn_on(led, i, RED, led->led[i].r);
+ if (led->led[i].g)
+ bd2802_turn_on(led, i, GREEN, led->led[i].g);
+ if (led->led[i].b)
+ bd2802_turn_on(led, i, BLUE, led->led[i].b);
+ }
+}
+
+static int bd2802_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
struct bd2802_led *led = i2c_get_clientdata(client);
gpio_set_value(led->pdata->reset_gpio, 0);
@@ -770,8 +773,9 @@ static int bd2802_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int bd2802_resume(struct i2c_client *client)
+static int bd2802_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct bd2802_led *led = i2c_get_clientdata(client);
if (!bd2802_is_all_off(led) || led->adf_on) {
@@ -782,6 +786,12 @@ static int bd2802_resume(struct i2c_client *client)
return 0;
}
+static SIMPLE_DEV_PM_OPS(bd2802_pm, bd2802_suspend, bd2802_resume);
+#define BD2802_PM (&bd2802_pm)
+#else /* CONFIG_PM */
+#define BD2802_PM NULL
+#endif
+
static const struct i2c_device_id bd2802_id[] = {
{ "BD2802", 0 },
{ }
@@ -791,11 +801,10 @@ MODULE_DEVICE_TABLE(i2c, bd2802_id);
static struct i2c_driver bd2802_i2c_driver = {
.driver = {
.name = "BD2802",
+ .pm = BD2802_PM,
},
.probe = bd2802_probe,
.remove = __exit_p(bd2802_remove),
- .suspend = bd2802_suspend,
- .resume = bd2802_resume,
.id_table = bd2802_id,
};
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
new file mode 100644
index 000000000000..e7089a1f6cb6
--- /dev/null
+++ b/drivers/leds/leds-lm3530.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA.
+ * Copyright (C) 2009 Motorola, Inc.
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Simple driver for National Semiconductor LM3530 Backlight driver chip
+ *
+ * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
+ * based on leds-lm3530.c by Dan Murphy <D.Murphy@motorola.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/led-lm3530.h>
+#include <linux/types.h>
+
+#define LM3530_LED_DEV "lcd-backlight"
+#define LM3530_NAME "lm3530-led"
+
+#define LM3530_GEN_CONFIG 0x10
+#define LM3530_ALS_CONFIG 0x20
+#define LM3530_BRT_RAMP_RATE 0x30
+#define LM3530_ALS_ZONE_REG 0x40
+#define LM3530_ALS_IMP_SELECT 0x41
+#define LM3530_BRT_CTRL_REG 0xA0
+#define LM3530_ALS_ZB0_REG 0x60
+#define LM3530_ALS_ZB1_REG 0x61
+#define LM3530_ALS_ZB2_REG 0x62
+#define LM3530_ALS_ZB3_REG 0x63
+#define LM3530_ALS_Z0T_REG 0x70
+#define LM3530_ALS_Z1T_REG 0x71
+#define LM3530_ALS_Z2T_REG 0x72
+#define LM3530_ALS_Z3T_REG 0x73
+#define LM3530_ALS_Z4T_REG 0x74
+#define LM3530_REG_MAX 15
+
+/* General Control Register */
+#define LM3530_EN_I2C_SHIFT (0)
+#define LM3530_RAMP_LAW_SHIFT (1)
+#define LM3530_MAX_CURR_SHIFT (2)
+#define LM3530_EN_PWM_SHIFT (5)
+#define LM3530_PWM_POL_SHIFT (6)
+#define LM3530_EN_PWM_SIMPLE_SHIFT (7)
+
+#define LM3530_ENABLE_I2C (1 << LM3530_EN_I2C_SHIFT)
+#define LM3530_ENABLE_PWM (1 << LM3530_EN_PWM_SHIFT)
+#define LM3530_POL_LOW (1 << LM3530_PWM_POL_SHIFT)
+#define LM3530_ENABLE_PWM_SIMPLE (1 << LM3530_EN_PWM_SIMPLE_SHIFT)
+
+/* ALS Config Register Options */
+#define LM3530_ALS_AVG_TIME_SHIFT (0)
+#define LM3530_EN_ALS_SHIFT (3)
+#define LM3530_ALS_SEL_SHIFT (5)
+
+#define LM3530_ENABLE_ALS (3 << LM3530_EN_ALS_SHIFT)
+
+/* Brightness Ramp Rate Register */
+#define LM3530_BRT_RAMP_FALL_SHIFT (0)
+#define LM3530_BRT_RAMP_RISE_SHIFT (3)
+
+/* ALS Resistor Select */
+#define LM3530_ALS1_IMP_SHIFT (0)
+#define LM3530_ALS2_IMP_SHIFT (4)
+
+/* Zone Boundary Register defaults */
+#define LM3530_DEF_ZB_0 (0x33)
+#define LM3530_DEF_ZB_1 (0x66)
+#define LM3530_DEF_ZB_2 (0x99)
+#define LM3530_DEF_ZB_3 (0xCC)
+
+/* Zone Target Register defaults */
+#define LM3530_DEF_ZT_0 (0x19)
+#define LM3530_DEF_ZT_1 (0x33)
+#define LM3530_DEF_ZT_2 (0x4C)
+#define LM3530_DEF_ZT_3 (0x66)
+#define LM3530_DEF_ZT_4 (0x7F)
+
+struct lm3530_mode_map {
+ const char *mode;
+ enum lm3530_mode mode_val;
+};
+
+static struct lm3530_mode_map mode_map[] = {
+ { "man", LM3530_BL_MODE_MANUAL },
+ { "als", LM3530_BL_MODE_ALS },
+ { "pwm", LM3530_BL_MODE_PWM },
+};
+
+/**
+ * struct lm3530_data
+ * @led_dev: led class device
+ * @client: i2c client
+ * @pdata: LM3530 platform data
+ * @mode: mode of operation - manual, ALS, PWM
+ */
+struct lm3530_data {
+ struct led_classdev led_dev;
+ struct i2c_client *client;
+ struct lm3530_platform_data *pdata;
+ enum lm3530_mode mode;
+};
+
+static const u8 lm3530_reg[LM3530_REG_MAX] = {
+ LM3530_GEN_CONFIG,
+ LM3530_ALS_CONFIG,
+ LM3530_BRT_RAMP_RATE,
+ LM3530_ALS_ZONE_REG,
+ LM3530_ALS_IMP_SELECT,
+ LM3530_BRT_CTRL_REG,
+ LM3530_ALS_ZB0_REG,
+ LM3530_ALS_ZB1_REG,
+ LM3530_ALS_ZB2_REG,
+ LM3530_ALS_ZB3_REG,
+ LM3530_ALS_Z0T_REG,
+ LM3530_ALS_Z1T_REG,
+ LM3530_ALS_Z2T_REG,
+ LM3530_ALS_Z3T_REG,
+ LM3530_ALS_Z4T_REG,
+};
+
+static int lm3530_get_mode_from_str(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mode_map); i++)
+ if (sysfs_streq(str, mode_map[i].mode))
+ return mode_map[i].mode_val;
+
+ return -1;
+}
+
+static int lm3530_init_registers(struct lm3530_data *drvdata)
+{
+ int ret = 0;
+ int i;
+ u8 gen_config;
+ u8 als_config = 0;
+ u8 brt_ramp;
+ u8 als_imp_sel = 0;
+ u8 brightness;
+ u8 reg_val[LM3530_REG_MAX];
+ struct lm3530_platform_data *pltfm = drvdata->pdata;
+ struct i2c_client *client = drvdata->client;
+
+ gen_config = (pltfm->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
+ ((pltfm->max_current & 7) << LM3530_MAX_CURR_SHIFT);
+
+ if (drvdata->mode == LM3530_BL_MODE_MANUAL ||
+ drvdata->mode == LM3530_BL_MODE_ALS)
+ gen_config |= (LM3530_ENABLE_I2C);
+
+ if (drvdata->mode == LM3530_BL_MODE_ALS) {
+ als_config =
+ (pltfm->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
+ (LM3530_ENABLE_ALS) |
+ (pltfm->als_input_mode << LM3530_ALS_SEL_SHIFT);
+
+ als_imp_sel =
+ (pltfm->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
+ (pltfm->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
+ }
+
+ if (drvdata->mode == LM3530_BL_MODE_PWM)
+ gen_config |= (LM3530_ENABLE_PWM) |
+ (pltfm->pwm_pol_hi << LM3530_PWM_POL_SHIFT) |
+ (LM3530_ENABLE_PWM_SIMPLE);
+
+ brt_ramp = (pltfm->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
+ (pltfm->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
+
+ brightness = pltfm->brt_val;
+
+ reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
+ reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
+ reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
+ reg_val[3] = 0x00; /* LM3530_ALS_ZONE_REG */
+ reg_val[4] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */
+ reg_val[5] = brightness; /* LM3530_BRT_CTRL_REG */
+ reg_val[6] = LM3530_DEF_ZB_0; /* LM3530_ALS_ZB0_REG */
+ reg_val[7] = LM3530_DEF_ZB_1; /* LM3530_ALS_ZB1_REG */
+ reg_val[8] = LM3530_DEF_ZB_2; /* LM3530_ALS_ZB2_REG */
+ reg_val[9] = LM3530_DEF_ZB_3; /* LM3530_ALS_ZB3_REG */
+ reg_val[10] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
+ reg_val[11] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
+ reg_val[12] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
+ reg_val[13] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */
+ reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
+
+ for (i = 0; i < LM3530_REG_MAX; i++) {
+ ret = i2c_smbus_write_byte_data(client,
+ lm3530_reg[i], reg_val[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
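lm3530_init_registers() above assembles the general-configuration byte from shifted platform-data fields; the sketch below recomputes it for one hypothetical manual-mode setup (linear ramp law 0, max-current code 5) so the resulting register value can be checked by eye:

#include <stdio.h>

#define LM3530_EN_I2C_SHIFT	(0)
#define LM3530_RAMP_LAW_SHIFT	(1)
#define LM3530_MAX_CURR_SHIFT	(2)
#define LM3530_ENABLE_I2C	(1 << LM3530_EN_I2C_SHIFT)

int main(void)
{
	/* hypothetical platform data: ramp law 0, max-current code 5 */
	unsigned int brt_ramp_law = 0;
	unsigned int max_current = 5;
	unsigned char gen_config;

	gen_config = (brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
		     ((max_current & 7) << LM3530_MAX_CURR_SHIFT);

	/* manual (and ALS) modes also enable I2C brightness control */
	gen_config |= LM3530_ENABLE_I2C;

	printf("LM3530_GEN_CONFIG = 0x%02x\n", gen_config);	/* 0x15 */
	return 0;
}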
+static void lm3530_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ int err;
+ struct lm3530_data *drvdata =
+ container_of(led_cdev, struct lm3530_data, led_dev);
+
+ switch (drvdata->mode) {
+ case LM3530_BL_MODE_MANUAL:
+
+		/* set the brightness in the brightness control register */
+ err = i2c_smbus_write_byte_data(drvdata->client,
+ LM3530_BRT_CTRL_REG, brt_val / 2);
+ if (err)
+ dev_err(&drvdata->client->dev,
+ "Unable to set brightness: %d\n", err);
+ break;
+ case LM3530_BL_MODE_ALS:
+ break;
+ case LM3530_BL_MODE_PWM:
+ break;
+ default:
+ break;
+ }
+}
+
+
+static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
+ *attr, const char *buf, size_t size)
+{
+ int err;
+ struct i2c_client *client = container_of(
+ dev->parent, struct i2c_client, dev);
+ struct lm3530_data *drvdata = i2c_get_clientdata(client);
+ int mode;
+
+ mode = lm3530_get_mode_from_str(buf);
+ if (mode < 0) {
+ dev_err(dev, "Invalid mode\n");
+ return -EINVAL;
+ }
+
+ if (mode == LM3530_BL_MODE_MANUAL)
+ drvdata->mode = LM3530_BL_MODE_MANUAL;
+ else if (mode == LM3530_BL_MODE_ALS)
+ drvdata->mode = LM3530_BL_MODE_ALS;
+ else if (mode == LM3530_BL_MODE_PWM) {
+ dev_err(dev, "PWM mode not supported\n");
+ return -EINVAL;
+ }
+
+ err = lm3530_init_registers(drvdata);
+ if (err) {
+ dev_err(dev, "Setting %s Mode failed :%d\n", buf, err);
+ return err;
+ }
+
+ return sizeof(drvdata->mode);
+}
+
+static DEVICE_ATTR(mode, 0644, NULL, lm3530_mode_set);
+
+static int __devinit lm3530_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lm3530_platform_data *pdata = client->dev.platform_data;
+ struct lm3530_data *drvdata;
+ int err = 0;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "platform data required\n");
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* BL mode */
+ if (pdata->mode > LM3530_BL_MODE_PWM) {
+ dev_err(&client->dev, "Illegal Mode request\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "I2C_FUNC_I2C not supported\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ drvdata = kzalloc(sizeof(struct lm3530_data), GFP_KERNEL);
+ if (drvdata == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ drvdata->mode = pdata->mode;
+ drvdata->client = client;
+ drvdata->pdata = pdata;
+ drvdata->led_dev.name = LM3530_LED_DEV;
+ drvdata->led_dev.brightness_set = lm3530_brightness_set;
+
+ i2c_set_clientdata(client, drvdata);
+
+ err = lm3530_init_registers(drvdata);
+ if (err < 0) {
+ dev_err(&client->dev, "Register Init failed: %d\n", err);
+ err = -ENODEV;
+ goto err_reg_init;
+ }
+
+ err = led_classdev_register((struct device *)
+ &client->dev, &drvdata->led_dev);
+ if (err < 0) {
+ dev_err(&client->dev, "Register led class failed: %d\n", err);
+ err = -ENODEV;
+ goto err_class_register;
+ }
+
+ err = device_create_file(drvdata->led_dev.dev, &dev_attr_mode);
+ if (err < 0) {
+ dev_err(&client->dev, "File device creation failed: %d\n", err);
+ err = -ENODEV;
+ goto err_create_file;
+ }
+
+ return 0;
+
+err_create_file:
+ led_classdev_unregister(&drvdata->led_dev);
+err_class_register:
+err_reg_init:
+ kfree(drvdata);
+err_out:
+ return err;
+}
+
+static int __devexit lm3530_remove(struct i2c_client *client)
+{
+ struct lm3530_data *drvdata = i2c_get_clientdata(client);
+
+ device_remove_file(drvdata->led_dev.dev, &dev_attr_mode);
+ led_classdev_unregister(&drvdata->led_dev);
+ kfree(drvdata);
+ return 0;
+}
+
+static const struct i2c_device_id lm3530_id[] = {
+ {LM3530_NAME, 0},
+ {}
+};
+
+static struct i2c_driver lm3530_i2c_driver = {
+ .probe = lm3530_probe,
+ .remove = lm3530_remove,
+ .id_table = lm3530_id,
+ .driver = {
+ .name = LM3530_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init lm3530_init(void)
+{
+ return i2c_add_driver(&lm3530_i2c_driver);
+}
+
+static void __exit lm3530_exit(void)
+{
+ i2c_del_driver(&lm3530_i2c_driver);
+}
+
+module_init(lm3530_init);
+module_exit(lm3530_exit);
+
+MODULE_DESCRIPTION("Back Light driver for LM3530");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 80a3ae3c00b9..c0cff64a1ae6 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -534,7 +534,7 @@ static ssize_t lp5521_selftest(struct device *dev,
}
/* led class device attributes */
-static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
static struct attribute *lp5521_led_attributes[] = {
@@ -548,15 +548,15 @@ static struct attribute_group lp5521_led_attribute_group = {
};
/* device attributes */
-static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
show_engine1_mode, store_engine1_mode);
-static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
show_engine2_mode, store_engine2_mode);
-static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
show_engine3_mode, store_engine3_mode);
-static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
-static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
-static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
+static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
static struct attribute *lp5521_attributes[] = {
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index d0c4068ecddd..e19fed25f137 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -713,7 +713,7 @@ static ssize_t store_current(struct device *dev,
}
/* led class device attributes */
-static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
static struct attribute *lp5523_led_attributes[] = {
@@ -727,21 +727,21 @@ static struct attribute_group lp5523_led_attribute_group = {
};
/* device attributes */
-static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
show_engine1_mode, store_engine1_mode);
-static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
show_engine2_mode, store_engine2_mode);
-static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
show_engine3_mode, store_engine3_mode);
-static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUSR,
show_engine1_leds, store_engine1_leds);
-static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUSR,
show_engine2_leds, store_engine2_leds);
-static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUSR,
show_engine3_leds, store_engine3_leds);
-static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
-static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
-static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
+static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
static struct attribute *lp5523_attributes[] = {
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index f05bb08d0f09..06a5bb484707 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -22,6 +22,7 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
#include <linux/mfd/mc13783.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
struct mc13783_led {
@@ -183,7 +184,7 @@ static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
int ret = 0;
int reg = 0;
@@ -264,7 +265,7 @@ out:
static int __devinit mc13783_led_probe(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
struct mc13783_led_platform_data *led_cur;
struct mc13783_led *led, *led_dat;
int ret, i;
@@ -351,7 +352,7 @@ err_free:
static int __devexit mc13783_led_remove(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
struct mc13783_led *led = platform_get_drvdata(pdev);
struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
int i;
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
index 1739557a9038..7e764b8365e6 100644
--- a/drivers/leds/leds-net5501.c
+++ b/drivers/leds/leds-net5501.c
@@ -19,7 +19,7 @@
#include <asm/geode.h>
-static struct gpio_led net5501_leds[] = {
+static const struct gpio_led net5501_leds[] = {
{
.name = "error",
.gpio = 6,
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 39f660b2a60d..2637c139777b 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -283,7 +283,7 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
}
}
-static void __devexit rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
+static void rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
{
cancel_delayed_work_sync(&rm->cpu[0].sniffer);
cancel_delayed_work_sync(&rm->cpu[1].sniffer);
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index ade1e656bfb2..b1d91170ded0 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -163,6 +163,7 @@ void __init pmu_backlight_init()
snprintf(name, sizeof(name), "pmubl");
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data,
&props);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9a35320fb59f..a2ce0b2da281 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -854,7 +854,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
if (bitmap->flags & BITMAP_HOSTENDIAN)
set_bit(bit, kaddr);
else
- ext2_set_bit(bit, kaddr);
+ __test_and_set_bit_le(bit, kaddr);
kunmap_atomic(kaddr, KM_USER0);
PRINTK("set file bit %lu page %lu\n", bit, page->index);
}
@@ -1050,7 +1050,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
if (bitmap->flags & BITMAP_HOSTENDIAN)
b = test_bit(bit, paddr);
else
- b = ext2_test_bit(bit, paddr);
+ b = test_bit_le(bit, paddr);
kunmap_atomic(paddr, KM_USER0);
if (b) {
/* if the disk bit is set, set the memory bit */
@@ -1226,7 +1226,7 @@ void bitmap_daemon_work(mddev_t *mddev)
clear_bit(file_page_offset(bitmap, j),
paddr);
else
- ext2_clear_bit(file_page_offset(bitmap, j),
+ __test_and_clear_bit_le(file_page_offset(bitmap, j),
paddr);
kunmap_atomic(paddr, KM_USER0);
} else
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6951536ea29c..57968eb382c1 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -251,20 +251,20 @@ struct log_c {
*/
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
- return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
+ return test_bit_le(bit, (unsigned long *) bs) ? 1 : 0;
}
static inline void log_set_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- ext2_set_bit(bit, (unsigned long *) bs);
+ __test_and_set_bit_le(bit, (unsigned long *) bs);
l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- ext2_clear_bit(bit, (unsigned long *) bs);
+ __test_and_clear_bit_le(bit, (unsigned long *) bs);
l->touched_dirtied = 1;
}
@@ -740,7 +740,7 @@ static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
return 0;
do {
- *region = ext2_find_next_zero_bit(
+ *region = find_next_zero_bit_le(
(unsigned long *) lc->sync_bits,
lc->region_count,
lc->sync_search);
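
The bitmap.c and dm-log.c hunks above replace the old ext2_*() bitmap helpers with the generic little-endian bitops (test_bit_le(), __test_and_set_bit_le(), __test_and_clear_bit_le(), find_next_zero_bit_le()). A minimal sketch of the one-to-one mapping, using a hypothetical region bitmap rather than dm-log's real sync/clean bitsets, is shown below; only the bitops calls are taken from the patch.

#include <linux/bitops.h>	/* pulls in the generic *_le() helpers */

/* hypothetical clean-region bitmap, stored in little-endian bit order on disk */
static unsigned long example_clean_bits[BITS_TO_LONGS(256)];

static void example_mark_clean(unsigned int region)
{
	/* non-atomic set, the behaviour ext2_set_bit() used to provide */
	__test_and_set_bit_le(region, example_clean_bits);
}

static void example_mark_dirty(unsigned int region)
{
	/* replaces ext2_clear_bit() */
	__test_and_clear_bit_le(region, example_clean_bits);
}

static int example_is_clean(unsigned int region)
{
	/* replaces ext2_test_bit() */
	return test_bit_le(region, example_clean_bits) ? 1 : 0;
}

static unsigned long example_first_dirty(unsigned long nr_regions)
{
	/* replaces ext2_find_next_zero_bit(), as in core_get_resync_work() */
	return find_next_zero_bit_le(example_clean_bits, nr_regions, 0);
}
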
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b82d28819e2a..4b0b63c290a6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1283,24 +1283,22 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (error == -EOPNOTSUPP)
- return error;
-
- if (clone->cmd_flags & REQ_DISCARD)
- /*
- * Pass all discard request failures up.
- * FIXME: only fail_path if the discard failed due to a
- * transport problem. This requires precise understanding
- * of the underlying failure (e.g. the SCSI sense).
- */
+ if (error == -EOPNOTSUPP || error == -EREMOTEIO)
return error;
if (mpio->pgpath)
fail_path(mpio->pgpath);
spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
- r = -EIO;
+ if (!m->nr_valid_paths) {
+ if (!m->queue_if_no_path) {
+ if (!__must_push_back(m))
+ r = -EIO;
+ } else {
+ if (error == -EBADE)
+ r = error;
+ }
+ }
spin_unlock_irqrestore(&m->lock, flags);
return r;
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 23005b3cf30b..b2b9415d874d 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -8,60 +8,71 @@
#include "dvb-usb-common.h"
#include <linux/usb/input.h>
+static unsigned int
+legacy_dvb_usb_get_keymap_index(const struct input_keymap_entry *ke,
+ struct rc_map_table *keymap,
+ unsigned int keymap_size)
+{
+ unsigned int index;
+ unsigned int scancode;
+
+ if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
+ index = ke->index;
+ } else {
+ if (input_scancode_to_scalar(ke, &scancode))
+ return keymap_size;
+
+ /* See if we can match the raw key code. */
+ for (index = 0; index < keymap_size; index++)
+ if (keymap[index].scancode == scancode)
+ break;
+
+ /* See if there is an unused hole in the map */
+ if (index >= keymap_size) {
+ for (index = 0; index < keymap_size; index++) {
+ if (keymap[index].keycode == KEY_RESERVED ||
+ keymap[index].keycode == KEY_UNKNOWN) {
+ break;
+ }
+ }
+ }
+ }
+
+ return index;
+}
+
static int legacy_dvb_usb_getkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int *keycode)
+ struct input_keymap_entry *ke)
{
struct dvb_usb_device *d = input_get_drvdata(dev);
-
struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
- int i;
+ unsigned int keymap_size = d->props.rc.legacy.rc_map_size;
+ unsigned int index;
- /* See if we can match the raw key code. */
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
- if (keymap[i].scancode == scancode) {
- *keycode = keymap[i].keycode;
- return 0;
- }
+ index = legacy_dvb_usb_get_keymap_index(ke, keymap, keymap_size);
+ if (index >= keymap_size)
+ return -EINVAL;
- /*
- * If is there extra space, returns KEY_RESERVED,
- * otherwise, input core won't let legacy_dvb_usb_setkeycode
- * to work
- */
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
- if (keymap[i].keycode == KEY_RESERVED ||
- keymap[i].keycode == KEY_UNKNOWN) {
- *keycode = KEY_RESERVED;
- return 0;
- }
+ ke->keycode = keymap[index].keycode;
+ if (ke->keycode == KEY_UNKNOWN)
+ ke->keycode = KEY_RESERVED;
+ ke->len = sizeof(keymap[index].scancode);
+ memcpy(&ke->scancode, &keymap[index].scancode, ke->len);
+ ke->index = index;
- return -EINVAL;
+ return 0;
}
static int legacy_dvb_usb_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
+ const struct input_keymap_entry *ke,
+ unsigned int *old_keycode)
{
struct dvb_usb_device *d = input_get_drvdata(dev);
-
struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
- int i;
-
- /* Search if it is replacing an existing keycode */
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
- if (keymap[i].scancode == scancode) {
- keymap[i].keycode = keycode;
- return 0;
- }
-
- /* Search if is there a clean entry. If so, use it */
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
- if (keymap[i].keycode == KEY_RESERVED ||
- keymap[i].keycode == KEY_UNKNOWN) {
- keymap[i].scancode = scancode;
- keymap[i].keycode = keycode;
- return 0;
- }
+ unsigned int keymap_size = d->props.rc.legacy.rc_map_size;
+ unsigned int index;
+ index = legacy_dvb_usb_get_keymap_index(ke, keymap, keymap_size);
/*
* FIXME: Currently, it is not possible to increase the size of
* scancode table. For it to happen, one possibility
@@ -69,8 +80,24 @@ static int legacy_dvb_usb_setkeycode(struct input_dev *dev,
* copying data, appending the new key on it, and freeing
* the old one - or maybe just allocating some spare space
*/
+ if (index >= keymap_size)
+ return -EINVAL;
+
+ *old_keycode = keymap[index].keycode;
+ keymap->keycode = ke->keycode;
+ __set_bit(ke->keycode, dev->keybit);
+
+ if (*old_keycode != KEY_RESERVED) {
+ __clear_bit(*old_keycode, dev->keybit);
+ for (index = 0; index < keymap_size; index++) {
+ if (keymap[index].keycode == *old_keycode) {
+ __set_bit(*old_keycode, dev->keybit);
+ break;
+ }
+ }
+ }
- return -EINVAL;
+ return 0;
}
/* Remote-control poll function - called every dib->rc_query_interval ms to see
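
The conversion above moves the driver to the input core's struct input_keymap_entry based getkeycode/setkeycode interface, which userspace reaches through the EVIOCGKEYCODE_V2 and EVIOCSKEYCODE_V2 ioctls. A minimal userspace sketch of querying one keymap entry by index follows; the /dev/input/event0 path is an assumption, not something taken from this patch.

/* build: gcc -o evkeymap evkeymap.c */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct input_keymap_entry ke;
	int fd = open("/dev/input/event0", O_RDONLY);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&ke, 0, sizeof(ke));
	ke.flags = INPUT_KEYMAP_BY_INDEX;	/* the same lookup path the new helper handles */
	ke.index = 0;

	if (ioctl(fd, EVIOCGKEYCODE_V2, &ke) == 0)
		printf("index %u -> keycode %u, scancode length %u\n",
		       (unsigned int)ke.index, (unsigned int)ke.keycode,
		       (unsigned int)ke.len);

	close(fd);
	return 0;
}
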
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index a185610b376b..1e3a8dd820a4 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -21,6 +21,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -148,7 +149,7 @@ static const struct v4l2_file_operations timbradio_fops = {
static int __devinit timbradio_probe(struct platform_device *pdev)
{
- struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_radio_platform_data *pdata = mfd_get_data(pdev);
struct timbradio *tr;
int err;
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 7ecc8e657663..4698eb00b59f 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -2138,7 +2138,7 @@ static int wl1273_fm_radio_remove(struct platform_device *pdev)
static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev)
{
- struct wl1273_core **core = pdev->dev.platform_data;
+ struct wl1273_core **core = mfd_get_data(pdev);
struct wl1273_device *radio;
struct v4l2_ctrl *ctrl;
int r = 0;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 5b4422ef4e6d..5ac1baf45c8e 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -966,8 +966,8 @@ struct rc_dev *rc_allocate_device(void)
return NULL;
}
- dev->input_dev->getkeycode_new = ir_getkeycode;
- dev->input_dev->setkeycode_new = ir_setkeycode;
+ dev->input_dev->getkeycode = ir_getkeycode;
+ dev->input_dev->setkeycode = ir_setkeycode;
input_set_drvdata(dev->input_dev, dev);
spin_lock_init(&dev->rc_map.lock);
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index fc611ebeb82c..84d4c7c83435 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
+#include <linux/mfd/core.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
@@ -790,7 +791,7 @@ static int __devinit timblogiw_probe(struct platform_device *pdev)
{
int err;
struct timblogiw *lw = NULL;
- struct timb_video_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_video_platform_data *pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data\n");
diff --git a/drivers/memstick/Makefile b/drivers/memstick/Makefile
index dc160fb43515..98623590c7fe 100644
--- a/drivers/memstick/Makefile
+++ b/drivers/memstick/Makefile
@@ -2,9 +2,7 @@
# Makefile for the kernel MemoryStick device drivers.
#
-ifeq ($(CONFIG_MEMSTICK_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
+subdir-ccflags-$(CONFIG_MEMSTICK_DEBUG) := -DDEBUG
obj-$(CONFIG_MEMSTICK) += core/
obj-$(CONFIG_MEMSTICK) += host/
diff --git a/drivers/memstick/core/Makefile b/drivers/memstick/core/Makefile
index 8b2b5293877e..ecd029937738 100644
--- a/drivers/memstick/core/Makefile
+++ b/drivers/memstick/core/Makefile
@@ -2,10 +2,6 @@
# Makefile for the kernel MemoryStick core.
#
-ifeq ($(CONFIG_MEMSTICK_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
-
obj-$(CONFIG_MEMSTICK) += memstick.o
obj-$(CONFIG_MSPRO_BLOCK) += mspro_block.o
diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile
index 12530e4311d3..a1815e9dd019 100644
--- a/drivers/memstick/host/Makefile
+++ b/drivers/memstick/host/Makefile
@@ -2,9 +2,5 @@
# Makefile for MemoryStick host controller drivers
#
-ifeq ($(CONFIG_MEMSTICK_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
-
obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o
obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index 95c9532cb07c..d182a24b3195 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -2,7 +2,7 @@
# enable verbose logging
# CONFIG_FUSION_LOGGING needs to be enabled in Kconfig
-#EXTRA_CFLAGS += -DMPT_DEBUG_VERBOSE
+#ccflags-y := -DMPT_DEBUG_VERBOSE
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-} LSI_LOGIC
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 013c7d881948..22027e7946f7 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -2593,6 +2593,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
#define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE (0x03)
#define MPI_SAS_IOUNIT0_RATE_1_5 (0x08)
#define MPI_SAS_IOUNIT0_RATE_3_0 (0x09)
+#define MPI_SAS_IOUNIT0_RATE_6_0 (0x0A)
/* see mpi_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 8faa4fab7b89..fd6222882a0e 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -841,6 +841,7 @@ typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS
#define MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE (0x03)
#define MPI_EVENT_SAS_PLS_LR_RATE_1_5 (0x08)
#define MPI_EVENT_SAS_PLS_LR_RATE_3_0 (0x09)
+#define MPI_EVENT_SAS_PLS_LR_RATE_6_0 (0x0A)
/* SAS Discovery Event data */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 3358c0af3466..ec8080c98081 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7418,7 +7418,12 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
- " Rate 3.0 Gpbs",PhyNumber);
+ " Rate 3.0 Gbps", PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_6_0:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Rate 6.0 Gbps", PhyNumber);
break;
default:
snprintf(evStr, EVENT_DESCR_STR_SZ,
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index e8deb8ed0499..878bda0cce70 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1314,8 +1314,10 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
else
karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
- if (karg->hdr.port > 1)
+ if (karg->hdr.port > 1) {
+ kfree(karg);
return -EINVAL;
+ }
port = karg->hdr.port;
karg->port = port;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 8aefb1829fcd..f5a14afad2cd 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1973,7 +1973,6 @@ static struct scsi_host_template mptsas_driver_template = {
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
- .eh_bus_reset_handler = mptscsih_bus_reset,
.eh_host_reset_handler = mptscsih_host_reset,
.bios_param = mptscsih_bios_param,
.can_queue = MPT_SAS_CAN_QUEUE,
@@ -3063,6 +3062,9 @@ static int mptsas_probe_one_phy(struct device *dev,
case MPI_SAS_IOUNIT0_RATE_3_0:
phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
+ case MPI_SAS_IOUNIT0_RATE_6_0:
+ phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ break;
case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
default:
@@ -3691,7 +3693,8 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
}
if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
- link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
+ link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
+ link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
if (!port_info) {
if (ioc->old_sas_discovery_protocal) {
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 7d3cc575c361..098de2b35784 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -1044,8 +1044,7 @@ static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
static int cfg_open(struct inode *inode, struct file *file)
{
- struct i2o_cfg_info *tmp =
- (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info),
+ struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info),
GFP_KERNEL);
unsigned long flags;
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 793300c554b4..9c511c1604a5 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -17,230 +17,138 @@
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>
+#include <linux/regulator/machine.h>
#define INT_STATUS_NUM 3
-char pm860x_backlight_name[][MFD_NAME_SIZE] = {
- "backlight-0",
- "backlight-1",
- "backlight-2",
+static struct resource bk_resources[] __initdata = {
+ {PM8606_BACKLIGHT1, PM8606_BACKLIGHT1, "backlight-0", IORESOURCE_IO,},
+ {PM8606_BACKLIGHT2, PM8606_BACKLIGHT2, "backlight-1", IORESOURCE_IO,},
+ {PM8606_BACKLIGHT3, PM8606_BACKLIGHT3, "backlight-2", IORESOURCE_IO,},
};
-EXPORT_SYMBOL(pm860x_backlight_name);
-
-char pm860x_led_name[][MFD_NAME_SIZE] = {
- "led0-red",
- "led0-green",
- "led0-blue",
- "led1-red",
- "led1-green",
- "led1-blue",
-};
-EXPORT_SYMBOL(pm860x_led_name);
-
-#define PM8606_BACKLIGHT_RESOURCE(_i, _x) \
-{ \
- .name = pm860x_backlight_name[_i], \
- .start = PM8606_##_x, \
- .end = PM8606_##_x, \
- .flags = IORESOURCE_IO, \
-}
-static struct resource backlight_resources[] = {
- PM8606_BACKLIGHT_RESOURCE(PM8606_BACKLIGHT1, WLED1A),
- PM8606_BACKLIGHT_RESOURCE(PM8606_BACKLIGHT2, WLED2A),
- PM8606_BACKLIGHT_RESOURCE(PM8606_BACKLIGHT3, WLED3A),
+static struct resource led_resources[] __initdata = {
+ {PM8606_LED1_RED, PM8606_LED1_RED, "led0-red", IORESOURCE_IO,},
+ {PM8606_LED1_GREEN, PM8606_LED1_GREEN, "led0-green", IORESOURCE_IO,},
+ {PM8606_LED1_BLUE, PM8606_LED1_BLUE, "led0-blue", IORESOURCE_IO,},
+ {PM8606_LED2_RED, PM8606_LED2_RED, "led1-red", IORESOURCE_IO,},
+ {PM8606_LED2_GREEN, PM8606_LED2_GREEN, "led1-green", IORESOURCE_IO,},
+ {PM8606_LED2_BLUE, PM8606_LED2_BLUE, "led1-blue", IORESOURCE_IO,},
};
-#define PM8606_BACKLIGHT_DEVS(_i) \
-{ \
- .name = "88pm860x-backlight", \
- .num_resources = 1, \
- .resources = &backlight_resources[_i], \
- .id = _i, \
-}
-
-static struct mfd_cell backlight_devs[] = {
- PM8606_BACKLIGHT_DEVS(PM8606_BACKLIGHT1),
- PM8606_BACKLIGHT_DEVS(PM8606_BACKLIGHT2),
- PM8606_BACKLIGHT_DEVS(PM8606_BACKLIGHT3),
+static struct resource regulator_resources[] __initdata = {
+ {PM8607_ID_BUCK1, PM8607_ID_BUCK1, "buck-1", IORESOURCE_IO,},
+ {PM8607_ID_BUCK2, PM8607_ID_BUCK2, "buck-2", IORESOURCE_IO,},
+ {PM8607_ID_BUCK3, PM8607_ID_BUCK3, "buck-3", IORESOURCE_IO,},
+ {PM8607_ID_LDO1, PM8607_ID_LDO1, "ldo-01", IORESOURCE_IO,},
+ {PM8607_ID_LDO2, PM8607_ID_LDO2, "ldo-02", IORESOURCE_IO,},
+ {PM8607_ID_LDO3, PM8607_ID_LDO3, "ldo-03", IORESOURCE_IO,},
+ {PM8607_ID_LDO4, PM8607_ID_LDO4, "ldo-04", IORESOURCE_IO,},
+ {PM8607_ID_LDO5, PM8607_ID_LDO5, "ldo-05", IORESOURCE_IO,},
+ {PM8607_ID_LDO6, PM8607_ID_LDO6, "ldo-06", IORESOURCE_IO,},
+ {PM8607_ID_LDO7, PM8607_ID_LDO7, "ldo-07", IORESOURCE_IO,},
+ {PM8607_ID_LDO8, PM8607_ID_LDO8, "ldo-08", IORESOURCE_IO,},
+ {PM8607_ID_LDO9, PM8607_ID_LDO9, "ldo-09", IORESOURCE_IO,},
+ {PM8607_ID_LDO10, PM8607_ID_LDO10, "ldo-10", IORESOURCE_IO,},
+ {PM8607_ID_LDO11, PM8607_ID_LDO11, "ldo-11", IORESOURCE_IO,},
+ {PM8607_ID_LDO12, PM8607_ID_LDO12, "ldo-12", IORESOURCE_IO,},
+ {PM8607_ID_LDO13, PM8607_ID_LDO13, "ldo-13", IORESOURCE_IO,},
+ {PM8607_ID_LDO14, PM8607_ID_LDO14, "ldo-14", IORESOURCE_IO,},
+ {PM8607_ID_LDO15, PM8607_ID_LDO15, "ldo-15", IORESOURCE_IO,},
};
-#define PM8606_LED_RESOURCE(_i, _x) \
-{ \
- .name = pm860x_led_name[_i], \
- .start = PM8606_##_x, \
- .end = PM8606_##_x, \
- .flags = IORESOURCE_IO, \
-}
-
-static struct resource led_resources[] = {
- PM8606_LED_RESOURCE(PM8606_LED1_RED, RGB1B),
- PM8606_LED_RESOURCE(PM8606_LED1_GREEN, RGB1C),
- PM8606_LED_RESOURCE(PM8606_LED1_BLUE, RGB1D),
- PM8606_LED_RESOURCE(PM8606_LED2_RED, RGB2B),
- PM8606_LED_RESOURCE(PM8606_LED2_GREEN, RGB2C),
- PM8606_LED_RESOURCE(PM8606_LED2_BLUE, RGB2D),
+static struct resource touch_resources[] __initdata = {
+ {PM8607_IRQ_PEN, PM8607_IRQ_PEN, "touch", IORESOURCE_IRQ,},
};
-#define PM8606_LED_DEVS(_i) \
-{ \
- .name = "88pm860x-led", \
- .num_resources = 1, \
- .resources = &led_resources[_i], \
- .id = _i, \
-}
-
-static struct mfd_cell led_devs[] = {
- PM8606_LED_DEVS(PM8606_LED1_RED),
- PM8606_LED_DEVS(PM8606_LED1_GREEN),
- PM8606_LED_DEVS(PM8606_LED1_BLUE),
- PM8606_LED_DEVS(PM8606_LED2_RED),
- PM8606_LED_DEVS(PM8606_LED2_GREEN),
- PM8606_LED_DEVS(PM8606_LED2_BLUE),
+static struct resource onkey_resources[] __initdata = {
+ {PM8607_IRQ_ONKEY, PM8607_IRQ_ONKEY, "onkey", IORESOURCE_IRQ,},
};
-static struct resource touch_resources[] = {
- {
- .start = PM8607_IRQ_PEN,
- .end = PM8607_IRQ_PEN,
- .flags = IORESOURCE_IRQ,
- },
+static struct resource codec_resources[] __initdata = {
+ /* Headset microphone insertion or removal */
+ {PM8607_IRQ_MICIN, PM8607_IRQ_MICIN, "micin", IORESOURCE_IRQ,},
+ /* Hook-switch press or release */
+ {PM8607_IRQ_HOOK, PM8607_IRQ_HOOK, "hook", IORESOURCE_IRQ,},
+ /* Headset insertion or removal */
+ {PM8607_IRQ_HEADSET, PM8607_IRQ_HEADSET, "headset", IORESOURCE_IRQ,},
+ /* Audio short */
+ {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,},
};
-static struct mfd_cell touch_devs[] = {
- {
- .name = "88pm860x-touch",
- .num_resources = 1,
- .resources = &touch_resources[0],
- },
+static struct resource battery_resources[] __initdata = {
+ {PM8607_IRQ_CC, PM8607_IRQ_CC, "columb counter", IORESOURCE_IRQ,},
+ {PM8607_IRQ_BAT, PM8607_IRQ_BAT, "battery", IORESOURCE_IRQ,},
};
-#define PM8607_REG_RESOURCE(_start, _end) \
-{ \
- .start = PM8607_##_start, \
- .end = PM8607_##_end, \
- .flags = IORESOURCE_IO, \
-}
-
-static struct resource power_supply_resources[] = {
- {
- .name = "88pm860x-power",
- .start = PM8607_IRQ_CHG,
- .end = PM8607_IRQ_CHG,
- .flags = IORESOURCE_IRQ,
- },
+static struct resource charger_resources[] __initdata = {
+ {PM8607_IRQ_CHG, PM8607_IRQ_CHG, "charger detect", IORESOURCE_IRQ,},
+ {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", IORESOURCE_IRQ,},
+ {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging timeout", IORESOURCE_IRQ,},
+ {PM8607_IRQ_GPADC1, PM8607_IRQ_GPADC1, "battery temperature", IORESOURCE_IRQ,},
+ {PM8607_IRQ_VBAT, PM8607_IRQ_VBAT, "battery voltage", IORESOURCE_IRQ,},
+ {PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,},
};
-static struct mfd_cell power_devs[] = {
- {
- .name = "88pm860x-power",
- .num_resources = 1,
- .resources = &power_supply_resources[0],
- .id = -1,
- },
+static struct mfd_cell bk_devs[] __initdata = {
+ {"88pm860x-backlight", 0,},
+ {"88pm860x-backlight", 1,},
+ {"88pm860x-backlight", 2,},
};
-static struct resource onkey_resources[] = {
- {
- .name = "88pm860x-onkey",
- .start = PM8607_IRQ_ONKEY,
- .end = PM8607_IRQ_ONKEY,
- .flags = IORESOURCE_IRQ,
- },
+static struct mfd_cell led_devs[] __initdata = {
+ {"88pm860x-led", 0,},
+ {"88pm860x-led", 1,},
+ {"88pm860x-led", 2,},
+ {"88pm860x-led", 3,},
+ {"88pm860x-led", 4,},
+ {"88pm860x-led", 5,},
};
-static struct mfd_cell onkey_devs[] = {
- {
- .name = "88pm860x-onkey",
- .num_resources = 1,
- .resources = &onkey_resources[0],
- .id = -1,
- },
+static struct mfd_cell regulator_devs[] __initdata = {
+ {"88pm860x-regulator", 0,},
+ {"88pm860x-regulator", 1,},
+ {"88pm860x-regulator", 2,},
+ {"88pm860x-regulator", 3,},
+ {"88pm860x-regulator", 4,},
+ {"88pm860x-regulator", 5,},
+ {"88pm860x-regulator", 6,},
+ {"88pm860x-regulator", 7,},
+ {"88pm860x-regulator", 8,},
+ {"88pm860x-regulator", 9,},
+ {"88pm860x-regulator", 10,},
+ {"88pm860x-regulator", 11,},
+ {"88pm860x-regulator", 12,},
+ {"88pm860x-regulator", 13,},
+ {"88pm860x-regulator", 14,},
+ {"88pm860x-regulator", 15,},
+ {"88pm860x-regulator", 16,},
+ {"88pm860x-regulator", 17,},
};
-static struct resource codec_resources[] = {
- {
- /* Headset microphone insertion or removal */
- .name = "micin",
- .start = PM8607_IRQ_MICIN,
- .end = PM8607_IRQ_MICIN,
- .flags = IORESOURCE_IRQ,
- }, {
- /* Hook-switch press or release */
- .name = "hook",
- .start = PM8607_IRQ_HOOK,
- .end = PM8607_IRQ_HOOK,
- .flags = IORESOURCE_IRQ,
- }, {
- /* Headset insertion or removal */
- .name = "headset",
- .start = PM8607_IRQ_HEADSET,
- .end = PM8607_IRQ_HEADSET,
- .flags = IORESOURCE_IRQ,
- }, {
- /* Audio short */
- .name = "audio-short",
- .start = PM8607_IRQ_AUDIO_SHORT,
- .end = PM8607_IRQ_AUDIO_SHORT,
- .flags = IORESOURCE_IRQ,
- },
+static struct mfd_cell touch_devs[] __initdata = {
+ {"88pm860x-touch", -1,},
};
-static struct mfd_cell codec_devs[] = {
- {
- .name = "88pm860x-codec",
- .num_resources = ARRAY_SIZE(codec_resources),
- .resources = &codec_resources[0],
- .id = -1,
- },
+static struct mfd_cell onkey_devs[] __initdata = {
+ {"88pm860x-onkey", -1,},
};
-static struct resource regulator_resources[] = {
- PM8607_REG_RESOURCE(BUCK1, BUCK1),
- PM8607_REG_RESOURCE(BUCK2, BUCK2),
- PM8607_REG_RESOURCE(BUCK3, BUCK3),
- PM8607_REG_RESOURCE(LDO1, LDO1),
- PM8607_REG_RESOURCE(LDO2, LDO2),
- PM8607_REG_RESOURCE(LDO3, LDO3),
- PM8607_REG_RESOURCE(LDO4, LDO4),
- PM8607_REG_RESOURCE(LDO5, LDO5),
- PM8607_REG_RESOURCE(LDO6, LDO6),
- PM8607_REG_RESOURCE(LDO7, LDO7),
- PM8607_REG_RESOURCE(LDO8, LDO8),
- PM8607_REG_RESOURCE(LDO9, LDO9),
- PM8607_REG_RESOURCE(LDO10, LDO10),
- PM8607_REG_RESOURCE(LDO12, LDO12),
- PM8607_REG_RESOURCE(VIBRATOR_SET, VIBRATOR_SET),
- PM8607_REG_RESOURCE(LDO14, LDO14),
+static struct mfd_cell codec_devs[] __initdata = {
+ {"88pm860x-codec", -1,},
};
-#define PM8607_REG_DEVS(_id) \
-{ \
- .name = "88pm860x-regulator", \
- .num_resources = 1, \
- .resources = &regulator_resources[PM8607_ID_##_id], \
- .id = PM8607_ID_##_id, \
-}
-
-static struct mfd_cell regulator_devs[] = {
- PM8607_REG_DEVS(BUCK1),
- PM8607_REG_DEVS(BUCK2),
- PM8607_REG_DEVS(BUCK3),
- PM8607_REG_DEVS(LDO1),
- PM8607_REG_DEVS(LDO2),
- PM8607_REG_DEVS(LDO3),
- PM8607_REG_DEVS(LDO4),
- PM8607_REG_DEVS(LDO5),
- PM8607_REG_DEVS(LDO6),
- PM8607_REG_DEVS(LDO7),
- PM8607_REG_DEVS(LDO8),
- PM8607_REG_DEVS(LDO9),
- PM8607_REG_DEVS(LDO10),
- PM8607_REG_DEVS(LDO12),
- PM8607_REG_DEVS(LDO13),
- PM8607_REG_DEVS(LDO14),
+static struct mfd_cell power_devs[] = {
+ {"88pm860x-battery", -1,},
+ {"88pm860x-charger", -1,},
};
+static struct pm860x_backlight_pdata bk_pdata[ARRAY_SIZE(bk_devs)];
+static struct pm860x_led_pdata led_pdata[ARRAY_SIZE(led_devs)];
+static struct regulator_init_data regulator_pdata[ARRAY_SIZE(regulator_devs)];
+static struct pm860x_touch_pdata touch_pdata;
+static struct pm860x_power_pdata power_pdata;
+
struct pm860x_irq_data {
int reg;
int mask_reg;
@@ -595,37 +503,212 @@ static void device_irq_exit(struct pm860x_chip *chip)
free_irq(chip->core_irq, chip);
}
-static void __devinit device_8606_init(struct pm860x_chip *chip,
- struct i2c_client *i2c,
- struct pm860x_platform_data *pdata)
+static void __devinit device_bk_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
{
int ret;
+ int i, j, id;
+
+ if ((pdata == NULL) || (pdata->backlight == NULL))
+ return;
+
+ if (pdata->num_backlights > ARRAY_SIZE(bk_devs))
+ pdata->num_backlights = ARRAY_SIZE(bk_devs);
+
+ for (i = 0; i < pdata->num_backlights; i++) {
+ memcpy(&bk_pdata[i], &pdata->backlight[i],
+ sizeof(struct pm860x_backlight_pdata));
+ bk_devs[i].mfd_data = &bk_pdata[i];
+
+ for (j = 0; j < ARRAY_SIZE(bk_devs); j++) {
+ id = bk_resources[j].start;
+ if (bk_pdata[i].flags != id)
+ continue;
+
+ bk_devs[i].num_resources = 1;
+ bk_devs[i].resources = &bk_resources[j];
+ ret = mfd_add_devices(chip->dev, 0,
+ &bk_devs[i], 1,
+ &bk_resources[j], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add "
+ "backlight subdev\n");
+ return;
+ }
+ }
+ }
+}
- if (pdata && pdata->backlight) {
- ret = mfd_add_devices(chip->dev, 0, &backlight_devs[0],
- ARRAY_SIZE(backlight_devs),
- &backlight_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add backlight "
- "subdev\n");
- goto out_dev;
+static void __devinit device_led_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+ int i, j, id;
+
+ if ((pdata == NULL) || (pdata->led == NULL))
+ return;
+
+ if (pdata->num_leds > ARRAY_SIZE(led_devs))
+ pdata->num_leds = ARRAY_SIZE(led_devs);
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ memcpy(&led_pdata[i], &pdata->led[i],
+ sizeof(struct pm860x_led_pdata));
+ led_devs[i].mfd_data = &led_pdata[i];
+
+ for (j = 0; j < ARRAY_SIZE(led_devs); j++) {
+ id = led_resources[j].start;
+ if (led_pdata[i].flags != id)
+ continue;
+
+ led_devs[i].num_resources = 1;
+ led_devs[i].resources = &led_resources[j],
+ ret = mfd_add_devices(chip->dev, 0,
+ &led_devs[i], 1,
+ &led_resources[j], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add "
+ "led subdev\n");
+ return;
+ }
}
}
+}
- if (pdata && pdata->led) {
- ret = mfd_add_devices(chip->dev, 0, &led_devs[0],
- ARRAY_SIZE(led_devs),
- &led_resources[0], 0);
+static void __devinit device_regulator_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ struct regulator_init_data *initdata;
+ int ret;
+ int i, j;
+
+ if ((pdata == NULL) || (pdata->regulator == NULL))
+ return;
+
+ if (pdata->num_regulators > ARRAY_SIZE(regulator_devs))
+ pdata->num_regulators = ARRAY_SIZE(regulator_devs);
+
+ for (i = 0, j = -1; i < pdata->num_regulators; i++) {
+ initdata = &pdata->regulator[i];
+ if (strstr(initdata->constraints.name, "BUCK")) {
+ sscanf(initdata->constraints.name, "BUCK%d", &j);
+ /* BUCK1 ~ BUCK3 */
+ if ((j < 1) || (j > 3)) {
+ dev_err(chip->dev, "Failed to add constraint "
+ "(%s)\n", initdata->constraints.name);
+ goto out;
+ }
+ j = (j - 1) + PM8607_ID_BUCK1;
+ }
+ if (strstr(initdata->constraints.name, "LDO")) {
+ sscanf(initdata->constraints.name, "LDO%d", &j);
+ /* LDO1 ~ LDO15 */
+ if ((j < 1) || (j > 15)) {
+ dev_err(chip->dev, "Failed to add constraint "
+ "(%s)\n", initdata->constraints.name);
+ goto out;
+ }
+ j = (j - 1) + PM8607_ID_LDO1;
+ }
+ if (j == -1) {
+ dev_err(chip->dev, "Failed to add constraint (%s)\n",
+ initdata->constraints.name);
+ goto out;
+ }
+ memcpy(&regulator_pdata[i], &pdata->regulator[i],
+ sizeof(struct regulator_init_data));
+ regulator_devs[i].mfd_data = &regulator_pdata[i];
+ regulator_devs[i].num_resources = 1;
+ regulator_devs[i].resources = &regulator_resources[j];
+
+ ret = mfd_add_devices(chip->dev, 0, &regulator_devs[i], 1,
+ &regulator_resources[j], 0);
if (ret < 0) {
- dev_err(chip->dev, "Failed to add led "
- "subdev\n");
- goto out_dev;
+ dev_err(chip->dev, "Failed to add regulator subdev\n");
+ goto out;
}
}
+out:
return;
-out_dev:
- mfd_remove_devices(chip->dev);
- device_irq_exit(chip);
+}
+
+static void __devinit device_touch_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+
+ if ((pdata == NULL) || (pdata->touch == NULL))
+ return;
+
+ memcpy(&touch_pdata, pdata->touch, sizeof(struct pm860x_touch_pdata));
+ touch_devs[0].mfd_data = &touch_pdata;
+ touch_devs[0].num_resources = ARRAY_SIZE(touch_resources);
+ touch_devs[0].resources = &touch_resources[0];
+ ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
+ ARRAY_SIZE(touch_devs), &touch_resources[0],
+ chip->irq_base);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add touch subdev\n");
+}
+
+static void __devinit device_power_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+
+ if ((pdata == NULL) || (pdata->power == NULL))
+ return;
+
+ memcpy(&power_pdata, pdata->power, sizeof(struct pm860x_power_pdata));
+ power_devs[0].mfd_data = &power_pdata;
+ power_devs[0].num_resources = ARRAY_SIZE(battery_resources);
+ power_devs[0].resources = &battery_resources[0],
+ ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1,
+ &battery_resources[0], chip->irq_base);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add battery subdev\n");
+
+ power_devs[1].mfd_data = &power_pdata;
+ power_devs[1].num_resources = ARRAY_SIZE(charger_resources);
+ power_devs[1].resources = &charger_resources[0],
+ ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1,
+ &charger_resources[0], chip->irq_base);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add charger subdev\n");
+}
+
+static void __devinit device_onkey_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+
+ onkey_devs[0].num_resources = ARRAY_SIZE(onkey_resources);
+ onkey_devs[0].resources = &onkey_resources[0],
+ ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
+ ARRAY_SIZE(onkey_devs), &onkey_resources[0],
+ chip->irq_base);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add onkey subdev\n");
+}
+
+static void __devinit device_codec_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+
+ codec_devs[0].num_resources = ARRAY_SIZE(codec_resources);
+ codec_devs[0].resources = &codec_resources[0],
+ ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
+ ARRAY_SIZE(codec_devs), &codec_resources[0], 0);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add codec subdev\n");
}
static void __devinit device_8607_init(struct pm860x_chip *chip,
@@ -683,55 +766,11 @@ static void __devinit device_8607_init(struct pm860x_chip *chip,
if (ret < 0)
goto out;
- ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0],
- ARRAY_SIZE(regulator_devs),
- &regulator_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add regulator subdev\n");
- goto out_dev;
- }
-
- if (pdata && pdata->touch) {
- ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
- ARRAY_SIZE(touch_devs),
- &touch_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add touch "
- "subdev\n");
- goto out_dev;
- }
- }
-
- if (pdata && pdata->power) {
- ret = mfd_add_devices(chip->dev, 0, &power_devs[0],
- ARRAY_SIZE(power_devs),
- &power_supply_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add power supply "
- "subdev\n");
- goto out_dev;
- }
- }
-
- ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
- ARRAY_SIZE(onkey_devs),
- &onkey_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add onkey subdev\n");
- goto out_dev;
- }
-
- ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
- ARRAY_SIZE(codec_devs),
- &codec_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add codec subdev\n");
- goto out_dev;
- }
- return;
-out_dev:
- mfd_remove_devices(chip->dev);
- device_irq_exit(chip);
+ device_regulator_init(chip, i2c, pdata);
+ device_onkey_init(chip, i2c, pdata);
+ device_touch_init(chip, i2c, pdata);
+ device_power_init(chip, i2c, pdata);
+ device_codec_init(chip, i2c, pdata);
out:
return;
}
@@ -743,7 +782,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip,
switch (chip->id) {
case CHIP_PM8606:
- device_8606_init(chip, chip->client, pdata);
+ device_bk_init(chip, chip->client, pdata);
+ device_led_init(chip, chip->client, pdata);
break;
case CHIP_PM8607:
device_8607_init(chip, chip->client, pdata);
@@ -753,7 +793,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip,
if (chip->companion) {
switch (chip->id) {
case CHIP_PM8607:
- device_8606_init(chip, chip->companion, pdata);
+ device_bk_init(chip, chip->companion, pdata);
+ device_led_init(chip, chip->companion, pdata);
break;
case CHIP_PM8606:
device_8607_init(chip, chip->companion, pdata);
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index bc02e6b21608..e017dc88622a 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -126,6 +126,109 @@ out:
}
EXPORT_SYMBOL(pm860x_set_bits);
+int pm860x_page_reg_read(struct i2c_client *i2c, int reg)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero = 0;
+ unsigned char data;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_read_device(i2c, reg, 1, &data);
+ if (ret >= 0)
+ ret = (int)data;
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_reg_read);
+
+int pm860x_page_reg_write(struct i2c_client *i2c, int reg,
+ unsigned char data)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_write_device(i2c, reg, 1, &data);
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_reg_write);
+
+int pm860x_page_bulk_read(struct i2c_client *i2c, int reg,
+ int count, unsigned char *buf)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero = 0;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_read_device(i2c, reg, count, buf);
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_bulk_read);
+
+int pm860x_page_bulk_write(struct i2c_client *i2c, int reg,
+ int count, unsigned char *buf)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero = 0;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_write_device(i2c, reg, count, buf);
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_bulk_write);
+
+int pm860x_page_set_bits(struct i2c_client *i2c, int reg,
+ unsigned char mask, unsigned char data)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero;
+ unsigned char value;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_read_device(i2c, reg, 1, &value);
+ if (ret < 0)
+ goto out;
+ value &= ~mask;
+ value |= data;
+ ret = pm860x_write_device(i2c, reg, 1, &value);
+out:
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_set_bits);
static const struct i2c_device_id pm860x_id_table[] = {
{ "88PM860x", 0 },
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index a6dfa37a674d..8d7d098f7a03 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -81,6 +81,17 @@ config MFD_DM355EVM_MSP
boards. MSP430 firmware manages resets and power sequencing,
inputs from buttons and the IR remote, LEDs, an RTC, and more.
+config MFD_TI_SSP
+ tristate "TI Sequencer Serial Port support"
+ depends on ARCH_DAVINCI_TNETV107X
+ select MFD_CORE
+ ---help---
+ Say Y here if you want support for the Sequencer Serial Port
+ in a Texas Instruments TNETV107X SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ti-ssp.
+
config HTC_EGPIO
bool "HTC EGPIO support"
depends on GENERIC_HARDIRQS && GPIOLIB && ARM
@@ -118,6 +129,17 @@ config UCB1400_CORE
To compile this driver as a module, choose M here: the
module will be called ucb1400_core.
+config TPS6105X
+ tristate "TPS61050/61052 Boost Converters"
+ depends on I2C
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
+ help
+ This option enables a driver for the TPS61050/TPS61052
+ high-power "white LED driver". This boost converter is
+ sometimes used for purposes other than white LEDs and
+ also provides a GPIO pin.
+
config TPS65010
tristate "TPS6501x Power Management chips"
depends on I2C && GPIOLIB
@@ -167,6 +189,16 @@ config TWL4030_CORE
high speed USB OTG transceiver, an audio codec (on most
versions) and many other features.
+config TWL4030_MADC
+ tristate "Texas Instruments TWL4030 MADC"
+ depends on TWL4030_CORE
+ help
+ This driver provides support for the Triton TWL4030 MADC. The
+ driver supports both RT (real-time) and SW (software) conversion
+ methods.
+
+ This driver can be built as a module. If so, it will be
+ named twl4030-madc.
+
config TWL4030_POWER
bool "Support power resources on TWL4030 family chips"
depends on TWL4030_CORE && ARM
@@ -293,6 +325,18 @@ config MFD_MAX8925
accessing the device, additional drivers must be enabled in order
to use the functionality of the device.
+config MFD_MAX8997
+ bool "Maxim Semiconductor MAX8997/8966 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Say yes here to add support for the Maxim Semiconductor MAX8997/8966.
+ This is a Power Management IC with RTC, Flash, Fuel Gauge, Haptic
+ and MUIC controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MAX8998
bool "Maxim Semiconductor MAX8998/National LP3974 PMIC Support"
depends on I2C=y && GENERIC_HARDIRQS
@@ -523,6 +567,13 @@ config AB8500_DEBUG
Select this option if you want debug information using the debug
filesystem, debugfs.
+config AB8500_GPADC
+ bool "AB8500 GPADC driver"
+ depends on AB8500_CORE && REGULATOR_AB8500
+ default y
+ help
+ AB8500 GPADC driver, used to convert accessory-detect and battery/AC/USB voltages
+
config AB3550_CORE
bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions"
select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 91fe384459ab..47f5709f3828 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
+obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_MFD_TC3589X) += tc3589x.o
@@ -32,11 +33,13 @@ obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
+obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
+obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
@@ -60,6 +63,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
+obj-$(CONFIG_MFD_MAX8997) += max8997.o
obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o
pcf50633-objs := pcf50633-core.o pcf50633-irq.o
@@ -70,9 +74,10 @@ obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
-obj-$(CONFIG_AB8500_CORE) += ab8500-core.o
+obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
+obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 4193af5f2743..a751927047ac 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -613,7 +613,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100)
ab3100_get_priv.ab3100 = ab3100;
ab3100_get_priv.mode = false;
ab3100_get_reg_file = debugfs_create_file("get_reg",
- S_IWUGO, ab3100_dir, &ab3100_get_priv,
+ S_IWUSR, ab3100_dir, &ab3100_get_priv,
&ab3100_get_set_reg_fops);
if (!ab3100_get_reg_file) {
err = -ENOMEM;
@@ -623,7 +623,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100)
ab3100_set_priv.ab3100 = ab3100;
ab3100_set_priv.mode = true;
ab3100_set_reg_file = debugfs_create_file("set_reg",
- S_IWUGO, ab3100_dir, &ab3100_set_priv,
+ S_IWUSR, ab3100_dir, &ab3100_set_priv,
&ab3100_get_set_reg_fops);
if (!ab3100_set_reg_file) {
err = -ENOMEM;
@@ -949,10 +949,8 @@ static int __devinit ab3100_probe(struct i2c_client *client,
goto exit_no_ops;
/* Set up and register the platform devices. */
- for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) {
- ab3100_devs[i].platform_data = ab3100_plf_data;
- ab3100_devs[i].data_size = sizeof(struct ab3100_platform_data);
- }
+ for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++)
+ ab3100_devs[i].mfd_data = ab3100_plf_data;
err = mfd_add_devices(&client->dev, 0, ab3100_devs,
ARRAY_SIZE(ab3100_devs), NULL, 0);
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 5fbca346b998..c12d04285226 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1053,17 +1053,17 @@ static inline void ab3550_setup_debugfs(struct ab3550 *ab)
goto exit_destroy_dir;
ab3550_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops);
+ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_bank_fops);
if (!ab3550_bank_file)
goto exit_destroy_reg;
ab3550_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops);
+ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_address_fops);
if (!ab3550_address_file)
goto exit_destroy_bank;
ab3550_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops);
+ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_val_fops);
if (!ab3550_val_file)
goto exit_destroy_address;
@@ -1320,10 +1320,8 @@ static int __init ab3550_probe(struct i2c_client *client,
goto exit_no_ops;
/* Set up and register the platform devices. */
- for (i = 0; i < AB3550_NUM_DEVICES; i++) {
- ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
- ab3550_devs[i].data_size = ab3550_plf_data->dev_data_sz[i];
- }
+ for (i = 0; i < AB3550_NUM_DEVICES; i++)
+ ab3550_devs[i].mfd_data = ab3550_plf_data->dev_data[i];
err = mfd_add_devices(&client->dev, 0, ab3550_devs,
ARRAY_SIZE(ab3550_devs), NULL,
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index b6887014d687..6e185b272d00 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -4,7 +4,7 @@
* License Terms: GNU General Public License v2
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
* Author: Rabin Vincent <rabin.vincent@stericsson.com>
- * Changes: Mattias Wallin <mattias.wallin@stericsson.com>
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
*/
#include <linux/kernel.h>
@@ -90,6 +90,7 @@
#define AB8500_IT_MASK24_REG 0x57
#define AB8500_REV_REG 0x80
+#define AB8500_SWITCH_OFF_STATUS 0x00
/*
* Map interrupt numbers to the LATCH and MASK register offsets, Interrupt
@@ -652,10 +653,38 @@ static ssize_t show_chip_id(struct device *dev,
return sprintf(buf, "%#x\n", ab8500 ? ab8500->chip_id : -EINVAL);
}
+/*
+ * ab8500 has switched off due to (SWITCH_OFF_STATUS):
+ * 0x01 Swoff bit programming
+ * 0x02 Thermal protection activation
+ * 0x04 Vbat lower then BattOk falling threshold
+ * 0x08 Watchdog expired
+ * 0x10 Non presence of 32kHz clock
+ * 0x20 Battery level lower than power on reset threshold
+ * 0x40 Power on key 1 pressed longer than 10 seconds
+ * 0x80 DB8500 thermal shutdown
+ */
+static ssize_t show_switch_off_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u8 value;
+ struct ab8500 *ab8500;
+
+ ab8500 = dev_get_drvdata(dev);
+ ret = get_register_interruptible(ab8500, AB8500_RTC,
+ AB8500_SWITCH_OFF_STATUS, &value);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%#x\n", value);
+}
+
static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
+static DEVICE_ATTR(switch_off_status, S_IRUGO, show_switch_off_status, NULL);
static struct attribute *ab8500_sysfs_entries[] = {
&dev_attr_chip_id.attr,
+ &dev_attr_switch_off_status.attr,
NULL,
};
@@ -686,9 +715,10 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
* 0x10 - Cut 1.0
* 0x11 - Cut 1.1
* 0x20 - Cut 2.0
+ * 0x30 - Cut 3.0
*/
- if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20) {
- ab8500->revision = value;
+ if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20 ||
+ value == 0x30) {
dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
} else {
dev_err(ab8500->dev, "unknown chip, revision: %#x\n", value);
@@ -696,6 +726,24 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
}
ab8500->chip_id = value;
+ /*
+ * ab8500 has switched off due to (SWITCH_OFF_STATUS):
+ * 0x01 Swoff bit programming
+ * 0x02 Thermal protection activation
+ * 0x04 Vbat lower than BattOk falling threshold
+ * 0x08 Watchdog expired
+ * 0x10 Non presence of 32kHz clock
+ * 0x20 Battery level lower than power on reset threshold
+ * 0x40 Power on key 1 pressed longer than 10 seconds
+ * 0x80 DB8500 thermal shutdown
+ */
+
+ ret = get_register_interruptible(ab8500, AB8500_RTC,
+ AB8500_SWITCH_OFF_STATUS, &value);
+ if (ret < 0)
+ return ret;
+ dev_info(ab8500->dev, "switch off status: %#x", value);
+
if (plat && plat->init)
plat->init(ab8500);
@@ -764,6 +812,6 @@ int __devexit ab8500_exit(struct ab8500 *ab8500)
return 0;
}
-MODULE_AUTHOR("Srinidhi Kasagar, Rabin Vincent");
+MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
MODULE_DESCRIPTION("AB8500 MFD core");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 3c1541ae7223..64748e42ac03 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -585,18 +585,18 @@ static int __devinit ab8500_debug_probe(struct platform_device *plf)
goto exit_destroy_dir;
ab8500_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops);
if (!ab8500_bank_file)
goto exit_destroy_reg;
ab8500_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
+ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev,
&ab8500_address_fops);
if (!ab8500_address_file)
goto exit_destroy_bank;
ab8500_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
+ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops);
if (!ab8500_val_file)
goto exit_destroy_address;
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
new file mode 100644
index 000000000000..bc93b2e8230c
--- /dev/null
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ * Author: Daniel Willerud <daniel.willerud@stericsson.com>
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/gpadc.h>
+
+/*
+ * GPADC register offsets
+ * Bank : 0x0A
+ */
+#define AB8500_GPADC_CTRL1_REG 0x00
+#define AB8500_GPADC_CTRL2_REG 0x01
+#define AB8500_GPADC_CTRL3_REG 0x02
+#define AB8500_GPADC_AUTO_TIMER_REG 0x03
+#define AB8500_GPADC_STAT_REG 0x04
+#define AB8500_GPADC_MANDATAL_REG 0x05
+#define AB8500_GPADC_MANDATAH_REG 0x06
+#define AB8500_GPADC_AUTODATAL_REG 0x07
+#define AB8500_GPADC_AUTODATAH_REG 0x08
+#define AB8500_GPADC_MUX_CTRL_REG 0x09
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_GPADC_CAL_1 0x0F
+#define AB8500_GPADC_CAL_2 0x10
+#define AB8500_GPADC_CAL_3 0x11
+#define AB8500_GPADC_CAL_4 0x12
+#define AB8500_GPADC_CAL_5 0x13
+#define AB8500_GPADC_CAL_6 0x14
+#define AB8500_GPADC_CAL_7 0x15
+
+/* gpadc constants */
+#define EN_VINTCORE12 0x04
+#define EN_VTVOUT 0x02
+#define EN_GPADC 0x01
+#define DIS_GPADC 0x00
+#define SW_AVG_16 0x60
+#define ADC_SW_CONV 0x04
+#define EN_ICHAR 0x80
+#define EN_BUF 0x40
+#define DIS_ZERO 0x00
+#define GPADC_BUSY 0x01
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define ADC_RESOLUTION 1024
+#define ADC_CH_BTEMP_MIN 0
+#define ADC_CH_BTEMP_MAX 1350
+#define ADC_CH_DIETEMP_MIN 0
+#define ADC_CH_DIETEMP_MAX 1350
+#define ADC_CH_CHG_V_MIN 0
+#define ADC_CH_CHG_V_MAX 20030
+#define ADC_CH_ACCDET2_MIN 0
+#define ADC_CH_ACCDET2_MAX 2500
+#define ADC_CH_VBAT_MIN 2300
+#define ADC_CH_VBAT_MAX 4800
+#define ADC_CH_CHG_I_MIN 0
+#define ADC_CH_CHG_I_MAX 1500
+#define ADC_CH_BKBAT_MIN 0
+#define ADC_CH_BKBAT_MAX 3200
+
+/* This is used to not lose precision when dividing to get gain and offset */
+#define CALIB_SCALE 1000
+
+enum cal_channels {
+ ADC_INPUT_VMAIN = 0,
+ ADC_INPUT_BTEMP,
+ ADC_INPUT_VBAT,
+ NBR_CAL_INPUTS,
+};
+
+/**
+ * struct adc_cal_data - Table for storing gain and offset for the calibrated
+ * ADC channels
+ * @gain: Gain of the ADC channel
+ * @offset: Offset of the ADC channel
+ */
+struct adc_cal_data {
+ u64 gain;
+ u64 offset;
+};
+
+/**
+ * struct ab8500_gpadc - AB8500 GPADC device information
+ * @dev: pointer to the struct device
+ * @node: a list of AB8500 GPADCs, hence prepared for
+ * reentrance
+ * @ab8500_gpadc_complete: pointer to the struct completion, to indicate
+ * the completion of gpadc conversion
+ * @ab8500_gpadc_lock: structure of type mutex
+ * @regu: pointer to the struct regulator
+ * @irq: interrupt number that is used by gpadc
+ * @cal_data: array of ADC calibration data structs
+ */
+struct ab8500_gpadc {
+ struct device *dev;
+ struct list_head node;
+ struct completion ab8500_gpadc_complete;
+ struct mutex ab8500_gpadc_lock;
+ struct regulator *regu;
+ int irq;
+ struct adc_cal_data cal_data[NBR_CAL_INPUTS];
+};
+
+static LIST_HEAD(ab8500_gpadc_list);
+
+/**
+ * ab8500_gpadc_get() - returns a reference to the primary AB8500 GPADC
+ * (i.e. the first GPADC in the instance list)
+ */
+struct ab8500_gpadc *ab8500_gpadc_get(char *name)
+{
+ struct ab8500_gpadc *gpadc;
+
+ list_for_each_entry(gpadc, &ab8500_gpadc_list, node) {
+ if (!strcmp(name, dev_name(gpadc->dev)))
+ return gpadc;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(ab8500_gpadc_get);
+
+static int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, u8 input,
+ int ad_value)
+{
+ int res;
+
+ switch (input) {
+ case MAIN_CHARGER_V:
+ /* Fall back to the ideal range if calibration data is missing */
+ if (!gpadc->cal_data[ADC_INPUT_VMAIN].gain) {
+ res = ADC_CH_CHG_V_MIN + (ADC_CH_CHG_V_MAX -
+ ADC_CH_CHG_V_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use the calibrated data */
+ res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VMAIN].gain +
+ gpadc->cal_data[ADC_INPUT_VMAIN].offset) / CALIB_SCALE;
+ break;
+
+ case BAT_CTRL:
+ case BTEMP_BALL:
+ case ACC_DETECT1:
+ case ADC_AUX1:
+ case ADC_AUX2:
+ /* Fall back to the ideal range if calibration data is missing */
+ if (!gpadc->cal_data[ADC_INPUT_BTEMP].gain) {
+ res = ADC_CH_BTEMP_MIN + (ADC_CH_BTEMP_MAX -
+ ADC_CH_BTEMP_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use the calibrated data */
+ res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_BTEMP].gain +
+ gpadc->cal_data[ADC_INPUT_BTEMP].offset) / CALIB_SCALE;
+ break;
+
+ case MAIN_BAT_V:
+ /* Fall back to the ideal range if calibration data is missing */
+ if (!gpadc->cal_data[ADC_INPUT_VBAT].gain) {
+ res = ADC_CH_VBAT_MIN + (ADC_CH_VBAT_MAX -
+ ADC_CH_VBAT_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use the calibrated data */
+ res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VBAT].gain +
+ gpadc->cal_data[ADC_INPUT_VBAT].offset) / CALIB_SCALE;
+ break;
+
+ case DIE_TEMP:
+ res = ADC_CH_DIETEMP_MIN +
+ (ADC_CH_DIETEMP_MAX - ADC_CH_DIETEMP_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ case ACC_DETECT2:
+ res = ADC_CH_ACCDET2_MIN +
+ (ADC_CH_ACCDET2_MAX - ADC_CH_ACCDET2_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ case VBUS_V:
+ res = ADC_CH_CHG_V_MIN +
+ (ADC_CH_CHG_V_MAX - ADC_CH_CHG_V_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ case MAIN_CHARGER_C:
+ case USB_CHARGER_C:
+ res = ADC_CH_CHG_I_MIN +
+ (ADC_CH_CHG_I_MAX - ADC_CH_CHG_I_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ case BK_BAT_V:
+ res = ADC_CH_BKBAT_MIN +
+ (ADC_CH_BKBAT_MAX - ADC_CH_BKBAT_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ default:
+ dev_err(gpadc->dev,
+ "unknown channel, not possible to convert\n");
+ res = -EINVAL;
+ break;
+
+ }
+ return res;
+}
+
+/**
+ * ab8500_gpadc_convert() - gpadc conversion
+ * @gpadc: pointer to the struct ab8500_gpadc obtained via ab8500_gpadc_get()
+ * @input: analog input to be converted to digital data
+ *
+ * This function converts the selected analog input to digital data.
+ */
+int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
+{
+ int ret;
+ u16 data = 0;
+ int looplimit = 0;
+ u8 val, low_data, high_data;
+
+ if (!gpadc)
+ return -ENODEV;
+
+ mutex_lock(&gpadc->ab8500_gpadc_lock);
+ /* Enable the VTVout LDO; it is required by the GPADC */
+ regulator_enable(gpadc->regu);
+
+ /* Check if ADC is not busy, lock and proceed */
+ do {
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_STAT_REG, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & GPADC_BUSY))
+ break;
+ msleep(10);
+ } while (++looplimit < 10);
+ if (looplimit >= 10 && (val & GPADC_BUSY)) {
+ dev_err(gpadc->dev, "gpadc_conversion: GPADC busy\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Enable GPADC */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_GPADC, EN_GPADC);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc_conversion: enable gpadc failed\n");
+ goto out;
+ }
+ /* Select the input source and set average samples to 16 */
+ ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL2_REG, (input | SW_AVG_16));
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: set avg samples failed\n");
+ goto out;
+ }
+ /*
+ * Enable ADC, buffering, select rising edge and enable ADC path
+ * charging current sense if needed
+ */
+ switch (input) {
+ case MAIN_CHARGER_C:
+ case USB_CHARGER_C:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
+ EN_BUF | EN_ICHAR,
+ EN_BUF | EN_ICHAR);
+ break;
+ default:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_BUF, EN_BUF);
+ break;
+ }
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: select falling edge failed\n");
+ goto out;
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ADC_SW_CONV, ADC_SW_CONV);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: start s/w conversion failed\n");
+ goto out;
+ }
+ /* wait for completion of conversion */
+ if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete, 2*HZ)) {
+ dev_err(gpadc->dev,
+ "timeout: didnt recieve GPADC conversion interrupt\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Read the converted RAW data */
+ ret = abx500_get_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_MANDATAL_REG, &low_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc_conversion: read low data failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_MANDATAH_REG, &high_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read high data failed\n");
+ goto out;
+ }
+
+ data = (high_data << 8) | low_data;
+ /* Disable GPADC */
+ ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, DIS_GPADC);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
+ goto out;
+ }
+ /* Disable the VTVout LDO that was enabled for the GPADC */
+ regulator_disable(gpadc->regu);
+ mutex_unlock(&gpadc->ab8500_gpadc_lock);
+ ret = ab8500_gpadc_ad_to_voltage(gpadc, input, data);
+ return ret;
+
+out:
+ /*
+ * It has proven necessary to turn off the GPADC if an error occurs,
+ * otherwise we might have problems when waiting for the busy bit in the
+ * GPADC status register to go low. On V1.1 the wait_for_completion also
+ * seems to time out when waiting for an interrupt; not seen on V2.0.
+ */
+ (void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, DIS_GPADC);
+ regulator_disable(gpadc->regu);
+ mutex_unlock(&gpadc->ab8500_gpadc_lock);
+ dev_err(gpadc->dev,
+ "gpadc_conversion: Failed to AD convert channel %d\n", input);
+ return ret;
+}
+EXPORT_SYMBOL(ab8500_gpadc_convert);
+
+/**
+ * ab8500_bm_gpswadcconvend_handler() - isr for s/w gpadc conversion completion
+ * @irq: irq number
+ * @data: pointer to the data passed during request irq
+ *
+ * This is an interrupt service routine for s/w gpadc conversion completion.
+ * It signals that the gpadc conversion has completed and that the converted
+ * raw value can be read from the registers.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab8500_bm_gpswadcconvend_handler(int irq, void *_gpadc)
+{
+ struct ab8500_gpadc *gpadc = _gpadc;
+
+ complete(&gpadc->ab8500_gpadc_complete);
+
+ return IRQ_HANDLED;
+}
+
+static int otp_cal_regs[] = {
+ AB8500_GPADC_CAL_1,
+ AB8500_GPADC_CAL_2,
+ AB8500_GPADC_CAL_3,
+ AB8500_GPADC_CAL_4,
+ AB8500_GPADC_CAL_5,
+ AB8500_GPADC_CAL_6,
+ AB8500_GPADC_CAL_7,
+};
+
+static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
+{
+ int i;
+ int ret[ARRAY_SIZE(otp_cal_regs)];
+ u8 gpadc_cal[ARRAY_SIZE(otp_cal_regs)];
+
+ int vmain_high, vmain_low;
+ int btemp_high, btemp_low;
+ int vbat_high, vbat_low;
+
+ /* First we read all OTP registers and store the error code */
+ for (i = 0; i < ARRAY_SIZE(otp_cal_regs); i++) {
+ ret[i] = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_OTP_EMUL, otp_cal_regs[i], &gpadc_cal[i]);
+ if (ret[i] < 0)
+ dev_err(gpadc->dev, "%s: read otp reg 0x%02x failed\n",
+ __func__, otp_cal_regs[i]);
+ }
+
+ /*
+ * The ADC calibration data is stored in OTP registers.
+ * The layout of the calibration data is outlined below and a more
+ * detailed description can be found in UM0836
+ *
+ * vm_h/l = vmain_high/low
+ * bt_h/l = btemp_high/low
+ * vb_h/l = vbat_high/low
+ *
+ * Data bits:
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h9 | vm_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ *
+ *
+ * Ideal output ADC codes corresponding to injected input voltages
+ * during manufacturing are:
+ *
+ * vmain_high: Vin = 19500mV / ADC ideal code = 997
+ * vmain_low: Vin = 315mV / ADC ideal code = 16
+ * btemp_high: Vin = 1300mV / ADC ideal code = 985
+ * btemp_low: Vin = 21mV / ADC ideal code = 16
+ * vbat_high: Vin = 4700mV / ADC ideal code = 982
+ * vbat_low: Vin = 2380mV / ADC ideal code = 33
+ */
+
+ /* Calculate gain and offset for VMAIN if all reads succeeded */
+ if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
+ vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
+ ((gpadc_cal[1] & 0x3F) << 2) |
+ ((gpadc_cal[2] & 0xC0) >> 6));
+
+ vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+
+ gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
+ (19500 - 315) / (vmain_high - vmain_low);
+
+ gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE * 19500 -
+ (CALIB_SCALE * (19500 - 315) /
+ (vmain_high - vmain_low)) * vmain_high;
+ } else {
+ gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
+ }
+
+ /* Calculate gain and offset for BTEMP if all reads succeeded */
+ if (!(ret[2] < 0 || ret[3] < 0 || ret[4] < 0)) {
+ btemp_high = (((gpadc_cal[2] & 0x01) << 9) |
+ (gpadc_cal[3] << 1) |
+ ((gpadc_cal[4] & 0x80) >> 7));
+
+ btemp_low = ((gpadc_cal[4] & 0x7C) >> 2);
+
+ gpadc->cal_data[ADC_INPUT_BTEMP].gain =
+ CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low);
+
+ gpadc->cal_data[ADC_INPUT_BTEMP].offset = CALIB_SCALE * 1300 -
+ (CALIB_SCALE * (1300 - 21) /
+ (btemp_high - btemp_low)) * btemp_high;
+ } else {
+ gpadc->cal_data[ADC_INPUT_BTEMP].gain = 0;
+ }
+
+ /* Calculate gain and offset for VBAT if all reads succeeded */
+ if (!(ret[4] < 0 || ret[5] < 0 || ret[6] < 0)) {
+ vbat_high = (((gpadc_cal[4] & 0x03) << 8) | gpadc_cal[5]);
+ vbat_low = ((gpadc_cal[6] & 0xFC) >> 2);
+
+ gpadc->cal_data[ADC_INPUT_VBAT].gain = CALIB_SCALE *
+ (4700 - 2380) / (vbat_high - vbat_low);
+
+ gpadc->cal_data[ADC_INPUT_VBAT].offset = CALIB_SCALE * 4700 -
+ (CALIB_SCALE * (4700 - 2380) /
+ (vbat_high - vbat_low)) * vbat_high;
+ } else {
+ gpadc->cal_data[ADC_INPUT_VBAT].gain = 0;
+ }
+
+ dev_dbg(gpadc->dev, "VMAIN gain %llu offset %llu\n",
+ gpadc->cal_data[ADC_INPUT_VMAIN].gain,
+ gpadc->cal_data[ADC_INPUT_VMAIN].offset);
+
+ dev_dbg(gpadc->dev, "BTEMP gain %llu offset %llu\n",
+ gpadc->cal_data[ADC_INPUT_BTEMP].gain,
+ gpadc->cal_data[ADC_INPUT_BTEMP].offset);
+
+ dev_dbg(gpadc->dev, "VBAT gain %llu offset %llu\n",
+ gpadc->cal_data[ADC_INPUT_VBAT].gain,
+ gpadc->cal_data[ADC_INPUT_VBAT].offset);
+}
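To make the calibration arithmetic above concrete, here is a standalone sketch (plain user-space C, not driver code) that evaluates the VMAIN gain and offset from the ideal codes quoted in the comment (997 at 19500 mV, 16 at 315 mV) and converts a raw sample back to millivolts the same way ab8500_gpadc_ad_to_voltage() does.

#include <stdio.h>

#define CALIB_SCALE 1000	/* same scale factor as the driver */

int main(void)
{
	long vmain_high = 997, vmain_low = 16;	/* ideal ADC codes from the OTP comment */
	long gain, offset, ad_value, millivolt;

	gain = CALIB_SCALE * (19500 - 315) / (vmain_high - vmain_low);	/* 19556 */
	offset = CALIB_SCALE * 19500 - gain * vmain_high;		/* 2668 */

	/* Converting the high calibration point back should give 19500 mV. */
	ad_value = 997;
	millivolt = (ad_value * gain + offset) / CALIB_SCALE;

	printf("gain=%ld offset=%ld vmain=%ld mV\n", gain, offset, millivolt);
	return 0;
}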
+
+static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = kzalloc(sizeof(struct ab8500_gpadc), GFP_KERNEL);
+ if (!gpadc) {
+ dev_err(&pdev->dev, "Error: No memory\n");
+ return -ENOMEM;
+ }
+
+ gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
+ if (gpadc->irq < 0) {
+ dev_err(gpadc->dev, "failed to get platform irq-%d\n",
+ gpadc->irq);
+ ret = gpadc->irq;
+ goto fail;
+ }
+
+ gpadc->dev = &pdev->dev;
+ mutex_init(&gpadc->ab8500_gpadc_lock);
+
+ /* Initialize completion used to notify completion of conversion */
+ init_completion(&gpadc->ab8500_gpadc_complete);
+
+ /* Register interrupt - SwAdcComplete */
+ ret = request_threaded_irq(gpadc->irq, NULL,
+ ab8500_bm_gpswadcconvend_handler,
+ IRQF_NO_SUSPEND | IRQF_SHARED, "ab8500-gpadc", gpadc);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "Failed to register interrupt, irq: %d\n",
+ gpadc->irq);
+ goto fail;
+ }
+
+ /* VTVout LDO used to power up ab8500-GPADC */
+ gpadc->regu = regulator_get(&pdev->dev, "vddadc");
+ if (IS_ERR(gpadc->regu)) {
+ ret = PTR_ERR(gpadc->regu);
+ dev_err(gpadc->dev, "failed to get vtvout LDO\n");
+ goto fail_irq;
+ }
+ ab8500_gpadc_read_calibration_data(gpadc);
+ list_add_tail(&gpadc->node, &ab8500_gpadc_list);
+ dev_dbg(gpadc->dev, "probe success\n");
+ return 0;
+fail_irq:
+ free_irq(gpadc->irq, gpadc);
+fail:
+ kfree(gpadc);
+ gpadc = NULL;
+ return ret;
+}
+
+static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
+{
+ struct ab8500_gpadc *gpadc = platform_get_drvdata(pdev);
+
+ /* remove this gpadc entry from the list */
+ list_del(&gpadc->node);
+ /* remove interrupt - completion of Sw ADC conversion */
+ free_irq(gpadc->irq, gpadc);
+ /* disable VTVout LDO that is being used by GPADC */
+ regulator_put(gpadc->regu);
+ kfree(gpadc);
+ gpadc = NULL;
+ return 0;
+}
+
+static struct platform_driver ab8500_gpadc_driver = {
+ .probe = ab8500_gpadc_probe,
+ .remove = __devexit_p(ab8500_gpadc_remove),
+ .driver = {
+ .name = "ab8500-gpadc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_gpadc_init(void)
+{
+ return platform_driver_register(&ab8500_gpadc_driver);
+}
+
+static void __exit ab8500_gpadc_exit(void)
+{
+ platform_driver_unregister(&ab8500_gpadc_driver);
+}
+
+subsys_initcall_sync(ab8500_gpadc_init);
+module_exit(ab8500_gpadc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Arun R Murthy, Daniel Willerud, Johan Palsson");
+MODULE_ALIAS("platform:ab8500_gpadc");
+MODULE_DESCRIPTION("AB8500 GPADC driver");
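A sketch of how a client driver might consume the API exported above; the device-name string and the channel choice are assumptions noted in the comment.

/*
 * Illustrative consumer-side sketch (not part of the patch).  The name
 * "ab8500-gpadc.0" is an assumption -- it must match dev_name() of the
 * GPADC platform device on the target board -- and MAIN_BAT_V is one of
 * the channel identifiers from <linux/mfd/ab8500/gpadc.h>.
 */
#include <linux/err.h>
#include <linux/mfd/ab8500/gpadc.h>

static int example_read_vbat_mv(void)
{
	struct ab8500_gpadc *gpadc;

	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
	if (IS_ERR(gpadc))
		return PTR_ERR(gpadc);

	/* Returns the converted value in mV, or a negative errno on failure. */
	return ab8500_gpadc_convert(gpadc, MAIN_BAT_V);
}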
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
new file mode 100644
index 000000000000..392185965b39
--- /dev/null
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/sysctrl.h>
+
+static struct device *sysctrl_dev;
+
+static inline bool valid_bank(u8 bank)
+{
+ return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
+ (bank == AB8500_SYS_CTRL2_BLOCK));
+}
+
+int ab8500_sysctrl_read(u16 reg, u8 *value)
+{
+ u8 bank;
+
+ if (sysctrl_dev == NULL)
+ return -EAGAIN;
+
+ bank = (reg >> 8);
+ if (!valid_bank(bank))
+ return -EINVAL;
+
+ return abx500_get_register_interruptible(sysctrl_dev, bank,
+ (u8)(reg & 0xFF), value);
+}
+
+int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
+{
+ u8 bank;
+
+ if (sysctrl_dev == NULL)
+ return -EAGAIN;
+
+ bank = (reg >> 8);
+ if (!valid_bank(bank))
+ return -EINVAL;
+
+ return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
+ (u8)(reg & 0xFF), mask, value);
+}
+
+static int __devinit ab8500_sysctrl_probe(struct platform_device *pdev)
+{
+ sysctrl_dev = &pdev->dev;
+ return 0;
+}
+
+static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
+{
+ sysctrl_dev = NULL;
+ return 0;
+}
+
+static struct platform_driver ab8500_sysctrl_driver = {
+ .driver = {
+ .name = "ab8500-sysctrl",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_sysctrl_probe,
+ .remove = __devexit_p(ab8500_sysctrl_remove),
+};
+
+static int __init ab8500_sysctrl_init(void)
+{
+ return platform_driver_register(&ab8500_sysctrl_driver);
+}
+subsys_initcall(ab8500_sysctrl_init);
+
+MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com");
+MODULE_DESCRIPTION("AB8500 system control driver");
+MODULE_LICENSE("GPL v2");
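The sysctrl helpers pack the register bank into the high byte of the 16-bit reg argument and the register offset into the low byte, as valid_bank() implies. A minimal sketch, assuming the prototypes live in <linux/mfd/ab8500/sysctrl.h> and using a made-up 0x01 offset purely to show the encoding:

#include <linux/types.h>
#include <linux/mfd/ab8500.h>
#include <linux/mfd/ab8500/sysctrl.h>

/* Hypothetical register: bank AB8500_SYS_CTRL1_BLOCK, offset 0x01. */
#define EXAMPLE_SYSCTRL_REG	((AB8500_SYS_CTRL1_BLOCK << 8) | 0x01)

static int example_sysctrl_toggle(void)
{
	u8 val;
	int ret;

	ret = ab8500_sysctrl_read(EXAMPLE_SYSCTRL_REG, &val);
	if (ret < 0)
		return ret;

	/* Read-modify-write: set bit 0, leave the other bits alone. */
	return ab8500_sysctrl_write(EXAMPLE_SYSCTRL_REG, 0x01, 0x01);
}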
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index 3122139b4300..f1d88483112c 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -321,27 +321,27 @@ static int __devexit adp5520_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int adp5520_suspend(struct i2c_client *client,
- pm_message_t state)
+static int adp5520_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
return 0;
}
-static int adp5520_resume(struct i2c_client *client)
+static int adp5520_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
return 0;
}
-#else
-#define adp5520_suspend NULL
-#define adp5520_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(adp5520_pm, adp5520_suspend, adp5520_resume);
+
static const struct i2c_device_id adp5520_id[] = {
{ "pmic-adp5520", ID_ADP5520 },
{ "pmic-adp5501", ID_ADP5501 },
@@ -353,11 +353,10 @@ static struct i2c_driver adp5520_driver = {
.driver = {
.name = "adp5520",
.owner = THIS_MODULE,
+ .pm = &adp5520_pm,
},
.probe = adp5520_probe,
.remove = __devexit_p(adp5520_remove),
- .suspend = adp5520_suspend,
- .resume = adp5520_resume,
.id_table = adp5520_id,
};
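The adp5520 hunk above (and the pcf50633 one later in this series) follows the same conversion pattern: drop the legacy i2c suspend/resume callbacks and hang a dev_pm_ops off the driver core instead. A minimal sketch of that pattern, with a hypothetical "foo" driver standing in for the real ones:

#include <linux/i2c.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(dev, "suspend %s\n", client->name);
	/* ... put the chip into standby ... */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(dev, "resume %s\n", client->name);
	/* ... wake the chip back up ... */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm,	/* replaces .suspend/.resume on the i2c_driver */
	},
	/* .probe, .remove, .id_table as before */
};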
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index c45e6305b26f..0241f08fc00d 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -682,7 +682,7 @@ static struct mfd_cell asic3_cell_ds1wm = {
.name = "ds1wm",
.enable = ds1wm_enable,
.disable = ds1wm_disable,
- .driver_data = &ds1wm_pdata,
+ .mfd_data = &ds1wm_pdata,
.num_resources = ARRAY_SIZE(ds1wm_resources),
.resources = ds1wm_resources,
};
@@ -783,7 +783,7 @@ static struct mfd_cell asic3_cell_mmc = {
.name = "tmio-mmc",
.enable = asic3_mmc_enable,
.disable = asic3_mmc_disable,
- .driver_data = &asic3_mmc_data,
+ .mfd_data = &asic3_mmc_data,
.num_resources = ARRAY_SIZE(asic3_mmc_resources),
.resources = asic3_mmc_resources,
};
@@ -810,9 +810,6 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
ds1wm_resources[0].start >>= asic->bus_shift;
ds1wm_resources[0].end >>= asic->bus_shift;
- asic3_cell_ds1wm.platform_data = &asic3_cell_ds1wm;
- asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm);
-
/* MMC */
asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
mem_sdio->start, 0x400 >> asic->bus_shift);
@@ -824,9 +821,6 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
- asic3_cell_mmc.platform_data = &asic3_cell_mmc;
- asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc);
-
ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_ds1wm, 1, mem, asic->irq_base);
if (ret < 0)
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 59ca6f151e78..886a06871065 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -39,6 +39,37 @@ enum cs5535_mfd_bars {
NR_BARS,
};
+static int cs5535_mfd_res_enable(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ return -EIO;
+ }
+
+ if (!request_region(res->start, resource_size(res), DRV_NAME)) {
+ dev_err(&pdev->dev, "can't request region\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int cs5535_mfd_res_disable(struct platform_device *pdev)
+{
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ return -EIO;
+ }
+
+ release_region(res->start, resource_size(res));
+ return 0;
+}
+
static __devinitdata struct resource cs5535_mfd_resources[NR_BARS];
static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
@@ -65,12 +96,18 @@ static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
.name = "cs5535-pms",
.num_resources = 1,
.resources = &cs5535_mfd_resources[PMS_BAR],
+
+ .enable = cs5535_mfd_res_enable,
+ .disable = cs5535_mfd_res_disable,
},
{
.id = ACPI_BAR,
.name = "cs5535-acpi",
.num_resources = 1,
.resources = &cs5535_mfd_resources[ACPI_BAR],
+
+ .enable = cs5535_mfd_res_enable,
+ .disable = cs5535_mfd_res_disable,
},
};
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index fdd8a1b8bc67..414783b04849 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -119,12 +119,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
/* Voice codec interface client */
cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
cell->name = "davinci-vcif";
- cell->driver_data = davinci_vc;
+ cell->mfd_data = davinci_vc;
/* Voice codec CQ93VC client */
cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
cell->name = "cq93vc-codec";
- cell->driver_data = davinci_vc;
+ cell->mfd_data = davinci_vc;
ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
DAVINCI_VC_CELLS, NULL, 0);
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 7bc752272dc1..fb9770b39a32 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -117,7 +117,7 @@ static struct mfd_cell ds1wm_cell __initdata = {
.name = "ds1wm",
.enable = ds1wm_enable,
.disable = ds1wm_disable,
- .driver_data = &ds1wm_pdata,
+ .mfd_data = &ds1wm_pdata,
.num_resources = 2,
.resources = ds1wm_resources,
};
@@ -165,8 +165,6 @@ static int __init pasic3_probe(struct platform_device *pdev)
ds1wm_pdata.clock_rate = pdata->clock_rate;
/* the first 5 PASIC3 registers control the DS1WM */
ds1wm_resources[0].end = (5 << asic->bus_shift) - 1;
- ds1wm_cell.platform_data = &ds1wm_cell;
- ds1wm_cell.data_size = sizeof(ds1wm_cell);
ret = mfd_add_devices(&pdev->dev, pdev->id,
&ds1wm_cell, 1, r, irq);
if (ret < 0)
@@ -174,9 +172,6 @@ static int __init pasic3_probe(struct platform_device *pdev)
}
if (pdata && pdata->led_pdata) {
- led_cell.driver_data = pdata->led_pdata;
- led_cell.platform_data = &led_cell;
- led_cell.data_size = sizeof(ds1wm_cell);
ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0);
if (ret < 0)
dev_warn(dev, "failed to register LED device\n");
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 36a166bcdb08..fc4191137e90 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -86,8 +86,7 @@ static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
/* Add platform data */
pdata->modno = modno;
- cell->platform_data = pdata;
- cell->data_size = sizeof(*pdata);
+ cell->mfd_data = pdata;
/* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
res->flags = IORESOURCE_MEM;
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 0cc59795f600..aa518b9beaf5 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -232,8 +232,6 @@ const struct mfd_cell jz4740_adc_cells[] = {
.name = "jz4740-hwmon",
.num_resources = ARRAY_SIZE(jz4740_hwmon_resources),
.resources = jz4740_hwmon_resources,
- .platform_data = (void *)&jz4740_adc_cells[0],
- .data_size = sizeof(struct mfd_cell),
.enable = jz4740_adc_cell_enable,
.disable = jz4740_adc_cell_disable,
@@ -243,8 +241,6 @@ const struct mfd_cell jz4740_adc_cells[] = {
.name = "jz4740-battery",
.num_resources = ARRAY_SIZE(jz4740_battery_resources),
.resources = jz4740_battery_resources,
- .platform_data = (void *)&jz4740_adc_cells[1],
- .data_size = sizeof(struct mfd_cell),
.enable = jz4740_adc_cell_enable,
.disable = jz4740_adc_cell_disable,
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 51b2f6065a0b..ea3f52c07ef7 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -61,6 +61,7 @@ static struct mfd_cell lpc_sch_cells[] = {
static struct pci_device_id lpc_sch_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
@@ -70,6 +71,7 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
{
unsigned int base_addr_cfg;
unsigned short base_addr;
+ int i;
pci_read_config_dword(dev, SMBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
@@ -99,7 +101,10 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
gpio_sch_resource.start = base_addr;
gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
- return mfd_add_devices(&dev->dev, -1,
+ for (i = 0; i < ARRAY_SIZE(lpc_sch_cells); i++)
+ lpc_sch_cells[i].id = id->device;
+
+ return mfd_add_devices(&dev->dev, 0,
lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0);
}
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
new file mode 100644
index 000000000000..5d1fca0277ef
--- /dev/null
+++ b/drivers/mfd/max8997.c
@@ -0,0 +1,427 @@
+/*
+ * max8997.c - mfd core driver for the Maxim 8966 and 8997
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8998.c
+ */
+
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+#define I2C_ADDR_PMIC (0xCC >> 1)
+#define I2C_ADDR_MUIC (0x4A >> 1)
+#define I2C_ADDR_BATTERY (0x6C >> 1)
+#define I2C_ADDR_RTC (0x0C >> 1)
+#define I2C_ADDR_HAPTIC (0x90 >> 1)
+
+static struct mfd_cell max8997_devs[] = {
+ { .name = "max8997-pmic", },
+ { .name = "max8997-rtc", },
+ { .name = "max8997-battery", },
+ { .name = "max8997-haptic", },
+ { .name = "max8997-muic", },
+ { .name = "max8997-flash", },
+};
+
+int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ mutex_unlock(&max8997->iolock);
+ if (ret < 0)
+ return ret;
+
+ ret &= 0xff;
+ *dest = ret;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max8997_read_reg);
+
+int max8997_bulk_read(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_read_i2c_block_data(i2c, reg, count, buf);
+ mutex_unlock(&max8997->iolock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max8997_bulk_read);
+
+int max8997_write_reg(struct i2c_client *i2c, u8 reg, u8 value)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_write_byte_data(i2c, reg, value);
+ mutex_unlock(&max8997->iolock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max8997_write_reg);
+
+int max8997_bulk_write(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_write_i2c_block_data(i2c, reg, count, buf);
+ mutex_unlock(&max8997->iolock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max8997_bulk_write);
+
+int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ if (ret >= 0) {
+ u8 old_val = ret & 0xff;
+ u8 new_val = (val & mask) | (old_val & (~mask));
+ ret = i2c_smbus_write_byte_data(i2c, reg, new_val);
+ }
+ mutex_unlock(&max8997->iolock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max8997_update_reg);
+
+static int max8997_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max8997_dev *max8997;
+ struct max8997_platform_data *pdata = i2c->dev.platform_data;
+ int ret = 0;
+
+ max8997 = kzalloc(sizeof(struct max8997_dev), GFP_KERNEL);
+ if (max8997 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, max8997);
+ max8997->dev = &i2c->dev;
+ max8997->i2c = i2c;
+ max8997->type = id->driver_data;
+
+ if (!pdata) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ max8997->wakeup = pdata->wakeup;
+
+ mutex_init(&max8997->iolock);
+
+ max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
+ i2c_set_clientdata(max8997->rtc, max8997);
+ max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+ i2c_set_clientdata(max8997->haptic, max8997);
+ max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+ i2c_set_clientdata(max8997->muic, max8997);
+
+ pm_runtime_set_active(max8997->dev);
+
+ ret = mfd_add_devices(max8997->dev, -1, max8997_devs,
+ ARRAY_SIZE(max8997_devs),
+ NULL, 0);
+
+ /*
+ * TODO: enable others (flash, muic, rtc, battery, ...) and
+ * check the return value
+ */
+
+ if (ret < 0)
+ goto err_mfd;
+
+ return ret;
+
+err_mfd:
+ mfd_remove_devices(max8997->dev);
+ i2c_unregister_device(max8997->muic);
+ i2c_unregister_device(max8997->haptic);
+ i2c_unregister_device(max8997->rtc);
+err:
+ kfree(max8997);
+ return ret;
+}
+
+static int max8997_i2c_remove(struct i2c_client *i2c)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(max8997->dev);
+ i2c_unregister_device(max8997->muic);
+ i2c_unregister_device(max8997->haptic);
+ i2c_unregister_device(max8997->rtc);
+ kfree(max8997);
+
+ return 0;
+}
+
+static const struct i2c_device_id max8997_i2c_id[] = {
+ { "max8997", TYPE_MAX8997 },
+ { "max8966", TYPE_MAX8966 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max8997_i2c_id);
+
+u8 max8997_dumpaddr_pmic[] = {
+ MAX8997_REG_INT1MSK,
+ MAX8997_REG_INT2MSK,
+ MAX8997_REG_INT3MSK,
+ MAX8997_REG_INT4MSK,
+ MAX8997_REG_MAINCON1,
+ MAX8997_REG_MAINCON2,
+ MAX8997_REG_BUCKRAMP,
+ MAX8997_REG_BUCK1CTRL,
+ MAX8997_REG_BUCK1DVS1,
+ MAX8997_REG_BUCK1DVS2,
+ MAX8997_REG_BUCK1DVS3,
+ MAX8997_REG_BUCK1DVS4,
+ MAX8997_REG_BUCK1DVS5,
+ MAX8997_REG_BUCK1DVS6,
+ MAX8997_REG_BUCK1DVS7,
+ MAX8997_REG_BUCK1DVS8,
+ MAX8997_REG_BUCK2CTRL,
+ MAX8997_REG_BUCK2DVS1,
+ MAX8997_REG_BUCK2DVS2,
+ MAX8997_REG_BUCK2DVS3,
+ MAX8997_REG_BUCK2DVS4,
+ MAX8997_REG_BUCK2DVS5,
+ MAX8997_REG_BUCK2DVS6,
+ MAX8997_REG_BUCK2DVS7,
+ MAX8997_REG_BUCK2DVS8,
+ MAX8997_REG_BUCK3CTRL,
+ MAX8997_REG_BUCK3DVS,
+ MAX8997_REG_BUCK4CTRL,
+ MAX8997_REG_BUCK4DVS,
+ MAX8997_REG_BUCK5CTRL,
+ MAX8997_REG_BUCK5DVS1,
+ MAX8997_REG_BUCK5DVS2,
+ MAX8997_REG_BUCK5DVS3,
+ MAX8997_REG_BUCK5DVS4,
+ MAX8997_REG_BUCK5DVS5,
+ MAX8997_REG_BUCK5DVS6,
+ MAX8997_REG_BUCK5DVS7,
+ MAX8997_REG_BUCK5DVS8,
+ MAX8997_REG_BUCK6CTRL,
+ MAX8997_REG_BUCK6BPSKIPCTRL,
+ MAX8997_REG_BUCK7CTRL,
+ MAX8997_REG_BUCK7DVS,
+ MAX8997_REG_LDO1CTRL,
+ MAX8997_REG_LDO2CTRL,
+ MAX8997_REG_LDO3CTRL,
+ MAX8997_REG_LDO4CTRL,
+ MAX8997_REG_LDO5CTRL,
+ MAX8997_REG_LDO6CTRL,
+ MAX8997_REG_LDO7CTRL,
+ MAX8997_REG_LDO8CTRL,
+ MAX8997_REG_LDO9CTRL,
+ MAX8997_REG_LDO10CTRL,
+ MAX8997_REG_LDO11CTRL,
+ MAX8997_REG_LDO12CTRL,
+ MAX8997_REG_LDO13CTRL,
+ MAX8997_REG_LDO14CTRL,
+ MAX8997_REG_LDO15CTRL,
+ MAX8997_REG_LDO16CTRL,
+ MAX8997_REG_LDO17CTRL,
+ MAX8997_REG_LDO18CTRL,
+ MAX8997_REG_LDO21CTRL,
+ MAX8997_REG_MBCCTRL1,
+ MAX8997_REG_MBCCTRL2,
+ MAX8997_REG_MBCCTRL3,
+ MAX8997_REG_MBCCTRL4,
+ MAX8997_REG_MBCCTRL5,
+ MAX8997_REG_MBCCTRL6,
+ MAX8997_REG_OTPCGHCVS,
+ MAX8997_REG_SAFEOUTCTRL,
+ MAX8997_REG_LBCNFG1,
+ MAX8997_REG_LBCNFG2,
+ MAX8997_REG_BBCCTRL,
+
+ MAX8997_REG_FLASH1_CUR,
+ MAX8997_REG_FLASH2_CUR,
+ MAX8997_REG_MOVIE_CUR,
+ MAX8997_REG_GSMB_CUR,
+ MAX8997_REG_BOOST_CNTL,
+ MAX8997_REG_LEN_CNTL,
+ MAX8997_REG_FLASH_CNTL,
+ MAX8997_REG_WDT_CNTL,
+ MAX8997_REG_MAXFLASH1,
+ MAX8997_REG_MAXFLASH2,
+ MAX8997_REG_FLASHSTATUSMASK,
+
+ MAX8997_REG_GPIOCNTL1,
+ MAX8997_REG_GPIOCNTL2,
+ MAX8997_REG_GPIOCNTL3,
+ MAX8997_REG_GPIOCNTL4,
+ MAX8997_REG_GPIOCNTL5,
+ MAX8997_REG_GPIOCNTL6,
+ MAX8997_REG_GPIOCNTL7,
+ MAX8997_REG_GPIOCNTL8,
+ MAX8997_REG_GPIOCNTL9,
+ MAX8997_REG_GPIOCNTL10,
+ MAX8997_REG_GPIOCNTL11,
+ MAX8997_REG_GPIOCNTL12,
+
+ MAX8997_REG_LDO1CONFIG,
+ MAX8997_REG_LDO2CONFIG,
+ MAX8997_REG_LDO3CONFIG,
+ MAX8997_REG_LDO4CONFIG,
+ MAX8997_REG_LDO5CONFIG,
+ MAX8997_REG_LDO6CONFIG,
+ MAX8997_REG_LDO7CONFIG,
+ MAX8997_REG_LDO8CONFIG,
+ MAX8997_REG_LDO9CONFIG,
+ MAX8997_REG_LDO10CONFIG,
+ MAX8997_REG_LDO11CONFIG,
+ MAX8997_REG_LDO12CONFIG,
+ MAX8997_REG_LDO13CONFIG,
+ MAX8997_REG_LDO14CONFIG,
+ MAX8997_REG_LDO15CONFIG,
+ MAX8997_REG_LDO16CONFIG,
+ MAX8997_REG_LDO17CONFIG,
+ MAX8997_REG_LDO18CONFIG,
+ MAX8997_REG_LDO21CONFIG,
+
+ MAX8997_REG_DVSOKTIMER1,
+ MAX8997_REG_DVSOKTIMER2,
+ MAX8997_REG_DVSOKTIMER4,
+ MAX8997_REG_DVSOKTIMER5,
+};
+
+u8 max8997_dumpaddr_muic[] = {
+ MAX8997_MUIC_REG_INTMASK1,
+ MAX8997_MUIC_REG_INTMASK2,
+ MAX8997_MUIC_REG_INTMASK3,
+ MAX8997_MUIC_REG_CDETCTRL,
+ MAX8997_MUIC_REG_CONTROL1,
+ MAX8997_MUIC_REG_CONTROL2,
+ MAX8997_MUIC_REG_CONTROL3,
+};
+
+u8 max8997_dumpaddr_haptic[] = {
+ MAX8997_HAPTIC_REG_CONF1,
+ MAX8997_HAPTIC_REG_CONF2,
+ MAX8997_HAPTIC_REG_DRVCONF,
+ MAX8997_HAPTIC_REG_CYCLECONF1,
+ MAX8997_HAPTIC_REG_CYCLECONF2,
+ MAX8997_HAPTIC_REG_SIGCONF1,
+ MAX8997_HAPTIC_REG_SIGCONF2,
+ MAX8997_HAPTIC_REG_SIGCONF3,
+ MAX8997_HAPTIC_REG_SIGCONF4,
+ MAX8997_HAPTIC_REG_SIGDC1,
+ MAX8997_HAPTIC_REG_SIGDC2,
+ MAX8997_HAPTIC_REG_SIGPWMDC1,
+ MAX8997_HAPTIC_REG_SIGPWMDC2,
+ MAX8997_HAPTIC_REG_SIGPWMDC3,
+ MAX8997_HAPTIC_REG_SIGPWMDC4,
+};
+
+static int max8997_freeze(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_pmic); i++)
+ max8997_read_reg(i2c, max8997_dumpaddr_pmic[i],
+ &max8997->reg_dump[i]);
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_muic); i++)
+ max8997_read_reg(i2c, max8997_dumpaddr_muic[i],
+ &max8997->reg_dump[i + MAX8997_REG_PMIC_END]);
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_haptic); i++)
+ max8997_read_reg(i2c, max8997_dumpaddr_haptic[i],
+ &max8997->reg_dump[i + MAX8997_REG_PMIC_END +
+ MAX8997_MUIC_REG_END]);
+
+ return 0;
+}
+
+static int max8997_restore(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_pmic); i++)
+ max8997_write_reg(i2c, max8997_dumpaddr_pmic[i],
+ max8997->reg_dump[i]);
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_muic); i++)
+ max8997_write_reg(i2c, max8997_dumpaddr_muic[i],
+ max8997->reg_dump[i + MAX8997_REG_PMIC_END]);
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_haptic); i++)
+ max8997_write_reg(i2c, max8997_dumpaddr_haptic[i],
+ max8997->reg_dump[i + MAX8997_REG_PMIC_END +
+ MAX8997_MUIC_REG_END]);
+
+ return 0;
+}
+
+static const struct dev_pm_ops max8997_pm = {
+ .freeze = max8997_freeze,
+ .restore = max8997_restore,
+};
+
+static struct i2c_driver max8997_i2c_driver = {
+ .driver = {
+ .name = "max8997",
+ .owner = THIS_MODULE,
+ .pm = &max8997_pm,
+ },
+ .probe = max8997_i2c_probe,
+ .remove = max8997_i2c_remove,
+ .id_table = max8997_i2c_id,
+};
+
+static int __init max8997_i2c_init(void)
+{
+ return i2c_add_driver(&max8997_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(max8997_i2c_init);
+
+static void __exit max8997_i2c_exit(void)
+{
+ i2c_del_driver(&max8997_i2c_driver);
+}
+module_exit(max8997_i2c_exit);
+
+MODULE_DESCRIPTION("MAXIM 8997 multi-function core driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
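A sketch of the read-modify-write helper in use from a hypothetical sub-driver; it assumes max8997_update_reg() and MAX8997_REG_MAINCON1 are declared in <linux/mfd/max8997-private.h> (the header the core includes), and the choice of bit 3 is purely illustrative.

#include <linux/mfd/max8997-private.h>

static int example_set_maincon1_bit(struct max8997_dev *max8997)
{
	/* Set bit 3 of MAINCON1 without disturbing the other bits. */
	return max8997_update_reg(max8997->i2c, MAX8997_REG_MAINCON1,
				  1 << 3, 1 << 3);
}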
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index bbfe86732602..c00214257da2 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -233,7 +233,7 @@ struct max8998_reg_dump {
u8 val;
};
#define SAVE_ITEM(x) { .addr = (x), .val = 0x0, }
-struct max8998_reg_dump max8998_dump[] = {
+static struct max8998_reg_dump max8998_dump[] = {
SAVE_ITEM(MAX8998_REG_IRQM1),
SAVE_ITEM(MAX8998_REG_IRQM2),
SAVE_ITEM(MAX8998_REG_IRQM3),
@@ -298,7 +298,7 @@ static int max8998_restore(struct device *dev)
return 0;
}
-const struct dev_pm_ops max8998_pm = {
+static const struct dev_pm_ops max8998_pm = {
.suspend = max8998_suspend,
.resume = max8998_resume,
.freeze = max8998_freeze,
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index b9fcaf0004da..668634e89e81 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -683,14 +683,13 @@ out:
EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
- const char *format, void *pdata, size_t pdata_size)
+ const char *format, void *pdata)
{
char buf[30];
const char *name = mc13xxx_get_chipname(mc13xxx);
struct mfd_cell cell = {
- .platform_data = pdata,
- .data_size = pdata_size,
+ .mfd_data = pdata,
};
/* there is no asnprintf in the kernel :-( */
@@ -706,7 +705,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
{
- return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL, 0);
+ return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL);
}
static int mc13xxx_probe(struct spi_device *spi)
@@ -764,13 +763,8 @@ err_revision:
mc13xxx_add_subdevice(mc13xxx, "%s-codec");
if (pdata->flags & MC13XXX_USE_REGULATOR) {
- struct mc13xxx_regulator_platform_data regulator_pdata = {
- .num_regulators = pdata->num_regulators,
- .regulators = pdata->regulators,
- };
-
mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
- &regulator_pdata, sizeof(regulator_pdata));
+ &pdata->regulators);
}
if (pdata->flags & MC13XXX_USE_RTC)
@@ -779,10 +773,8 @@ err_revision:
if (pdata->flags & MC13XXX_USE_TOUCHSCREEN)
mc13xxx_add_subdevice(mc13xxx, "%s-ts");
- if (pdata->flags & MC13XXX_USE_LED) {
- mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led",
- pdata->leds, sizeof(*pdata->leds));
- }
+ if (pdata->flags & MC13XXX_USE_LED)
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led", pdata->leds);
return 0;
}
@@ -811,6 +803,7 @@ static const struct spi_device_id mc13xxx_device_id[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
static struct spi_driver mc13xxx_driver = {
.id_table = mc13xxx_device_id,
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index d83ad0f141af..79eda0264fb2 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -18,6 +18,43 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+int mfd_cell_enable(struct platform_device *pdev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int err = 0;
+
+ /* only call enable hook if the cell wasn't previously enabled */
+ if (atomic_inc_return(cell->usage_count) == 1)
+ err = cell->enable(pdev);
+
+ /* if the enable hook failed, decrement counter to allow retries */
+ if (err)
+ atomic_dec(cell->usage_count);
+
+ return err;
+}
+EXPORT_SYMBOL(mfd_cell_enable);
+
+int mfd_cell_disable(struct platform_device *pdev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int err = 0;
+
+ /* only disable if no other clients are using it */
+ if (atomic_dec_return(cell->usage_count) == 0)
+ err = cell->disable(pdev);
+
+ /* if the disable hook failed, increment to allow retries */
+ if (err)
+ atomic_inc(cell->usage_count);
+
+ /* sanity check; did someone call disable too many times? */
+ WARN_ON(atomic_read(cell->usage_count) < 0);
+
+ return err;
+}
+EXPORT_SYMBOL(mfd_cell_disable);
+
static int mfd_add_device(struct device *parent, int id,
const struct mfd_cell *cell,
struct resource *mem_base,
@@ -37,14 +74,10 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_device;
pdev->dev.parent = parent;
- platform_set_drvdata(pdev, cell->driver_data);
- if (cell->data_size) {
- ret = platform_device_add_data(pdev,
- cell->platform_data, cell->data_size);
- if (ret)
- goto fail_res;
- }
+ ret = platform_device_add_data(pdev, cell, sizeof(*cell));
+ if (ret)
+ goto fail_res;
for (r = 0; r < cell->num_resources; r++) {
res[r].name = cell->resources[r].name;
@@ -100,14 +133,22 @@ fail_alloc:
}
int mfd_add_devices(struct device *parent, int id,
- const struct mfd_cell *cells, int n_devs,
+ struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
int irq_base)
{
int i;
int ret = 0;
+ atomic_t *cnts;
+
+ /* initialize reference counting for all cells */
+ cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
+ if (!cnts)
+ return -ENOMEM;
for (i = 0; i < n_devs; i++) {
+ atomic_set(&cnts[i], 0);
+ cells[i].usage_count = &cnts[i];
ret = mfd_add_device(parent, id, cells + i, mem_base, irq_base);
if (ret)
break;
@@ -120,17 +161,89 @@ int mfd_add_devices(struct device *parent, int id,
}
EXPORT_SYMBOL(mfd_add_devices);
-static int mfd_remove_devices_fn(struct device *dev, void *unused)
+static int mfd_remove_devices_fn(struct device *dev, void *c)
{
- platform_device_unregister(to_platform_device(dev));
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ atomic_t **usage_count = c;
+
+ /* find the base address of usage_count pointers (for freeing) */
+ if (!*usage_count || (cell->usage_count < *usage_count))
+ *usage_count = cell->usage_count;
+
+ platform_device_unregister(pdev);
return 0;
}
void mfd_remove_devices(struct device *parent)
{
- device_for_each_child(parent, NULL, mfd_remove_devices_fn);
+ atomic_t *cnts = NULL;
+
+ device_for_each_child(parent, &cnts, mfd_remove_devices_fn);
+ kfree(cnts);
}
EXPORT_SYMBOL(mfd_remove_devices);
+static int add_shared_platform_device(const char *cell, const char *name)
+{
+ struct mfd_cell cell_entry;
+ struct device *dev;
+ struct platform_device *pdev;
+ int err;
+
+ /* check if we've already registered a device (don't fail if we have) */
+ if (bus_find_device_by_name(&platform_bus_type, NULL, name))
+ return 0;
+
+ /* fetch the parent cell's device (should already be registered!) */
+ dev = bus_find_device_by_name(&platform_bus_type, NULL, cell);
+ if (!dev) {
+ printk(KERN_ERR "failed to find device for cell %s\n", cell);
+ return -ENODEV;
+ }
+ pdev = to_platform_device(dev);
+ memcpy(&cell_entry, mfd_get_cell(pdev), sizeof(cell_entry));
+
+ WARN_ON(!cell_entry.enable);
+
+ cell_entry.name = name;
+ err = mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0);
+ if (err)
+ dev_err(dev, "MFD add devices failed: %d\n", err);
+ return err;
+}
+
+int mfd_shared_platform_driver_register(struct platform_driver *drv,
+ const char *cellname)
+{
+ int err;
+
+ err = add_shared_platform_device(cellname, drv->driver.name);
+ if (err)
+ printk(KERN_ERR "failed to add platform device %s\n",
+ drv->driver.name);
+
+ err = platform_driver_register(drv);
+ if (err)
+ printk(KERN_ERR "failed to add platform driver %s\n",
+ drv->driver.name);
+
+ return err;
+}
+EXPORT_SYMBOL(mfd_shared_platform_driver_register);
+
+void mfd_shared_platform_driver_unregister(struct platform_driver *drv)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_name(&platform_bus_type, NULL,
+ drv->driver.name);
+ if (dev)
+ platform_device_unregister(to_platform_device(dev));
+
+ platform_driver_unregister(drv);
+}
+EXPORT_SYMBOL(mfd_shared_platform_driver_unregister);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
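A sketch of how a sub-device driver would use the new refcounted hooks introduced above; the "foo" names are hypothetical, and the prototypes are assumed to be exported via <linux/mfd/core.h>.

#include <linux/platform_device.h>
#include <linux/mfd/core.h>

static int foo_cell_probe(struct platform_device *pdev)
{
	int ret;

	/* Power up shared resources; the enable hook only runs for the first user. */
	ret = mfd_cell_enable(pdev);
	if (ret)
		return ret;

	/* ... set up the sub-device ... */
	return 0;
}

static int foo_cell_remove(struct platform_device *pdev)
{
	/* ... tear down the sub-device ... */

	/* The disable hook only runs once the last user has gone away. */
	return mfd_cell_disable(pdev);
}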
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 501ce13b693e..c1306ed43e3c 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/pcf50633/core.h>
@@ -230,27 +231,26 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
}
}
-#ifdef CONFIG_PM
-static int pcf50633_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pcf50633_suspend(struct device *dev)
{
- struct pcf50633 *pcf;
- pcf = i2c_get_clientdata(client);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pcf50633 *pcf = i2c_get_clientdata(client);
return pcf50633_irq_suspend(pcf);
}
-static int pcf50633_resume(struct i2c_client *client)
+static int pcf50633_resume(struct device *dev)
{
- struct pcf50633 *pcf;
- pcf = i2c_get_clientdata(client);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pcf50633 *pcf = i2c_get_clientdata(client);
return pcf50633_irq_resume(pcf);
}
-#else
-#define pcf50633_suspend NULL
-#define pcf50633_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume);
+
static int __devinit pcf50633_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
@@ -360,16 +360,16 @@ static struct i2c_device_id pcf50633_id_table[] = {
{"pcf50633", 0x73},
{/* end of list */}
};
+MODULE_DEVICE_TABLE(i2c, pcf50633_id_table);
static struct i2c_driver pcf50633_driver = {
.driver = {
.name = "pcf50633",
+ .pm = &pcf50633_pm,
},
.id_table = pcf50633_id_table,
.probe = pcf50633_probe,
.remove = __devexit_p(pcf50633_remove),
- .suspend = pcf50633_suspend,
- .resume = pcf50633_resume,
};
static int __init pcf50633_init(void)
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 50922975bda3..193c940225b5 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -61,12 +61,12 @@ static struct mfd_cell rdc321x_sb_cells[] = {
.name = "rdc321x-wdt",
.resources = rdc321x_wdt_resource,
.num_resources = ARRAY_SIZE(rdc321x_wdt_resource),
- .driver_data = &rdc321x_wdt_pdata,
+ .mfd_data = &rdc321x_wdt_pdata,
}, {
.name = "rdc321x-gpio",
.resources = rdc321x_gpio_resources,
.num_resources = ARRAY_SIZE(rdc321x_gpio_resources),
- .driver_data = &rdc321x_gpio_pdata,
+ .mfd_data = &rdc321x_gpio_pdata,
},
};
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index 0a7df44a93c0..53a63024bf11 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -146,9 +146,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
}
memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc));
- priv->cell_mmc.driver_data = mmc_data;
- priv->cell_mmc.platform_data = &priv->cell_mmc;
- priv->cell_mmc.data_size = sizeof(priv->cell_mmc);
+ priv->cell_mmc.mfd_data = mmc_data;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 5de3a760ea1e..df3702c1756d 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -133,10 +133,10 @@ static unsigned long decode_div(unsigned long pll2, unsigned long val,
static void sm501_dump_clk(struct sm501_devdata *sm)
{
- unsigned long misct = readl(sm->regs + SM501_MISC_TIMING);
- unsigned long pm0 = readl(sm->regs + SM501_POWER_MODE_0_CLOCK);
- unsigned long pm1 = readl(sm->regs + SM501_POWER_MODE_1_CLOCK);
- unsigned long pmc = readl(sm->regs + SM501_POWER_MODE_CONTROL);
+ unsigned long misct = smc501_readl(sm->regs + SM501_MISC_TIMING);
+ unsigned long pm0 = smc501_readl(sm->regs + SM501_POWER_MODE_0_CLOCK);
+ unsigned long pm1 = smc501_readl(sm->regs + SM501_POWER_MODE_1_CLOCK);
+ unsigned long pmc = smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL);
unsigned long sdclk0, sdclk1;
unsigned long pll2 = 0;
@@ -193,29 +193,29 @@ static void sm501_dump_regs(struct sm501_devdata *sm)
void __iomem *regs = sm->regs;
dev_info(sm->dev, "System Control %08x\n",
- readl(regs + SM501_SYSTEM_CONTROL));
+ smc501_readl(regs + SM501_SYSTEM_CONTROL));
dev_info(sm->dev, "Misc Control %08x\n",
- readl(regs + SM501_MISC_CONTROL));
+ smc501_readl(regs + SM501_MISC_CONTROL));
dev_info(sm->dev, "GPIO Control Low %08x\n",
- readl(regs + SM501_GPIO31_0_CONTROL));
+ smc501_readl(regs + SM501_GPIO31_0_CONTROL));
dev_info(sm->dev, "GPIO Control Hi %08x\n",
- readl(regs + SM501_GPIO63_32_CONTROL));
+ smc501_readl(regs + SM501_GPIO63_32_CONTROL));
dev_info(sm->dev, "DRAM Control %08x\n",
- readl(regs + SM501_DRAM_CONTROL));
+ smc501_readl(regs + SM501_DRAM_CONTROL));
dev_info(sm->dev, "Arbitration Ctrl %08x\n",
- readl(regs + SM501_ARBTRTN_CONTROL));
+ smc501_readl(regs + SM501_ARBTRTN_CONTROL));
dev_info(sm->dev, "Misc Timing %08x\n",
- readl(regs + SM501_MISC_TIMING));
+ smc501_readl(regs + SM501_MISC_TIMING));
}
static void sm501_dump_gate(struct sm501_devdata *sm)
{
dev_info(sm->dev, "CurrentGate %08x\n",
- readl(sm->regs + SM501_CURRENT_GATE));
+ smc501_readl(sm->regs + SM501_CURRENT_GATE));
dev_info(sm->dev, "CurrentClock %08x\n",
- readl(sm->regs + SM501_CURRENT_CLOCK));
+ smc501_readl(sm->regs + SM501_CURRENT_CLOCK));
dev_info(sm->dev, "PowerModeControl %08x\n",
- readl(sm->regs + SM501_POWER_MODE_CONTROL));
+ smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL));
}
#else
@@ -231,7 +231,7 @@ static inline void sm501_dump_clk(struct sm501_devdata *sm) { }
static void sm501_sync_regs(struct sm501_devdata *sm)
{
- readl(sm->regs);
+ smc501_readl(sm->regs);
}
static inline void sm501_mdelay(struct sm501_devdata *sm, unsigned int delay)
@@ -261,11 +261,11 @@ int sm501_misc_control(struct device *dev,
spin_lock_irqsave(&sm->reg_lock, save);
- misc = readl(sm->regs + SM501_MISC_CONTROL);
+ misc = smc501_readl(sm->regs + SM501_MISC_CONTROL);
to = (misc & ~clear) | set;
if (to != misc) {
- writel(to, sm->regs + SM501_MISC_CONTROL);
+ smc501_writel(to, sm->regs + SM501_MISC_CONTROL);
sm501_sync_regs(sm);
dev_dbg(sm->dev, "MISC_CONTROL %08lx\n", misc);
@@ -294,11 +294,11 @@ unsigned long sm501_modify_reg(struct device *dev,
spin_lock_irqsave(&sm->reg_lock, save);
- data = readl(sm->regs + reg);
+ data = smc501_readl(sm->regs + reg);
data |= set;
data &= ~clear;
- writel(data, sm->regs + reg);
+ smc501_writel(data, sm->regs + reg);
sm501_sync_regs(sm);
spin_unlock_irqrestore(&sm->reg_lock, save);
@@ -322,9 +322,9 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
mutex_lock(&sm->clock_lock);
- mode = readl(sm->regs + SM501_POWER_MODE_CONTROL);
- gate = readl(sm->regs + SM501_CURRENT_GATE);
- clock = readl(sm->regs + SM501_CURRENT_CLOCK);
+ mode = smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL);
+ gate = smc501_readl(sm->regs + SM501_CURRENT_GATE);
+ clock = smc501_readl(sm->regs + SM501_CURRENT_CLOCK);
mode &= 3; /* get current power mode */
@@ -356,14 +356,14 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
switch (mode) {
case 1:
- writel(gate, sm->regs + SM501_POWER_MODE_0_GATE);
- writel(clock, sm->regs + SM501_POWER_MODE_0_CLOCK);
+ smc501_writel(gate, sm->regs + SM501_POWER_MODE_0_GATE);
+ smc501_writel(clock, sm->regs + SM501_POWER_MODE_0_CLOCK);
mode = 0;
break;
case 2:
case 0:
- writel(gate, sm->regs + SM501_POWER_MODE_1_GATE);
- writel(clock, sm->regs + SM501_POWER_MODE_1_CLOCK);
+ smc501_writel(gate, sm->regs + SM501_POWER_MODE_1_GATE);
+ smc501_writel(clock, sm->regs + SM501_POWER_MODE_1_CLOCK);
mode = 1;
break;
@@ -372,7 +372,7 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
goto already;
}
- writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
+ smc501_writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
sm501_sync_regs(sm);
dev_dbg(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n",
@@ -519,9 +519,9 @@ unsigned long sm501_set_clock(struct device *dev,
unsigned long req_freq)
{
struct sm501_devdata *sm = dev_get_drvdata(dev);
- unsigned long mode = readl(sm->regs + SM501_POWER_MODE_CONTROL);
- unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE);
- unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK);
+ unsigned long mode = smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL);
+ unsigned long gate = smc501_readl(sm->regs + SM501_CURRENT_GATE);
+ unsigned long clock = smc501_readl(sm->regs + SM501_CURRENT_CLOCK);
unsigned char reg;
unsigned int pll_reg = 0;
unsigned long sm501_freq; /* the actual frequency achieved */
@@ -592,9 +592,9 @@ unsigned long sm501_set_clock(struct device *dev,
mutex_lock(&sm->clock_lock);
- mode = readl(sm->regs + SM501_POWER_MODE_CONTROL);
- gate = readl(sm->regs + SM501_CURRENT_GATE);
- clock = readl(sm->regs + SM501_CURRENT_CLOCK);
+ mode = smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL);
+ gate = smc501_readl(sm->regs + SM501_CURRENT_GATE);
+ clock = smc501_readl(sm->regs + SM501_CURRENT_CLOCK);
clock = clock & ~(0xFF << clksrc);
clock |= reg<<clksrc;
@@ -603,14 +603,14 @@ unsigned long sm501_set_clock(struct device *dev,
switch (mode) {
case 1:
- writel(gate, sm->regs + SM501_POWER_MODE_0_GATE);
- writel(clock, sm->regs + SM501_POWER_MODE_0_CLOCK);
+ smc501_writel(gate, sm->regs + SM501_POWER_MODE_0_GATE);
+ smc501_writel(clock, sm->regs + SM501_POWER_MODE_0_CLOCK);
mode = 0;
break;
case 2:
case 0:
- writel(gate, sm->regs + SM501_POWER_MODE_1_GATE);
- writel(clock, sm->regs + SM501_POWER_MODE_1_CLOCK);
+ smc501_writel(gate, sm->regs + SM501_POWER_MODE_1_GATE);
+ smc501_writel(clock, sm->regs + SM501_POWER_MODE_1_CLOCK);
mode = 1;
break;
@@ -619,10 +619,11 @@ unsigned long sm501_set_clock(struct device *dev,
return -1;
}
- writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
+ smc501_writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
if (pll_reg)
- writel(pll_reg, sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL);
+ smc501_writel(pll_reg,
+ sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL);
sm501_sync_regs(sm);
@@ -902,7 +903,7 @@ static int sm501_gpio_get(struct gpio_chip *chip, unsigned offset)
struct sm501_gpio_chip *smgpio = to_sm501_gpio(chip);
unsigned long result;
- result = readl(smgpio->regbase + SM501_GPIO_DATA_LOW);
+ result = smc501_readl(smgpio->regbase + SM501_GPIO_DATA_LOW);
result >>= offset;
return result & 1UL;
@@ -915,13 +916,13 @@ static void sm501_gpio_ensure_gpio(struct sm501_gpio_chip *smchip,
/* check and modify if this pin is not set as gpio. */
- if (readl(smchip->control) & bit) {
+ if (smc501_readl(smchip->control) & bit) {
dev_info(sm501_gpio_to_dev(smchip->ourgpio)->dev,
"changing mode of gpio, bit %08lx\n", bit);
- ctrl = readl(smchip->control);
+ ctrl = smc501_readl(smchip->control);
ctrl &= ~bit;
- writel(ctrl, smchip->control);
+ smc501_writel(ctrl, smchip->control);
sm501_sync_regs(sm501_gpio_to_dev(smchip->ourgpio));
}
@@ -942,10 +943,10 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
spin_lock_irqsave(&smgpio->lock, save);
- val = readl(regs + SM501_GPIO_DATA_LOW) & ~bit;
+ val = smc501_readl(regs + SM501_GPIO_DATA_LOW) & ~bit;
if (value)
val |= bit;
- writel(val, regs);
+ smc501_writel(val, regs);
sm501_sync_regs(sm501_gpio_to_dev(smgpio));
sm501_gpio_ensure_gpio(smchip, bit);
@@ -967,8 +968,8 @@ static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&smgpio->lock, save);
- ddr = readl(regs + SM501_GPIO_DDR_LOW);
- writel(ddr & ~bit, regs + SM501_GPIO_DDR_LOW);
+ ddr = smc501_readl(regs + SM501_GPIO_DDR_LOW);
+ smc501_writel(ddr & ~bit, regs + SM501_GPIO_DDR_LOW);
sm501_sync_regs(sm501_gpio_to_dev(smgpio));
sm501_gpio_ensure_gpio(smchip, bit);
@@ -994,18 +995,18 @@ static int sm501_gpio_output(struct gpio_chip *chip,
spin_lock_irqsave(&smgpio->lock, save);
- val = readl(regs + SM501_GPIO_DATA_LOW);
+ val = smc501_readl(regs + SM501_GPIO_DATA_LOW);
if (value)
val |= bit;
else
val &= ~bit;
- writel(val, regs);
+ smc501_writel(val, regs);
- ddr = readl(regs + SM501_GPIO_DDR_LOW);
- writel(ddr | bit, regs + SM501_GPIO_DDR_LOW);
+ ddr = smc501_readl(regs + SM501_GPIO_DDR_LOW);
+ smc501_writel(ddr | bit, regs + SM501_GPIO_DDR_LOW);
sm501_sync_regs(sm501_gpio_to_dev(smgpio));
- writel(val, regs + SM501_GPIO_DATA_LOW);
+ smc501_writel(val, regs + SM501_GPIO_DATA_LOW);
sm501_sync_regs(sm501_gpio_to_dev(smgpio));
spin_unlock_irqrestore(&smgpio->lock, save);
@@ -1231,7 +1232,7 @@ static ssize_t sm501_dbg_regs(struct device *dev,
for (reg = 0x00; reg < 0x70; reg += 4) {
ret = sprintf(ptr, "%08x = %08x\n",
- reg, readl(sm->regs + reg));
+ reg, smc501_readl(sm->regs + reg));
ptr += ret;
}
@@ -1255,10 +1256,10 @@ static inline void sm501_init_reg(struct sm501_devdata *sm,
{
unsigned long tmp;
- tmp = readl(sm->regs + reg);
+ tmp = smc501_readl(sm->regs + reg);
tmp &= ~r->mask;
tmp |= r->set;
- writel(tmp, sm->regs + reg);
+ smc501_writel(tmp, sm->regs + reg);
}
/* sm501_init_regs
@@ -1299,7 +1300,7 @@ static void sm501_init_regs(struct sm501_devdata *sm,
static int sm501_check_clocks(struct sm501_devdata *sm)
{
- unsigned long pwrmode = readl(sm->regs + SM501_CURRENT_CLOCK);
+ unsigned long pwrmode = smc501_readl(sm->regs + SM501_CURRENT_CLOCK);
unsigned long msrc = (pwrmode & SM501_POWERMODE_M_SRC);
unsigned long m1src = (pwrmode & SM501_POWERMODE_M1_SRC);
@@ -1334,7 +1335,7 @@ static int __devinit sm501_init_dev(struct sm501_devdata *sm)
INIT_LIST_HEAD(&sm->devices);
- devid = readl(sm->regs + SM501_DEVICEID);
+ devid = smc501_readl(sm->regs + SM501_DEVICEID);
if ((devid & SM501_DEVICEID_IDMASK) != SM501_DEVICEID_SM501) {
dev_err(sm->dev, "incorrect device id %08lx\n", devid);
@@ -1342,9 +1343,9 @@ static int __devinit sm501_init_dev(struct sm501_devdata *sm)
}
/* disable irqs */
- writel(0, sm->regs + SM501_IRQ_MASK);
+ smc501_writel(0, sm->regs + SM501_IRQ_MASK);
- dramctrl = readl(sm->regs + SM501_DRAM_CONTROL);
+ dramctrl = smc501_readl(sm->regs + SM501_DRAM_CONTROL);
mem_avail = sm501_mem_local[(dramctrl >> 13) & 0x7];
dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n",
@@ -1376,7 +1377,7 @@ static int __devinit sm501_init_dev(struct sm501_devdata *sm)
sm501_register_gpio(sm);
}
- if (pdata->gpio_i2c != NULL && pdata->gpio_i2c_nr > 0) {
+ if (pdata && pdata->gpio_i2c != NULL && pdata->gpio_i2c_nr > 0) {
if (!sm501_gpio_isregistered(sm))
dev_err(sm->dev, "no gpio available for i2c gpio.\n");
else
@@ -1421,6 +1422,7 @@ static int __devinit sm501_plat_probe(struct platform_device *dev)
sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+
if (sm->io_res == NULL || sm->mem_res == NULL) {
dev_err(&dev->dev, "failed to get IO resource\n");
ret = -ENOENT;
@@ -1489,7 +1491,7 @@ static int sm501_plat_suspend(struct platform_device *pdev, pm_message_t state)
struct sm501_devdata *sm = platform_get_drvdata(pdev);
sm->in_suspend = 1;
- sm->pm_misc = readl(sm->regs + SM501_MISC_CONTROL);
+ sm->pm_misc = smc501_readl(sm->regs + SM501_MISC_CONTROL);
sm501_dump_regs(sm);
@@ -1513,9 +1515,9 @@ static int sm501_plat_resume(struct platform_device *pdev)
/* check to see if we are in the same state as when suspended */
- if (readl(sm->regs + SM501_MISC_CONTROL) != sm->pm_misc) {
+ if (smc501_readl(sm->regs + SM501_MISC_CONTROL) != sm->pm_misc) {
dev_info(sm->dev, "SM501_MISC_CONTROL changed over sleep\n");
- writel(sm->pm_misc, sm->regs + SM501_MISC_CONTROL);
+ smc501_writel(sm->pm_misc, sm->regs + SM501_MISC_CONTROL);
/* our suspend causes the controller state to change,
* either by something attempting setup, power loss,
@@ -1734,10 +1736,16 @@ static struct pci_driver sm501_pci_driver = {
MODULE_ALIAS("platform:sm501");
+static struct of_device_id __devinitdata of_sm501_match_tbl[] = {
+ { .compatible = "smi,sm501", },
+ { /* end */ }
+};
+
static struct platform_driver sm501_plat_driver = {
.driver = {
.name = "sm501",
.owner = THIS_MODULE,
+ .of_match_table = of_sm501_match_tbl,
},
.probe = sm501_plat_probe,
.remove = sm501_plat_remove,
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 9caeb4ac6ea6..af57fc706a4c 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -170,7 +170,7 @@ static struct mfd_cell t7l66xb_cells[] = {
.name = "tmio-mmc",
.enable = t7l66xb_mmc_enable,
.disable = t7l66xb_mmc_disable,
- .driver_data = &t7166xb_mmc_data,
+ .mfd_data = &t7166xb_mmc_data,
.num_resources = ARRAY_SIZE(t7l66xb_mmc_resources),
.resources = t7l66xb_mmc_resources,
},
@@ -383,16 +383,7 @@ static int t7l66xb_probe(struct platform_device *dev)
t7l66xb_attach_irq(dev);
- t7l66xb_cells[T7L66XB_CELL_NAND].driver_data = pdata->nand_data;
- t7l66xb_cells[T7L66XB_CELL_NAND].platform_data =
- &t7l66xb_cells[T7L66XB_CELL_NAND];
- t7l66xb_cells[T7L66XB_CELL_NAND].data_size =
- sizeof(t7l66xb_cells[T7L66XB_CELL_NAND]);
-
- t7l66xb_cells[T7L66XB_CELL_MMC].platform_data =
- &t7l66xb_cells[T7L66XB_CELL_MMC];
- t7l66xb_cells[T7L66XB_CELL_MMC].data_size =
- sizeof(t7l66xb_cells[T7L66XB_CELL_MMC]);
+ t7l66xb_cells[T7L66XB_CELL_NAND].mfd_data = pdata->nand_data;
ret = mfd_add_devices(&dev->dev, dev->id,
t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells),
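The hunks in this series replace the copied platform_data/data_size pair with a single borrowed mfd_data pointer. A rough sketch of the consumer side, assuming the mfd core pairs the new field with an mfd_get_data() helper returning pdev->mfd_cell->mfd_data, and that the MMC cell data is a struct tmio_mmc_data as in the tmio drivers:

	/* sketch only: how a child cell driver would now reach its data */
	static int example_cell_probe(struct platform_device *pdev)
	{
		struct tmio_mmc_data *pdata = mfd_get_data(pdev); /* was pdev->dev.platform_data */

		if (!pdata)
			return -EINVAL;

		/* ... proceed exactly as when the data arrived via platform_data ... */
		return 0;
	}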
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 6315f63f017d..b006f7cee952 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -131,7 +131,7 @@ static struct mfd_cell tc6387xb_cells[] = {
.name = "tmio-mmc",
.enable = tc6387xb_mmc_enable,
.disable = tc6387xb_mmc_disable,
- .driver_data = &tc6387xb_mmc_data,
+ .mfd_data = &tc6387xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
.resources = tc6387xb_mmc_resources,
},
@@ -190,11 +190,6 @@ static int __devinit tc6387xb_probe(struct platform_device *dev)
printk(KERN_INFO "Toshiba tc6387xb initialised\n");
- tc6387xb_cells[TC6387XB_CELL_MMC].platform_data =
- &tc6387xb_cells[TC6387XB_CELL_MMC];
- tc6387xb_cells[TC6387XB_CELL_MMC].data_size =
- sizeof(tc6387xb_cells[TC6387XB_CELL_MMC]);
-
ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells,
ARRAY_SIZE(tc6387xb_cells), iomem, irq);
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 9a238633a54d..3d62ded86a8f 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -393,7 +393,7 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
.name = "tmio-mmc",
.enable = tc6393xb_mmc_enable,
.resume = tc6393xb_mmc_resume,
- .driver_data = &tc6393xb_mmc_data,
+ .mfd_data = &tc6393xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
.resources = tc6393xb_mmc_resources,
},
@@ -693,27 +693,8 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
goto err_setup;
}
- tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data;
- tc6393xb_cells[TC6393XB_CELL_NAND].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_NAND];
- tc6393xb_cells[TC6393XB_CELL_NAND].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_NAND]);
-
- tc6393xb_cells[TC6393XB_CELL_MMC].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_MMC];
- tc6393xb_cells[TC6393XB_CELL_MMC].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_MMC]);
-
- tc6393xb_cells[TC6393XB_CELL_OHCI].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_OHCI];
- tc6393xb_cells[TC6393XB_CELL_OHCI].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_OHCI]);
-
- tc6393xb_cells[TC6393XB_CELL_FB].driver_data = tcpd->fb_data;
- tc6393xb_cells[TC6393XB_CELL_FB].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_FB];
- tc6393xb_cells[TC6393XB_CELL_FB].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_FB]);
+ tc6393xb_cells[TC6393XB_CELL_NAND].mfd_data = tcpd->nand_data;
+ tc6393xb_cells[TC6393XB_CELL_FB].mfd_data = tcpd->fb_data;
ret = mfd_add_devices(&dev->dev, dev->id,
tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
new file mode 100644
index 000000000000..af9ab0e5ca64
--- /dev/null
+++ b/drivers/mfd/ti-ssp.c
@@ -0,0 +1,476 @@
+/*
+ * Sequencer Serial Port (SSP) driver for Texas Instruments' SoCs
+ *
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ti_ssp.h>
+
+/* Register Offsets */
+#define REG_REV 0x00
+#define REG_IOSEL_1 0x04
+#define REG_IOSEL_2 0x08
+#define REG_PREDIV 0x0c
+#define REG_INTR_ST 0x10
+#define REG_INTR_EN 0x14
+#define REG_TEST_CTRL 0x18
+
+/* Per port registers */
+#define PORT_CFG_2 0x00
+#define PORT_ADDR 0x04
+#define PORT_DATA 0x08
+#define PORT_CFG_1 0x0c
+#define PORT_STATE 0x10
+
+#define SSP_PORT_CONFIG_MASK (SSP_EARLY_DIN | SSP_DELAY_DOUT)
+#define SSP_PORT_CLKRATE_MASK 0x0f
+
+#define SSP_SEQRAM_WR_EN BIT(4)
+#define SSP_SEQRAM_RD_EN BIT(5)
+#define SSP_START BIT(15)
+#define SSP_BUSY BIT(10)
+#define SSP_PORT_ASL BIT(7)
+#define SSP_PORT_CFO1 BIT(6)
+
+#define SSP_PORT_SEQRAM_SIZE 32
+
+static const int ssp_port_base[] = {0x040, 0x080};
+static const int ssp_port_seqram[] = {0x100, 0x180};
+
+struct ti_ssp {
+ struct resource *res;
+ struct device *dev;
+ void __iomem *regs;
+ spinlock_t lock;
+ struct clk *clk;
+ int irq;
+ wait_queue_head_t wqh;
+
+ /*
+ * Some of the iosel2 register bits always read back as 0, so we need
+ * to remember these values to avoid clobbering previously set
+ * values.
+ */
+ u32 iosel2;
+};
+
+static inline struct ti_ssp *dev_to_ssp(struct device *dev)
+{
+ return dev_get_drvdata(dev->parent);
+}
+
+static inline int dev_to_port(struct device *dev)
+{
+ return to_platform_device(dev)->id;
+}
+
+/* Register Access Helpers, rmw() functions need to run locked */
+static inline u32 ssp_read(struct ti_ssp *ssp, int reg)
+{
+ return __raw_readl(ssp->regs + reg);
+}
+
+static inline void ssp_write(struct ti_ssp *ssp, int reg, u32 val)
+{
+ __raw_writel(val, ssp->regs + reg);
+}
+
+static inline void ssp_rmw(struct ti_ssp *ssp, int reg, u32 mask, u32 bits)
+{
+ ssp_write(ssp, reg, (ssp_read(ssp, reg) & ~mask) | bits);
+}
+
+static inline u32 ssp_port_read(struct ti_ssp *ssp, int port, int reg)
+{
+ return ssp_read(ssp, ssp_port_base[port] + reg);
+}
+
+static inline void ssp_port_write(struct ti_ssp *ssp, int port, int reg,
+ u32 val)
+{
+ ssp_write(ssp, ssp_port_base[port] + reg, val);
+}
+
+static inline void ssp_port_rmw(struct ti_ssp *ssp, int port, int reg,
+ u32 mask, u32 bits)
+{
+ ssp_rmw(ssp, ssp_port_base[port] + reg, mask, bits);
+}
+
+static inline void ssp_port_clr_bits(struct ti_ssp *ssp, int port, int reg,
+ u32 bits)
+{
+ ssp_port_rmw(ssp, port, reg, bits, 0);
+}
+
+static inline void ssp_port_set_bits(struct ti_ssp *ssp, int port, int reg,
+ u32 bits)
+{
+ ssp_port_rmw(ssp, port, reg, 0, bits);
+}
+
+/* Called to setup port clock mode, caller must hold ssp->lock */
+static int __set_mode(struct ti_ssp *ssp, int port, int mode)
+{
+ mode &= SSP_PORT_CONFIG_MASK;
+ ssp_port_rmw(ssp, port, PORT_CFG_1, SSP_PORT_CONFIG_MASK, mode);
+
+ return 0;
+}
+
+int ti_ssp_set_mode(struct device *dev, int mode)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+ int ret;
+
+ spin_lock(&ssp->lock);
+ ret = __set_mode(ssp, port, mode);
+ spin_unlock(&ssp->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ti_ssp_set_mode);
+
+/* Called to setup iosel2, caller must hold ssp->lock */
+static void __set_iosel2(struct ti_ssp *ssp, u32 mask, u32 val)
+{
+ ssp->iosel2 = (ssp->iosel2 & ~mask) | val;
+ ssp_write(ssp, REG_IOSEL_2, ssp->iosel2);
+}
+
+/* Called to setup port iosel, caller must hold ssp->lock */
+static void __set_iosel(struct ti_ssp *ssp, int port, u32 iosel)
+{
+ unsigned val, shift = port ? 16 : 0;
+
+ /* IOSEL1 gets the least significant 16 bits */
+ val = ssp_read(ssp, REG_IOSEL_1);
+ val &= 0xffff << (port ? 0 : 16);
+ val |= (iosel & 0xffff) << (port ? 16 : 0);
+ ssp_write(ssp, REG_IOSEL_1, val);
+
+ /* IOSEL2 gets the most significant 16 bits */
+ val = (iosel >> 16) & 0x7;
+ __set_iosel2(ssp, 0x7 << shift, val << shift);
+}
+
+int ti_ssp_set_iosel(struct device *dev, u32 iosel)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+
+ spin_lock(&ssp->lock);
+ __set_iosel(ssp, port, iosel);
+ spin_unlock(&ssp->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ti_ssp_set_iosel);
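For orientation, a minimal sketch of how a function-specific child driver might program its port from probe(), given the split in __set_iosel() above (the low 16 bits of the iosel word land in IOSEL1, bits 16..18 in IOSEL2); the 0x00012345 pin-sharing word is a placeholder, not a value from any board file:

	static int example_port_setup(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		/* placeholder pin-sharing word, split across IOSEL1/IOSEL2 */
		ret = ti_ssp_set_iosel(dev, 0x00012345);
		if (ret)
			return ret;

		/* timing flags are masked with SSP_PORT_CONFIG_MASK internally */
		return ti_ssp_set_mode(dev, SSP_EARLY_DIN);
	}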
+
+int ti_ssp_load(struct device *dev, int offs, u32* prog, int len)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+ int i;
+
+ if (len > SSP_PORT_SEQRAM_SIZE)
+ return -ENOSPC;
+
+ spin_lock(&ssp->lock);
+
+ /* Enable SeqRAM access */
+ ssp_port_set_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN);
+
+ /* Copy code */
+ for (i = 0; i < len; i++) {
+ __raw_writel(prog[i], ssp->regs + offs + 4*i +
+ ssp_port_seqram[port]);
+ }
+
+ /* Disable SeqRAM access */
+ ssp_port_clr_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN);
+
+ spin_unlock(&ssp->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ti_ssp_load);
+
+int ti_ssp_raw_read(struct device *dev)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+ int shift = port ? 27 : 11;
+
+ return (ssp_read(ssp, REG_IOSEL_2) >> shift) & 0xf;
+}
+EXPORT_SYMBOL(ti_ssp_raw_read);
+
+int ti_ssp_raw_write(struct device *dev, u32 val)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev), shift;
+
+ spin_lock(&ssp->lock);
+
+ shift = port ? 22 : 6;
+ val &= 0xf;
+ __set_iosel2(ssp, 0xf << shift, val << shift);
+
+ spin_unlock(&ssp->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ti_ssp_raw_write);
+
+static inline int __xfer_done(struct ti_ssp *ssp, int port)
+{
+ return !(ssp_port_read(ssp, port, PORT_CFG_1) & SSP_BUSY);
+}
+
+int ti_ssp_run(struct device *dev, u32 pc, u32 input, u32 *output)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+ int ret;
+
+ if (pc & ~(0x3f))
+ return -EINVAL;
+
+ /* Grab ssp->lock to serialize rmw on ssp registers */
+ spin_lock(&ssp->lock);
+
+ ssp_port_write(ssp, port, PORT_ADDR, input >> 16);
+ ssp_port_write(ssp, port, PORT_DATA, input & 0xffff);
+ ssp_port_rmw(ssp, port, PORT_CFG_1, 0x3f, pc);
+
+ /* grab wait queue head lock to avoid race with the isr */
+ spin_lock_irq(&ssp->wqh.lock);
+
+ /* kick off sequence execution in hardware */
+ ssp_port_set_bits(ssp, port, PORT_CFG_1, SSP_START);
+
+ /* drop ssp lock; no register writes beyond this */
+ spin_unlock(&ssp->lock);
+
+ ret = wait_event_interruptible_locked_irq(ssp->wqh,
+ __xfer_done(ssp, port));
+ spin_unlock_irq(&ssp->wqh.lock);
+
+ if (ret < 0)
+ return ret;
+
+ if (output) {
+ *output = (ssp_port_read(ssp, port, PORT_ADDR) << 16) |
+ (ssp_port_read(ssp, port, PORT_DATA) & 0xffff);
+ }
+
+ ret = ssp_port_read(ssp, port, PORT_STATE) & 0x3f; /* stop address */
+
+ return ret;
+}
+EXPORT_SYMBOL(ti_ssp_run);
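A sketch of the expected load-then-run sequence from a child driver; the sequencer opcodes and start pc are placeholders, since real programs are device specific:

	static int example_transfer(struct device *dev, u16 addr, u16 data, u32 *out)
	{
		u32 seq[2] = { 0, 0 };	/* placeholder sequencer program */
		int ret;

		ret = ti_ssp_load(dev, 0, seq, ARRAY_SIZE(seq));
		if (ret)
			return ret;

		/* upper half of the input word goes to PORT_ADDR, lower half to PORT_DATA */
		ret = ti_ssp_run(dev, 0, ((u32)addr << 16) | data, out);

		/* a non-negative return is the sequencer stop address */
		return ret < 0 ? ret : 0;
	}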
+
+static irqreturn_t ti_ssp_interrupt(int irq, void *dev_data)
+{
+ struct ti_ssp *ssp = dev_data;
+
+ spin_lock(&ssp->wqh.lock);
+
+ ssp_write(ssp, REG_INTR_ST, 0x3);
+ wake_up_locked(&ssp->wqh);
+
+ spin_unlock(&ssp->wqh.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit ti_ssp_probe(struct platform_device *pdev)
+{
+ static struct ti_ssp *ssp;
+ const struct ti_ssp_data *pdata = pdev->dev.platform_data;
+ int error = 0, prediv = 0xff, id;
+ unsigned long sysclk;
+ struct device *dev = &pdev->dev;
+ struct mfd_cell cells[2];
+
+ ssp = kzalloc(sizeof(*ssp), GFP_KERNEL);
+ if (!ssp) {
+ dev_err(dev, "cannot allocate device info\n");
+ return -ENOMEM;
+ }
+
+ ssp->dev = dev;
+ dev_set_drvdata(dev, ssp);
+
+ ssp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ssp->res) {
+ error = -ENODEV;
+ dev_err(dev, "cannot determine register area\n");
+ goto error_res;
+ }
+
+ if (!request_mem_region(ssp->res->start, resource_size(ssp->res),
+ pdev->name)) {
+ error = -ENOMEM;
+ dev_err(dev, "cannot claim register memory\n");
+ goto error_res;
+ }
+
+ ssp->regs = ioremap(ssp->res->start, resource_size(ssp->res));
+ if (!ssp->regs) {
+ error = -ENOMEM;
+ dev_err(dev, "cannot map register memory\n");
+ goto error_map;
+ }
+
+ ssp->clk = clk_get(dev, NULL);
+ if (IS_ERR(ssp->clk)) {
+ error = PTR_ERR(ssp->clk);
+ dev_err(dev, "cannot claim device clock\n");
+ goto error_clk;
+ }
+
+ ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0) {
+ error = -ENODEV;
+ dev_err(dev, "unknown irq\n");
+ goto error_irq;
+ }
+
+ error = request_threaded_irq(ssp->irq, NULL, ti_ssp_interrupt, 0,
+ dev_name(dev), ssp);
+ if (error < 0) {
+ dev_err(dev, "cannot acquire irq\n");
+ goto error_irq;
+ }
+
+ spin_lock_init(&ssp->lock);
+ init_waitqueue_head(&ssp->wqh);
+
+ /* Power on and initialize SSP */
+ error = clk_enable(ssp->clk);
+ if (error) {
+ dev_err(dev, "cannot enable device clock\n");
+ goto error_enable;
+ }
+
+ /* Reset registers to a sensible known state */
+ ssp_write(ssp, REG_IOSEL_1, 0);
+ ssp_write(ssp, REG_IOSEL_2, 0);
+ ssp_write(ssp, REG_INTR_EN, 0x3);
+ ssp_write(ssp, REG_INTR_ST, 0x3);
+ ssp_write(ssp, REG_TEST_CTRL, 0);
+ ssp_port_write(ssp, 0, PORT_CFG_1, SSP_PORT_ASL);
+ ssp_port_write(ssp, 1, PORT_CFG_1, SSP_PORT_ASL);
+ ssp_port_write(ssp, 0, PORT_CFG_2, SSP_PORT_CFO1);
+ ssp_port_write(ssp, 1, PORT_CFG_2, SSP_PORT_CFO1);
+
+ sysclk = clk_get_rate(ssp->clk);
+ if (pdata && pdata->out_clock)
+ prediv = (sysclk / pdata->out_clock) - 1;
+ prediv = clamp(prediv, 0, 0xff);
+ ssp_rmw(ssp, REG_PREDIV, 0xff, prediv);
+
+ memset(cells, 0, sizeof(cells));
+ for (id = 0; id < 2; id++) {
+ const struct ti_ssp_dev_data *data = &pdata->dev_data[id];
+
+ cells[id].id = id;
+ cells[id].name = data->dev_name;
+ cells[id].platform_data = data->pdata;
+ cells[id].data_size = data->pdata_size;
+ }
+
+ error = mfd_add_devices(dev, 0, cells, 2, NULL, 0);
+ if (error < 0) {
+ dev_err(dev, "cannot add mfd cells\n");
+ goto error_enable;
+ }
+
+ return 0;
+
+error_enable:
+ free_irq(ssp->irq, ssp);
+error_irq:
+ clk_put(ssp->clk);
+error_clk:
+ iounmap(ssp->regs);
+error_map:
+ release_mem_region(ssp->res->start, resource_size(ssp->res));
+error_res:
+ kfree(ssp);
+ return error;
+}
+
+static int __devexit ti_ssp_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_ssp *ssp = dev_get_drvdata(dev);
+
+ mfd_remove_devices(dev);
+ clk_disable(ssp->clk);
+ free_irq(ssp->irq, ssp);
+ clk_put(ssp->clk);
+ iounmap(ssp->regs);
+ release_mem_region(ssp->res->start, resource_size(ssp->res));
+ kfree(ssp);
+ dev_set_drvdata(dev, NULL);
+ return 0;
+}
+
+static struct platform_driver ti_ssp_driver = {
+ .probe = ti_ssp_probe,
+ .remove = __devexit_p(ti_ssp_remove),
+ .driver = {
+ .name = "ti-ssp",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init ti_ssp_init(void)
+{
+ return platform_driver_register(&ti_ssp_driver);
+}
+module_init(ti_ssp_init);
+
+static void __exit ti_ssp_exit(void)
+{
+ platform_driver_unregister(&ti_ssp_driver);
+}
+module_exit(ti_ssp_exit);
+
+MODULE_DESCRIPTION("Sequencer Serial Port (SSP) Driver");
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ti-ssp");
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 6ad8a7f8d390..94c6c8afad12 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -384,8 +384,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -396,43 +395,37 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
- .platform_data = &timberdale_xiic_platform_data,
- .data_size = sizeof(timberdale_xiic_platform_data),
+ .mfd_data = &timberdale_xiic_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
- .platform_data = &timberdale_ks8842_platform_data,
- .data_size = sizeof(timberdale_ks8842_platform_data)
+ .mfd_data = &timberdale_ks8842_platform_data,
},
};
@@ -441,8 +434,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -458,15 +450,13 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
- .platform_data = &timberdale_xiic_platform_data,
- .data_size = sizeof(timberdale_xiic_platform_data),
+ .mfd_data = &timberdale_xiic_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-mlogicore",
@@ -477,29 +467,25 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
- .platform_data = &timberdale_ks8842_platform_data,
- .data_size = sizeof(timberdale_ks8842_platform_data)
+ .mfd_data = &timberdale_ks8842_platform_data,
},
};
@@ -508,8 +494,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -520,36 +505,31 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
- .platform_data = &timberdale_xiic_platform_data,
- .data_size = sizeof(timberdale_xiic_platform_data),
+ .mfd_data = &timberdale_xiic_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
};
@@ -558,8 +538,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -570,43 +549,37 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.name = "ocores-i2c",
.num_resources = ARRAY_SIZE(timberdale_ocores_resources),
.resources = timberdale_ocores_resources,
- .platform_data = &timberdale_ocores_platform_data,
- .data_size = sizeof(timberdale_ocores_platform_data),
+ .mfd_data = &timberdale_ocores_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
- .platform_data = &timberdale_ks8842_platform_data,
- .data_size = sizeof(timberdale_ks8842_platform_data)
+ .mfd_data = &timberdale_ks8842_platform_data,
},
};
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
new file mode 100644
index 000000000000..46d8205646b6
--- /dev/null
+++ b/drivers/mfd/tps6105x.c
@@ -0,0 +1,246 @@
+/*
+ * Core driver for TPS61050/61052 boost converters, used for white LED
+ * driving, audio power amplification, white LED flash, and generic
+ * boost conversion. Additionally it provides a 1-bit GPIO pin (out or in)
+ * and a flash synchronization pin to synchronize flash events when used as
+ * a flashgun.
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6105x.h>
+
+int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&tps6105x->lock);
+ if (ret)
+ return ret;
+ ret = i2c_smbus_write_byte_data(tps6105x->client, reg, value);
+ mutex_unlock(&tps6105x->lock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(tps6105x_set);
+
+int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&tps6105x->lock);
+ if (ret)
+ return ret;
+ ret = i2c_smbus_read_byte_data(tps6105x->client, reg);
+ mutex_unlock(&tps6105x->lock);
+ if (ret < 0)
+ return ret;
+
+ *buf = ret;
+ return 0;
+}
+EXPORT_SYMBOL(tps6105x_get);
+
+/*
+ * Masks off the bits in the mask and sets the bits in the bitvalues
+ * parameter in one atomic operation
+ */
+int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg,
+ u8 bitmask, u8 bitvalues)
+{
+ int ret;
+ u8 regval;
+
+ ret = mutex_lock_interruptible(&tps6105x->lock);
+ if (ret)
+ return ret;
+ ret = i2c_smbus_read_byte_data(tps6105x->client, reg);
+ if (ret < 0)
+ goto fail;
+ regval = ret;
+ regval = (~bitmask & regval) | (bitmask & bitvalues);
+ ret = i2c_smbus_write_byte_data(tps6105x->client, reg, regval);
+fail:
+ mutex_unlock(&tps6105x->lock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(tps6105x_mask_and_set);
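As an illustration of the accessor above, e.g. from one of the cell drivers this patch leaves unimplemented: switch only the mode field of register 0 while leaving the other bits alone. All macros are the ones already used elsewhere in this file:

	static int example_enter_voltage_mode(struct tps6105x *tps6105x)
	{
		/* read-modify-write of the mode bits under the chip mutex */
		return tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
					     TPS6105X_REG0_MODE_MASK,
					     TPS6105X_MODE_VOLTAGE <<
					     TPS6105X_REG0_MODE_SHIFT);
	}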
+
+static int __devinit tps6105x_startup(struct tps6105x *tps6105x)
+{
+ int ret;
+ u8 regval;
+
+ ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval);
+ if (ret)
+ return ret;
+ switch (regval >> TPS6105X_REG0_MODE_SHIFT) {
+ case TPS6105X_REG0_MODE_SHUTDOWN:
+ dev_info(&tps6105x->client->dev,
+ "TPS6105x found in SHUTDOWN mode\n");
+ break;
+ case TPS6105X_REG0_MODE_TORCH:
+ dev_info(&tps6105x->client->dev,
+ "TPS6105x found in TORCH mode\n");
+ break;
+ case TPS6105X_REG0_MODE_TORCH_FLASH:
+ dev_info(&tps6105x->client->dev,
+ "TPS6105x found in FLASH mode\n");
+ break;
+ case TPS6105X_REG0_MODE_VOLTAGE:
+ dev_info(&tps6105x->client->dev,
+ "TPS6105x found in VOLTAGE mode\n");
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * MFD cells - we have one cell whose function depends on the selected
+ * operation mode, and we always have a GPIO cell.
+ */
+static struct mfd_cell tps6105x_cells[] = {
+ {
+ /* name will be runtime assigned */
+ .id = -1,
+ },
+ {
+ .name = "tps6105x-gpio",
+ .id = -1,
+ },
+};
+
+static int __devinit tps6105x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps6105x *tps6105x;
+ struct tps6105x_platform_data *pdata;
+ int ret;
+ int i;
+
+ tps6105x = kmalloc(sizeof(*tps6105x), GFP_KERNEL);
+ if (!tps6105x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, tps6105x);
+ tps6105x->client = client;
+ pdata = client->dev.platform_data;
+ tps6105x->pdata = pdata;
+ mutex_init(&tps6105x->lock);
+
+ ret = tps6105x_startup(tps6105x);
+ if (ret) {
+ dev_err(&client->dev, "chip initialization failed\n");
+ goto fail;
+ }
+
+ /* Remove warning texts when you implement new cell drivers */
+ switch (pdata->mode) {
+ case TPS6105X_MODE_SHUTDOWN:
+ dev_info(&client->dev,
+ "present, not used for anything, only GPIO\n");
+ break;
+ case TPS6105X_MODE_TORCH:
+ tps6105x_cells[0].name = "tps6105x-leds";
+ dev_warn(&client->dev,
+ "torch mode is unsupported\n");
+ break;
+ case TPS6105X_MODE_TORCH_FLASH:
+ tps6105x_cells[0].name = "tps6105x-flash";
+ dev_warn(&client->dev,
+ "flash mode is unsupported\n");
+ break;
+ case TPS6105X_MODE_VOLTAGE:
+ tps6105x_cells[0].name = "tps6105x-regulator";
+ break;
+ default:
+ break;
+ }
+
+ /* Set up and register the platform devices. */
+ for (i = 0; i < ARRAY_SIZE(tps6105x_cells); i++) {
+ /* One state holder for all drivers, this is simple */
+ tps6105x_cells[i].mfd_data = tps6105x;
+ }
+
+ ret = mfd_add_devices(&client->dev, 0, tps6105x_cells,
+ ARRAY_SIZE(tps6105x_cells), NULL, 0);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ kfree(tps6105x);
+ return ret;
+}
+
+static int __devexit tps6105x_remove(struct i2c_client *client)
+{
+ struct tps6105x *tps6105x = i2c_get_clientdata(client);
+
+ mfd_remove_devices(&client->dev);
+
+ /* Put chip in shutdown mode */
+ tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ TPS6105X_REG0_MODE_MASK,
+ TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
+
+ kfree(tps6105x);
+ return 0;
+}
+
+static const struct i2c_device_id tps6105x_id[] = {
+ { "tps61050", 0 },
+ { "tps61052", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps6105x_id);
+
+static struct i2c_driver tps6105x_driver = {
+ .driver = {
+ .name = "tps6105x",
+ },
+ .probe = tps6105x_probe,
+ .remove = __devexit_p(tps6105x_remove),
+ .id_table = tps6105x_id,
+};
+
+static int __init tps6105x_init(void)
+{
+ return i2c_add_driver(&tps6105x_driver);
+}
+subsys_initcall(tps6105x_init);
+
+static void __exit tps6105x_exit(void)
+{
+ i2c_del_driver(&tps6105x_driver);
+}
+module_exit(tps6105x_exit);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("TPS6105x White LED Boost Converter Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index e9018d1394ee..0aa9186aec19 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -288,12 +288,10 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
return tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET1, val, mask);
}
-static void tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
+static int tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
{
- int ret;
-
if (!gpio_base)
- return;
+ return 0;
tps6586x->gpio.owner = THIS_MODULE;
tps6586x->gpio.label = tps6586x->client->name;
@@ -307,9 +305,7 @@ static void tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
tps6586x->gpio.set = tps6586x_gpio_set;
tps6586x->gpio.get = tps6586x_gpio_get;
- ret = gpiochip_add(&tps6586x->gpio);
- if (ret)
- dev_warn(tps6586x->dev, "GPIO registration failed: %d\n", ret);
+ return gpiochip_add(&tps6586x->gpio);
}
static int __remove_subdev(struct device *dev, void *unused)
@@ -517,17 +513,28 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
}
}
+ ret = tps6586x_gpio_init(tps6586x, pdata->gpio_base);
+ if (ret) {
+ dev_err(&client->dev, "GPIO registration failed: %d\n", ret);
+ goto err_gpio_init;
+ }
+
ret = tps6586x_add_subdevs(tps6586x, pdata);
if (ret) {
dev_err(&client->dev, "add devices failed: %d\n", ret);
goto err_add_devs;
}
- tps6586x_gpio_init(tps6586x, pdata->gpio_base);
-
return 0;
err_add_devs:
+ if (pdata->gpio_base) {
+ ret = gpiochip_remove(&tps6586x->gpio);
+ if (ret)
+ dev_err(&client->dev, "Can't remove gpio chip: %d\n",
+ ret);
+ }
+err_gpio_init:
if (client->irq)
free_irq(client->irq, tps6586x);
err_irq_init:
@@ -587,4 +594,3 @@ module_exit(tps6586x_exit);
MODULE_DESCRIPTION("TPS6586X core driver");
MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index a35fa7dcbf53..960b5bed7f52 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -721,13 +721,13 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
}
- if (twl_has_watchdog()) {
+ if (twl_has_watchdog() && twl_class_is_4030()) {
child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- if (twl_has_pwrbutton()) {
+ if (twl_has_pwrbutton() && twl_class_is_4030()) {
child = add_child(1, "twl4030_pwrbutton",
NULL, 0, true, pdata->irq_base + 8 + 0, 0);
if (IS_ERR(child))
@@ -864,6 +864,10 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3);
if (IS_ERR(child))
return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
}
if (twl_has_bci() && pdata->bci &&
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
index 9a4b196d6deb..c02fded316c9 100644
--- a/drivers/mfd/twl4030-codec.c
+++ b/drivers/mfd/twl4030-codec.c
@@ -208,15 +208,13 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
if (pdata->audio) {
cell = &codec->cells[childs];
cell->name = "twl4030-codec";
- cell->platform_data = pdata->audio;
- cell->data_size = sizeof(*pdata->audio);
+ cell->mfd_data = pdata->audio;
childs++;
}
if (pdata->vibra) {
cell = &codec->cells[childs];
cell->name = "twl4030-vibra";
- cell->platform_data = pdata->vibra;
- cell->data_size = sizeof(*pdata->vibra);
+ cell->mfd_data = pdata->vibra;
childs++;
}
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
new file mode 100644
index 000000000000..3941ddcf15fe
--- /dev/null
+++ b/drivers/mfd/twl4030-madc.c
@@ -0,0 +1,802 @@
+/*
+ *
+ * TWL4030 MADC module driver. This driver monitors the real time
+ * conversion of analog signals like battery temperature,
+ * battery type, battery level etc.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * Based on twl4030-madc.c
+ * Copyright (C) 2008 Nokia Corporation
+ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
+ *
+ * Amit Kucheria <amit.kucheria@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/i2c/twl.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+/*
+ * struct twl4030_madc_data - a container for madc info
+ * @dev - pointer to device structure for madc
+ * @lock - mutex protecting this data structure
+ * @requests - Array of request struct corresponding to SW1, SW2 and RT
+ * @imr - Interrupt mask register of MADC
+ * @isr - Interrupt status register of MADC
+ */
+struct twl4030_madc_data {
+ struct device *dev;
+ struct mutex lock; /* mutex protecting this data structure */
+ struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
+ int imr;
+ int isr;
+};
+
+static struct twl4030_madc_data *twl4030_madc;
+
+struct twl4030_prescale_divider_ratios {
+ s16 numerator;
+ s16 denominator;
+};
+
+static const struct twl4030_prescale_divider_ratios
+twl4030_divider_ratios[16] = {
+ {1, 1}, /* CHANNEL 0 No Prescaler */
+ {1, 1}, /* CHANNEL 1 No Prescaler */
+ {6, 10}, /* CHANNEL 2 */
+ {6, 10}, /* CHANNEL 3 */
+ {6, 10}, /* CHANNEL 4 */
+ {6, 10}, /* CHANNEL 5 */
+ {6, 10}, /* CHANNEL 6 */
+ {6, 10}, /* CHANNEL 7 */
+ {3, 14}, /* CHANNEL 8 */
+ {1, 3}, /* CHANNEL 9 */
+ {1, 1}, /* CHANNEL 10 No Prescaler */
+ {15, 100}, /* CHANNEL 11 */
+ {1, 4}, /* CHANNEL 12 */
+ {1, 1}, /* CHANNEL 13 Reserved channels */
+ {1, 1}, /* CHANNEL 14 Reserved channels */
+ {5, 11}, /* CHANNEL 15 */
+};
+
+/*
+ * Conversion table from -3 to 55 degrees Celsius
+ */
+static int therm_tbl[] = {
+30800, 29500, 28300, 27100,
+26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
+17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
+11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310,
+8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830,
+5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170,
+4040, 3910, 3790, 3670, 3550
+};
+
+/*
+ * Structure containing the registers
+ * of different conversion methods supported by MADC.
+ * Hardware (RT) real time conversion requests are initiated by an external
+ * host processor for RT signal conversions.
+ * External host processors can also request non-RT conversions: the SW1 and
+ * SW2 software conversions, also called asynchronous or GPC requests.
+ */
+static
+const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = {
+ [TWL4030_MADC_RT] = {
+ .sel = TWL4030_MADC_RTSELECT_LSB,
+ .avg = TWL4030_MADC_RTAVERAGE_LSB,
+ .rbase = TWL4030_MADC_RTCH0_LSB,
+ },
+ [TWL4030_MADC_SW1] = {
+ .sel = TWL4030_MADC_SW1SELECT_LSB,
+ .avg = TWL4030_MADC_SW1AVERAGE_LSB,
+ .rbase = TWL4030_MADC_GPCH0_LSB,
+ .ctrl = TWL4030_MADC_CTRL_SW1,
+ },
+ [TWL4030_MADC_SW2] = {
+ .sel = TWL4030_MADC_SW2SELECT_LSB,
+ .avg = TWL4030_MADC_SW2AVERAGE_LSB,
+ .rbase = TWL4030_MADC_GPCH0_LSB,
+ .ctrl = TWL4030_MADC_CTRL_SW2,
+ },
+};
+
+/*
+ * Function to read a particular channel value.
+ * @madc - pointer to struct twl4030_madc_data
+ * @reg - lsb of ADC Channel
+ * Returns the 10-bit conversion result, or a negative error if the i2c
+ * read fails.
+ */
+static int twl4030_madc_channel_raw_read(struct twl4030_madc_data *madc, u8 reg)
+{
+ u8 msb, lsb;
+ int ret;
+ /*
+ * For each ADC channel we have an MSB and LSB register pair. The MSB
+ * address is always the LSB address + 1. The reg parameter is the
+ * address of the LSB register.
+ */
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &msb, reg + 1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read MSB register 0x%X\n",
+ reg + 1);
+ return ret;
+ }
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &lsb, reg);
+ if (ret) {
+ dev_err(madc->dev, "unable to read LSB register 0x%X\n", reg);
+ return ret;
+ }
+
+ return (int)(((msb << 8) | lsb) >> 6);
+}
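Worked example of the extraction above (values invented): msb = 0x2A and lsb = 0x80 combine to 0x2A80; the 10-bit result sits in bits 15..6, so the >> 6 yields 0xAA, i.e. 170 counts of a 1023 full scale.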
+
+/*
+ * Return battery temperature
+ * Or < 0 on failure.
+ */
+static int twl4030battery_temperature(int raw_volt)
+{
+ u8 val;
+ int temp, curr, volt, res, ret;
+
+ volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R;
+ /* Getting and calculating the supply current in microamperes */
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ REG_BCICTL2);
+ if (ret < 0)
+ return ret;
+ curr = ((val & TWL4030_BCI_ITHEN) + 1) * 10;
+ /* Getting and calculating the thermistor resistance in ohms */
+ res = volt * 1000 / curr;
+ /* calculating temperature */
+ for (temp = 58; temp >= 0; temp--) {
+ int actual = therm_tbl[temp];
+
+ if ((actual - res) >= 0)
+ break;
+ }
+
+ return temp + 1;
+}
+
+static int twl4030battery_current(int raw_volt)
+{
+ int ret;
+ u8 val;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ TWL4030_BCI_BCICTL1);
+ if (ret)
+ return ret;
+ if (val & TWL4030_BCI_CGAIN) /* slope of 0.44 mV/mA */
+ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R1;
+ else /* slope of 0.88 mV/mA */
+ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R2;
+}
+/*
+ * Function to read channel values
+ * @madc - pointer to twl4030_madc_data struct
+ * @reg_base - Base address of the first channel
+ * @channels - 16 bit bitmap. If a bit is set, that channel's value is read
+ * @buf - The channel values are stored here. If a read fails, the error
+ * value is stored instead
+ * Returns the number of successfully read channels.
+ */
+static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
+ u8 reg_base, unsigned
+ long channels, int *buf)
+{
+ int count = 0, count_req = 0, i;
+ u8 reg;
+
+ for_each_set_bit(i, &channels, TWL4030_MADC_MAX_CHANNELS) {
+ reg = reg_base + 2 * i;
+ buf[i] = twl4030_madc_channel_raw_read(madc, reg);
+ if (buf[i] < 0) {
+ dev_err(madc->dev,
+ "Unable to read register 0x%X\n", reg);
+ count_req++;
+ continue;
+ }
+ switch (i) {
+ case 10:
+ buf[i] = twl4030battery_current(buf[i]);
+ if (buf[i] < 0) {
+ dev_err(madc->dev, "err reading current\n");
+ count_req++;
+ } else {
+ count++;
+ buf[i] = buf[i] - 750;
+ }
+ break;
+ case 1:
+ buf[i] = twl4030battery_temperature(buf[i]);
+ if (buf[i] < 0) {
+ dev_err(madc->dev, "err reading temperature\n");
+ count_req++;
+ } else {
+ buf[i] -= 3;
+ count++;
+ }
+ break;
+ default:
+ count++;
+ /* Analog Input (V) = conv_result * step_size / R
+ * conv_result = decimal value of 10-bit conversion
+ * result
+ * step size = 1.5 / (2 ^ 10 -1)
+ * R = Prescaler ratio for input channels.
+ * Result given in mV hence multiplied by 1000.
+ */
+ buf[i] = (buf[i] * 3 * 1000 *
+ twl4030_divider_ratios[i].denominator)
+ / (2 * 1023 *
+ twl4030_divider_ratios[i].numerator);
+ }
+ }
+ if (count_req)
+ dev_err(madc->dev, "%d channel conversion failed\n", count_req);
+
+ return count;
+}
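Worked example of the default scaling (numbers invented): a raw result of 512 on channel 2, whose prescaler ratio is 6/10, gives 512 * 3 * 1000 * 10 / (2 * 1023 * 6) = 1251 mV, i.e. 512/1023 of the 1.5 V reference scaled back up by the 10/6 divider.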
+
+/*
+ * Enables irq.
+ * @madc - pointer to twl4030_madc_data struct
+ * @id - irq number to be enabled
+ * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2
+ * corresponding to RT, SW1, SW2 conversion requests.
+ * If the i2c read fails it returns an error else returns 0.
+ */
+static int twl4030_madc_enable_irq(struct twl4030_madc_data *madc, u8 id)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ return ret;
+ }
+ val &= ~(1 << id);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write imr register 0x%X\n", madc->imr);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Disables irq.
+ * @madc - pointer to twl4030_madc_data struct
+ * @id - irq number to be disabled
+ * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2
+ * corresponding to RT, SW1, SW2 conversion requests.
+ * Returns error if i2c read/write fails.
+ */
+static int twl4030_madc_disable_irq(struct twl4030_madc_data *madc, u8 id)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ return ret;
+ }
+ val |= (1 << id);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write imr register 0x%X\n", madc->imr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
+{
+ struct twl4030_madc_data *madc = _madc;
+ const struct twl4030_madc_conversion_method *method;
+ u8 isr_val, imr_val;
+ int i, len, ret;
+ struct twl4030_madc_request *r;
+
+ mutex_lock(&madc->lock);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &isr_val, madc->isr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read isr register 0x%X\n",
+ madc->isr);
+ goto err_i2c;
+ }
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &imr_val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ goto err_i2c;
+ }
+ isr_val &= ~imr_val;
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ if (!(isr_val & (1 << i)))
+ continue;
+ ret = twl4030_madc_disable_irq(madc, i);
+ if (ret < 0)
+ dev_dbg(madc->dev, "Disable interrupt failed%d\n", i);
+ madc->requests[i].result_pending = 1;
+ }
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ r = &madc->requests[i];
+ /* No pending results for this method, move to next one */
+ if (!r->result_pending)
+ continue;
+ method = &twl4030_conversion_methods[r->method];
+ /* Read results */
+ len = twl4030_madc_read_channels(madc, method->rbase,
+ r->channels, r->rbuf);
+ /* Return results to caller */
+ if (r->func_cb != NULL) {
+ r->func_cb(len, r->channels, r->rbuf);
+ r->func_cb = NULL;
+ }
+ /* Free request */
+ r->result_pending = 0;
+ r->active = 0;
+ }
+ mutex_unlock(&madc->lock);
+
+ return IRQ_HANDLED;
+
+err_i2c:
+ /*
+ * In case of error check whichever request is active
+ * and service the same.
+ */
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ r = &madc->requests[i];
+ if (r->active == 0)
+ continue;
+ method = &twl4030_conversion_methods[r->method];
+ /* Read results */
+ len = twl4030_madc_read_channels(madc, method->rbase,
+ r->channels, r->rbuf);
+ /* Return results to caller */
+ if (r->func_cb != NULL) {
+ r->func_cb(len, r->channels, r->rbuf);
+ r->func_cb = NULL;
+ }
+ /* Free request */
+ r->result_pending = 0;
+ r->active = 0;
+ }
+ mutex_unlock(&madc->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int twl4030_madc_set_irq(struct twl4030_madc_data *madc,
+ struct twl4030_madc_request *req)
+{
+ struct twl4030_madc_request *p;
+ int ret;
+
+ p = &madc->requests[req->method];
+ memcpy(p, req, sizeof(*req));
+ ret = twl4030_madc_enable_irq(madc, req->method);
+ if (ret < 0) {
+ dev_err(madc->dev, "enable irq failed!!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Function which enables the madc conversion
+ * by writing to the control register.
+ * @madc - pointer to twl4030_madc_data struct
+ * @conv_method - can be TWL4030_MADC_RT, TWL4030_MADC_SW2, TWL4030_MADC_SW1
+ * corresponding to RT SW1 or SW2 conversion methods.
+ * Returns 0 on success, else a negative error value
+ */
+static int twl4030_madc_start_conversion(struct twl4030_madc_data *madc,
+ int conv_method)
+{
+ const struct twl4030_madc_conversion_method *method;
+ int ret = 0;
+ method = &twl4030_conversion_methods[conv_method];
+ switch (conv_method) {
+ case TWL4030_MADC_SW1:
+ case TWL4030_MADC_SW2:
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ TWL4030_MADC_SW_START, method->ctrl);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write ctrl register 0x%X\n",
+ method->ctrl);
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Function that waits for conversion to be ready
+ * @madc - pointer to twl4030_madc_data struct
+ * @timeout_ms - timeout value in milliseconds
+ * @status_reg - ctrl register
+ * Returns 0 on success, else a negative error value
+ */
+static int twl4030_madc_wait_conversion_ready(struct twl4030_madc_data *madc,
+ unsigned int timeout_ms,
+ u8 status_reg)
+{
+ unsigned long timeout;
+ int ret;
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ u8 reg;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &reg, status_reg);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to read status register 0x%X\n",
+ status_reg);
+ return ret;
+ }
+ if (!(reg & TWL4030_MADC_BUSY) && (reg & TWL4030_MADC_EOC_SW))
+ return 0;
+ usleep_range(500, 2000);
+ } while (!time_after(jiffies, timeout));
+ dev_err(madc->dev, "conversion timeout!\n");
+
+ return -EAGAIN;
+}
+
+/*
+ * An exported function which can be called from other kernel drivers.
+ * @req twl4030_madc_request structure
+ * req->rbuf will be filled with read values of channels based on the
+ * channel index. If a particular channel reading fails there will
+ * be a negative error value in the corresponding array element.
+ * Returns 0 on success, else an error value.
+ */
+int twl4030_madc_conversion(struct twl4030_madc_request *req)
+{
+ const struct twl4030_madc_conversion_method *method;
+ u8 ch_msb, ch_lsb;
+ int ret;
+
+ if (!req)
+ return -EINVAL;
+ mutex_lock(&twl4030_madc->lock);
+ if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ /* Do we have a conversion request ongoing */
+ if (twl4030_madc->requests[req->method].active) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ch_msb = (req->channels >> 8) & 0xff;
+ ch_lsb = req->channels & 0xff;
+ method = &twl4030_conversion_methods[req->method];
+ /* Select channels to be converted */
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_msb, method->sel + 1);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write sel register 0x%X\n", method->sel + 1);
+ goto out;
+ }
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write sel register 0x%X\n", method->sel + 1);
+ return ret;
+ }
+ /* Select averaging for all channels if do_avg is set */
+ if (req->do_avg) {
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ ch_msb, method->avg + 1);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write avg register 0x%X\n",
+ method->avg + 1);
+ goto out;
+ }
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ ch_lsb, method->avg);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write sel reg 0x%X\n",
+ method->sel + 1);
+ return ret;
+ }
+ }
+ if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
+ ret = twl4030_madc_set_irq(twl4030_madc, req);
+ if (ret < 0)
+ goto out;
+ ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
+ if (ret < 0)
+ goto out;
+ twl4030_madc->requests[req->method].active = 1;
+ ret = 0;
+ goto out;
+ }
+ /* With RT method we should not be here anymore */
+ if (req->method == TWL4030_MADC_RT) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
+ if (ret < 0)
+ goto out;
+ twl4030_madc->requests[req->method].active = 1;
+ /* Wait until conversion is ready (ctrl register returns EOC) */
+ ret = twl4030_madc_wait_conversion_ready(twl4030_madc, 5, method->ctrl);
+ if (ret) {
+ twl4030_madc->requests[req->method].active = 0;
+ goto out;
+ }
+ ret = twl4030_madc_read_channels(twl4030_madc, method->rbase,
+ req->channels, req->rbuf);
+ twl4030_madc->requests[req->method].active = 0;
+
+out:
+ mutex_unlock(&twl4030_madc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(twl4030_madc_conversion);
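For reference, a minimal sketch of how another in-kernel driver might call this exported API; the helper name, channel numbers and log message are illustrative assumptions, and the twl4030-madc header introduced by this series is assumed to be included:

/* Illustrative caller (not part of this patch): request a software-
 * triggered SW2 conversion of two channels and read the results from
 * req.rbuf[], which is indexed by channel number. */
static int example_read_madc_channels(void)
{
	struct twl4030_madc_request req = {
		.channels = (1 << 0) | (1 << 12),
		.method = TWL4030_MADC_SW2,
		.func_cb = NULL,
	};
	int ret;

	ret = twl4030_madc_conversion(&req);
	if (ret < 0)
		return ret;

	pr_info("madc: ch0=%d ch12=%d\n", req.rbuf[0], req.rbuf[12]);
	return 0;
}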
+
+/*
+ * Return the conversion result of a single channel,
+ * or < 0 on failure.
+ */
+int twl4030_get_madc_conversion(int channel_no)
+{
+ /* zero-init so unset fields such as type and do_avg are well defined */
+ struct twl4030_madc_request req = { };
+ int temp = 0;
+ int ret;
+
+ req.channels = (1 << channel_no);
+ req.method = TWL4030_MADC_SW2;
+ req.active = 0;
+ req.func_cb = NULL;
+ ret = twl4030_madc_conversion(&req);
+ if (ret < 0)
+ return ret;
+ if (req.rbuf[channel_no] > 0)
+ temp = req.rbuf[channel_no];
+
+ return temp;
+}
+EXPORT_SYMBOL_GPL(twl4030_get_madc_conversion);
+
+/*
+ * Function to enable or disable bias current for
+ * main battery type reading or temperature sensing
+ * @madc - pointer to twl4030_madc_data struct
+ * @chan - selects which bias current to control:
+ * non-zero - TWL4030_BCI_ITHEN, bias current for main battery type reading
+ * 0 - TWL4030_BCI_TYPEN, bias current for main battery temperature
+ * sensing
+ * @on - 1 to enable, 0 to disable the selected bias current.
+ */
+static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
+ int chan, int on)
+{
+ int ret;
+ u8 regval;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ &regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read BCICTL1 reg 0x%X",
+ TWL4030_BCI_BCICTL1);
+ return ret;
+ }
+ if (on)
+ regval |= chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
+ else
+ regval &= chan ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to write BCICTL1 reg 0x%X\n",
+ TWL4030_BCI_BCICTL1);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Function that sets MADC software power on bit to enable MADC
+ * @madc - pointer to twl4030_madc_data struct
+ * @on - enable or disable the MADC software power on bit
+ * Returns 0 on success, or an error if the i2c read/write fails
+ */
+static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
+{
+ u8 regval;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC,
+ &regval, TWL4030_MADC_CTRL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read madc ctrl1 reg 0x%X\n",
+ TWL4030_MADC_CTRL1);
+ return ret;
+ }
+ if (on)
+ regval |= TWL4030_MADC_MADCON;
+ else
+ regval &= ~TWL4030_MADC_MADCON;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, regval, TWL4030_MADC_CTRL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to write madc ctrl1 reg 0x%X\n",
+ TWL4030_MADC_CTRL1);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize MADC and request for threaded irq
+ */
+static int __devinit twl4030_madc_probe(struct platform_device *pdev)
+{
+ struct twl4030_madc_data *madc;
+ struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+ u8 regval;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_data not available\n");
+ return -EINVAL;
+ }
+ madc = kzalloc(sizeof(*madc), GFP_KERNEL);
+ if (!madc)
+ return -ENOMEM;
+ madc->dev = &pdev->dev;
+
+ /*
+ * Phoenix provides 2 interrupt lines. The first one is connected to
+ * the OMAP. The other one can be connected to another processor, such
+ * as a modem. Hence there are two separate ISR and IMR registers.
+ */
+ madc->imr = (pdata->irq_line == 1) ?
+ TWL4030_MADC_IMR1 : TWL4030_MADC_IMR2;
+ madc->isr = (pdata->irq_line == 1) ?
+ TWL4030_MADC_ISR1 : TWL4030_MADC_ISR2;
+ ret = twl4030_madc_set_power(madc, 1);
+ if (ret < 0)
+ goto err_power;
+ ret = twl4030_madc_set_current_generator(madc, 0, 1);
+ if (ret < 0)
+ goto err_current_generator;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ &regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to read reg BCI CTL1 0x%X\n",
+ TWL4030_BCI_BCICTL1);
+ goto err_i2c;
+ }
+ regval |= TWL4030_BCI_MESBAT;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to write reg BCI Ctl1 0x%X\n",
+ TWL4030_BCI_BCICTL1);
+ goto err_i2c;
+ }
+ platform_set_drvdata(pdev, madc);
+ mutex_init(&madc->lock);
+ ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
+ twl4030_madc_threaded_irq_handler,
+ IRQF_TRIGGER_RISING, "twl4030_madc", madc);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not request irq\n");
+ goto err_irq;
+ }
+ twl4030_madc = madc;
+ return 0;
+err_irq:
+ platform_set_drvdata(pdev, NULL);
+err_i2c:
+ twl4030_madc_set_current_generator(madc, 0, 0);
+err_current_generator:
+ twl4030_madc_set_power(madc, 0);
+err_power:
+ kfree(madc);
+
+ return ret;
+}
+
+static int __devexit twl4030_madc_remove(struct platform_device *pdev)
+{
+ struct twl4030_madc_data *madc = platform_get_drvdata(pdev);
+
+ free_irq(platform_get_irq(pdev, 0), madc);
+ platform_set_drvdata(pdev, NULL);
+ twl4030_madc_set_current_generator(madc, 0, 0);
+ twl4030_madc_set_power(madc, 0);
+ kfree(madc);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_madc_driver = {
+ .probe = twl4030_madc_probe,
+ .remove = __devexit_p(twl4030_madc_remove),
+ .driver = {
+ .name = "twl4030_madc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init twl4030_madc_init(void)
+{
+ return platform_driver_register(&twl4030_madc_driver);
+}
+
+module_init(twl4030_madc_init);
+
+static void __exit twl4030_madc_exit(void)
+{
+ platform_driver_unregister(&twl4030_madc_driver);
+}
+
+module_exit(twl4030_madc_exit);
+
+MODULE_DESCRIPTION("TWL4030 ADC driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("J Keerthy");
+MODULE_ALIAS("platform:twl4030_madc");
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 92b85e28a15e..38ffbd50a0d2 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -60,6 +60,7 @@ static inline void ucb1x00_ts_evt_add(struct ucb1x00_ts *ts, u16 pressure, u16 x
input_report_abs(idev, ABS_X, x);
input_report_abs(idev, ABS_Y, y);
input_report_abs(idev, ABS_PRESSURE, pressure);
+ input_report_key(idev, BTN_TOUCH, 1);
input_sync(idev);
}
@@ -68,6 +69,7 @@ static inline void ucb1x00_ts_event_release(struct ucb1x00_ts *ts)
struct input_dev *idev = ts->idev;
input_report_abs(idev, ABS_PRESSURE, 0);
+ input_report_key(idev, BTN_TOUCH, 0);
input_sync(idev);
}
@@ -384,7 +386,8 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
idev->open = ucb1x00_ts_open;
idev->close = ucb1x00_ts_close;
- __set_bit(EV_ABS, idev->evbit);
+ idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
+ idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_drvdata(idev, ts);
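The two hunks above make the touchscreen advertise and report BTN_TOUCH alongside its absolute axes. A self-contained sketch of an input device declaring the same capability set (the device name and axis ranges below are illustrative assumptions, not taken from this driver):

/* Illustrative registration of a touchscreen-style input device with
 * EV_ABS (X/Y/pressure) plus EV_KEY/BTN_TOUCH, as set up above. */
static struct input_dev *example_register_touch_dev(struct device *parent)
{
	struct input_dev *idev = input_allocate_device();

	if (!idev)
		return NULL;

	idev->name = "example-touchscreen";
	idev->dev.parent = parent;
	idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
	idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
	input_set_abs_params(idev, ABS_X, 0, 1023, 0, 0);
	input_set_abs_params(idev, ABS_Y, 0, 1023, 0, 0);
	input_set_abs_params(idev, ABS_PRESSURE, 0, 255, 0, 0);

	if (input_register_device(idev)) {
		input_free_device(idev);
		return NULL;
	}
	return idev;
}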
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index 348052aa5dbf..d698703dbd46 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -122,6 +122,7 @@ static struct pci_device_id vx855_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, vx855_pci_tbl);
static struct pci_driver vx855_pci_driver = {
.name = "vx855",
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index d2ecc2435736..529d65ba5353 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -38,7 +38,6 @@ static int wl1273_core_remove(struct i2c_client *client)
dev_dbg(&client->dev, "%s\n", __func__);
mfd_remove_devices(&client->dev);
- i2c_set_clientdata(client, NULL);
kfree(core);
return 0;
@@ -79,8 +78,7 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
cell = &core->cells[children];
cell->name = "wl1273_fm_radio";
- cell->platform_data = &core;
- cell->data_size = sizeof(core);
+ cell->mfd_data = &core;
children++;
if (pdata->children & WL1273_CODEC_CHILD) {
@@ -88,8 +86,7 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
dev_dbg(&client->dev, "%s: Have codec.\n", __func__);
cell->name = "wl1273-codec";
- cell->platform_data = &core;
- cell->data_size = sizeof(core);
+ cell->mfd_data = &core;
children++;
}
@@ -104,7 +101,6 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
return 0;
err:
- i2c_set_clientdata(client, NULL);
pdata->free_resources();
kfree(core);
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 3853fa8e7cc2..a06cbc739716 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -51,17 +51,25 @@ static int wm831x_i2c_write_device(struct wm831x *wm831x, unsigned short reg,
int bytes, void *src)
{
struct i2c_client *i2c = wm831x->control_data;
- unsigned char msg[bytes + 2];
+ struct i2c_msg xfer[2];
int ret;
reg = cpu_to_be16(reg);
- memcpy(&msg[0], &reg, 2);
- memcpy(&msg[2], src, bytes);
- ret = i2c_master_send(i2c, msg, bytes + 2);
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 2;
+ xfer[0].buf = (char *)&reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_NOSTART;
+ xfer[1].len = bytes;
+ xfer[1].buf = (char *)src;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret < 0)
return ret;
- if (ret < bytes + 2)
+ if (ret != 2)
return -EIO;
return 0;
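The rewrite above replaces a variable-length on-stack buffer and i2c_master_send() with a two-message gather transfer: the big-endian register address and the payload are sent in one transaction, and I2C_M_NOSTART suppresses the repeated start before the data phase. A self-contained sketch of the same pattern (function and variable names are illustrative, assuming <linux/i2c.h>):

/* Illustrative gather write: 16-bit big-endian register address
 * followed by the payload, sent as a single I2C transaction. */
static int example_i2c_gather_write(struct i2c_client *i2c, u16 reg,
				    const void *payload, int len)
{
	struct i2c_msg xfer[2];
	__be16 addr = cpu_to_be16(reg);
	int ret;

	xfer[0].addr = i2c->addr;
	xfer[0].flags = 0;
	xfer[0].len = 2;
	xfer[0].buf = (u8 *)&addr;

	xfer[1].addr = i2c->addr;
	xfer[1].flags = I2C_M_NOSTART;
	xfer[1].len = len;
	xfer[1].buf = (u8 *)payload;	/* cast mirrors the driver code */

	ret = i2c_transfer(i2c->adapter, xfer, 2);
	if (ret < 0)
		return ret;
	return ret == 2 ? 0 : -EIO;
}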
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index f7192d438aab..a5cd17e18d09 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -26,15 +26,6 @@
#include <linux/delay.h>
-/*
- * Since generic IRQs don't currently support interrupt controllers on
- * interrupt driven buses we don't use genirq but instead provide an
- * interface that looks very much like the standard ones. This leads
- * to some bodges, including storing interrupt handler information in
- * the static irq_data table we use to look up the data for individual
- * interrupts, but hopefully won't last too long.
- */
-
struct wm831x_irq_data {
int primary;
int reg;
@@ -361,6 +352,10 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
/* If there's been a change in the mask write it back
* to the hardware. */
if (wm831x->irq_masks_cur[i] != wm831x->irq_masks_cache[i]) {
+ dev_dbg(wm831x->dev, "IRQ mask sync: %x = %x\n",
+ WM831X_INTERRUPT_STATUS_1_MASK + i,
+ wm831x->irq_masks_cur[i]);
+
wm831x->irq_masks_cache[i] = wm831x->irq_masks_cur[i];
wm831x_reg_write(wm831x,
WM831X_INTERRUPT_STATUS_1_MASK + i,
@@ -371,7 +366,7 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&wm831x->irq_lock);
}
-static void wm831x_irq_unmask(struct irq_data *data)
+static void wm831x_irq_enable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
@@ -380,7 +375,7 @@ static void wm831x_irq_unmask(struct irq_data *data)
wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm831x_irq_mask(struct irq_data *data)
+static void wm831x_irq_disable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
@@ -426,8 +421,8 @@ static struct irq_chip wm831x_irq_chip = {
.name = "wm831x",
.irq_bus_lock = wm831x_irq_lock,
.irq_bus_sync_unlock = wm831x_irq_sync_unlock,
- .irq_mask = wm831x_irq_mask,
- .irq_unmask = wm831x_irq_unmask,
+ .irq_disable = wm831x_irq_disable,
+ .irq_enable = wm831x_irq_enable,
.irq_set_type = wm831x_irq_set_type,
};
@@ -449,6 +444,18 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
goto out;
}
+ /* The touch interrupts are visible in the primary register as
+ * an optimisation; open code this to avoid complicating the
+ * main handling loop and so we can also skip iterating the
+ * descriptors.
+ */
+ if (primary & WM831X_TCHPD_INT)
+ handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
+ if (primary & WM831X_TCHDATA_INT)
+ handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
+ if (primary & (WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT))
+ goto out;
+
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
int offset = wm831x_irqs[i].reg - 1;
@@ -481,6 +488,9 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
}
out:
+ /* Touchscreen interrupts are handled specially in the driver */
+ status_regs[0] &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
+
for (i = 0; i < ARRAY_SIZE(status_regs); i++) {
if (status_regs[i])
wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i,
@@ -517,6 +527,14 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
return 0;
}
+ if (pdata->irq_cmos)
+ i = 0;
+ else
+ i = WM831X_IRQ_OD;
+
+ wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG,
+ WM831X_IRQ_OD, i);
+
/* Try to flag /IRQ as a wake source; there are a number of
* unconditional wake sources in the PMIC so this isn't
* conditional but we don't actually care *too* much if it
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 0a8f772be88c..eed8e4f7a5a1 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/mfd/wm831x/core.h>
@@ -113,22 +114,27 @@ static int __devexit wm831x_spi_remove(struct spi_device *spi)
return 0;
}
-static int wm831x_spi_suspend(struct spi_device *spi, pm_message_t m)
+static int wm831x_spi_suspend(struct device *dev)
{
- struct wm831x *wm831x = dev_get_drvdata(&spi->dev);
+ struct wm831x *wm831x = dev_get_drvdata(dev);
return wm831x_device_suspend(wm831x);
}
+static const struct dev_pm_ops wm831x_spi_pm = {
+ .freeze = wm831x_spi_suspend,
+ .suspend = wm831x_spi_suspend,
+};
+
static struct spi_driver wm8310_spi_driver = {
.driver = {
.name = "wm8310",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8311_spi_driver = {
@@ -136,10 +142,10 @@ static struct spi_driver wm8311_spi_driver = {
.name = "wm8311",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8312_spi_driver = {
@@ -147,10 +153,10 @@ static struct spi_driver wm8312_spi_driver = {
.name = "wm8312",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8320_spi_driver = {
@@ -158,10 +164,10 @@ static struct spi_driver wm8320_spi_driver = {
.name = "wm8320",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8321_spi_driver = {
@@ -169,10 +175,10 @@ static struct spi_driver wm8321_spi_driver = {
.name = "wm8321",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8325_spi_driver = {
@@ -180,10 +186,10 @@ static struct spi_driver wm8325_spi_driver = {
.name = "wm8325",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8326_spi_driver = {
@@ -191,10 +197,10 @@ static struct spi_driver wm8326_spi_driver = {
.name = "wm8326",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static int __init wm831x_spi_init(void)
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 1bfef4846b07..3a6e78cb0384 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -245,7 +245,7 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
{
struct mfd_cell cell = {
.name = "wm8400-codec",
- .driver_data = wm8400,
+ .mfd_data = wm8400,
};
return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0);
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index f4016a075fd6..e198d40292e7 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -40,10 +40,8 @@ static int wm8994_read(struct wm8994 *wm8994, unsigned short reg,
return ret;
for (i = 0; i < bytes / 2; i++) {
- buf[i] = be16_to_cpu(buf[i]);
-
dev_vdbg(wm8994->dev, "Read %04x from R%d(0x%x)\n",
- buf[i], reg + i, reg + i);
+ be16_to_cpu(buf[i]), reg + i, reg + i);
}
return 0;
@@ -69,7 +67,7 @@ int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
if (ret < 0)
return ret;
else
- return val;
+ return be16_to_cpu(val);
}
EXPORT_SYMBOL_GPL(wm8994_reg_read);
@@ -79,7 +77,7 @@ EXPORT_SYMBOL_GPL(wm8994_reg_read);
* @wm8994: Device to read from
* @reg: First register
* @count: Number of registers
- * @buf: Buffer to fill.
+ * @buf: Buffer to fill. The data will be returned big endian.
*/
int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
int count, u16 *buf)
@@ -97,9 +95,9 @@ int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
EXPORT_SYMBOL_GPL(wm8994_bulk_read);
static int wm8994_write(struct wm8994 *wm8994, unsigned short reg,
- int bytes, void *src)
+ int bytes, const void *src)
{
- u16 *buf = src;
+ const u16 *buf = src;
int i;
BUG_ON(bytes % 2);
@@ -107,9 +105,7 @@ static int wm8994_write(struct wm8994 *wm8994, unsigned short reg,
for (i = 0; i < bytes / 2; i++) {
dev_vdbg(wm8994->dev, "Write %04x to R%d(0x%x)\n",
- buf[i], reg + i, reg + i);
-
- buf[i] = cpu_to_be16(buf[i]);
+ be16_to_cpu(buf[i]), reg + i, reg + i);
}
return wm8994->write_dev(wm8994, reg, bytes, src);
@@ -127,6 +123,8 @@ int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
{
int ret;
+ val = cpu_to_be16(val);
+
mutex_lock(&wm8994->io_lock);
ret = wm8994_write(wm8994, reg, 2, &val);
@@ -138,6 +136,29 @@ int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
EXPORT_SYMBOL_GPL(wm8994_reg_write);
/**
+ * wm8994_bulk_write: Write multiple WM8994 registers
+ *
+ * @wm8994: Device to write to
+ * @reg: First register
+ * @count: Number of registers
+ * @buf: Buffer to write from. Data must be big-endian formatted.
+ */
+int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
+ int count, const u16 *buf)
+{
+ int ret;
+
+ mutex_lock(&wm8994->io_lock);
+
+ ret = wm8994_write(wm8994, reg, count * 2, buf);
+
+ mutex_unlock(&wm8994->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wm8994_bulk_write);
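As the comment above notes, the new bulk write expects the caller to supply big-endian data, mirroring wm8994_bulk_read(). A hedged usage sketch (the helper name and register choice are illustrative only):

/* Illustrative caller: write three consecutive registers in one bulk
 * transfer; the buffer is converted to big endian first. */
static int example_write_three_regs(struct wm8994 *wm8994,
				    unsigned short first_reg,
				    u16 a, u16 b, u16 c)
{
	u16 buf[3];

	buf[0] = cpu_to_be16(a);
	buf[1] = cpu_to_be16(b);
	buf[2] = cpu_to_be16(c);

	return wm8994_bulk_write(wm8994, first_reg, 3, buf);
}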
+
+/**
* wm8994_set_bits: Set the value of a bitfield in a WM8994 register
*
* @wm8994: Device to write to.
@@ -157,9 +178,13 @@ int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
if (ret < 0)
goto out;
+ r = be16_to_cpu(r);
+
r &= ~mask;
r |= val;
+ r = cpu_to_be16(r);
+
ret = wm8994_write(wm8994, reg, 2, &r);
out:
@@ -271,6 +296,11 @@ static int wm8994_suspend(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to save LDO registers: %d\n", ret);
+ /* Explicitly put the device into reset in case regulators
+ * don't get disabled in order to ensure consistent restart.
+ */
+ wm8994_reg_write(wm8994, WM8994_SOFTWARE_RESET, 0x8994);
+
wm8994->suspended = true;
ret = regulator_bulk_disable(wm8994->num_supplies,
@@ -552,25 +582,29 @@ static int wm8994_i2c_read_device(struct wm8994 *wm8994, unsigned short reg,
return 0;
}
-/* Currently we allocate the write buffer on the stack; this is OK for
- * small writes - if we need to do large writes this will need to be
- * revised.
- */
static int wm8994_i2c_write_device(struct wm8994 *wm8994, unsigned short reg,
- int bytes, void *src)
+ int bytes, const void *src)
{
struct i2c_client *i2c = wm8994->control_data;
- unsigned char msg[bytes + 2];
+ struct i2c_msg xfer[2];
int ret;
reg = cpu_to_be16(reg);
- memcpy(&msg[0], &reg, 2);
- memcpy(&msg[2], src, bytes);
- ret = i2c_master_send(i2c, msg, bytes + 2);
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 2;
+ xfer[0].buf = (char *)&reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_NOSTART;
+ xfer[1].len = bytes;
+ xfer[1].buf = (char *)src;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret < 0)
return ret;
- if (ret < bytes + 2)
+ if (ret != 2)
return -EIO;
return 0;
@@ -612,7 +646,8 @@ static const struct i2c_device_id wm8994_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id);
-UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume, NULL);
+static UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume,
+ NULL);
static struct i2c_driver wm8994_i2c_driver = {
.driver = {
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 29e8faf9c01c..1e3bf4a2ff8e 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -182,7 +182,7 @@ static void wm8994_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&wm8994->irq_lock);
}
-static void wm8994_irq_unmask(struct irq_data *data)
+static void wm8994_irq_enable(struct irq_data *data)
{
struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
@@ -191,7 +191,7 @@ static void wm8994_irq_unmask(struct irq_data *data)
wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm8994_irq_mask(struct irq_data *data)
+static void wm8994_irq_disable(struct irq_data *data)
{
struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
@@ -204,8 +204,8 @@ static struct irq_chip wm8994_irq_chip = {
.name = "wm8994",
.irq_bus_lock = wm8994_irq_lock,
.irq_bus_sync_unlock = wm8994_irq_sync_unlock,
- .irq_mask = wm8994_irq_mask,
- .irq_unmask = wm8994_irq_unmask,
+ .irq_disable = wm8994_irq_disable,
+ .irq_enable = wm8994_irq_enable,
};
/* The processing of the primary interrupt occurs in a thread so that
@@ -225,9 +225,11 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
return IRQ_NONE;
}
- /* Apply masking */
- for (i = 0; i < WM8994_NUM_IRQ_REGS; i++)
+ /* Bit swap and apply masking */
+ for (i = 0; i < WM8994_NUM_IRQ_REGS; i++) {
+ status[i] = be16_to_cpu(status[i]);
status[i] &= ~wm8994->irq_masks_cur[i];
+ }
/* Report */
for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b7d5ef234ac9..4e007c6a4b44 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -2,6 +2,14 @@
# Misc strange devices
#
+# This one has to live outside of the MISC_DEVICES conditional,
+# because it may be selected by drivers/platform/x86/hp_accel.
+config SENSORS_LIS3LV02D
+ tristate
+ depends on INPUT
+ select INPUT_POLLDEV
+ default n
+
menuconfig MISC_DEVICES
bool "Misc devices"
---help---
@@ -394,6 +402,16 @@ config DS1682
This driver can also be built as a module. If so, the module
will be called ds1682.
+config SPEAR13XX_PCIE_GADGET
+ bool "PCIe gadget support for SPEAr13XX platform"
+ depends on ARCH_SPEAR13XX
+ default n
+ help
+ This option enables gadget support for PCIe controller. If
+ board file defines any controller as PCIe endpoint then a sysfs
+ entry will be created for that controller. User can use these
+ sysfs node to configure PCIe EP as per his requirements.
+
config TI_DAC7512
tristate "Texas Instruments DAC7512"
depends on SPI && SYSFS
@@ -462,5 +480,6 @@ source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
source "drivers/misc/iwmc3200top/Kconfig"
source "drivers/misc/ti-st/Kconfig"
+source "drivers/misc/lis3lv02d/Kconfig"
endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 98009cc20cb9..f5468602961f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -37,8 +37,10 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
+obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
+obj-y += lis3lv02d/
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 644d4cd071cc..81db7811cf68 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -245,9 +245,8 @@ static int apds9802als_probe(struct i2c_client *client,
als_set_default_config(client);
mutex_init(&data->mutex);
+ pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
- pm_runtime_get(&client->dev);
- pm_runtime_put(&client->dev);
return res;
als_error1:
@@ -255,12 +254,19 @@ als_error1:
return res;
}
-static int apds9802als_remove(struct i2c_client *client)
+static int __devexit apds9802als_remove(struct i2c_client *client)
{
struct als_data *data = i2c_get_clientdata(client);
+ pm_runtime_get_sync(&client->dev);
+
als_set_power_state(client, false);
sysfs_remove_group(&client->dev.kobj, &m_als_gr);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
kfree(data);
return 0;
}
@@ -275,9 +281,6 @@ static int apds9802als_suspend(struct i2c_client *client, pm_message_t mesg)
static int apds9802als_resume(struct i2c_client *client)
{
als_set_default_config(client);
-
- pm_runtime_get(&client->dev);
- pm_runtime_put(&client->dev);
return 0;
}
@@ -323,7 +326,7 @@ static struct i2c_driver apds9802als_driver = {
.pm = APDS9802ALS_PM_OPS,
},
.probe = apds9802als_probe,
- .remove = apds9802als_remove,
+ .remove = __devexit_p(apds9802als_remove),
.suspend = apds9802als_suspend,
.resume = apds9802als_resume,
.id_table = apds9802als_id,
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index 3891124001f2..a844810b50f6 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -75,7 +75,7 @@ out:
return tc;
fail_ioremap:
- release_resource(r);
+ release_mem_region(r->start, ATMEL_TC_IOMEM_SIZE);
fail:
tc = NULL;
goto out;
@@ -95,7 +95,7 @@ void atmel_tc_free(struct atmel_tc *tc)
spin_lock(&tc_list_lock);
if (tc->regs) {
iounmap(tc->regs);
- release_resource(tc->iomem);
+ release_mem_region(tc->iomem->start, ATMEL_TC_IOMEM_SIZE);
tc->regs = NULL;
tc->iomem = NULL;
}
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index d5f3a3fd2319..d07cd67c951c 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -196,10 +196,11 @@ static int __devexit bh1780_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int bh1780_suspend(struct i2c_client *client, pm_message_t mesg)
+static int bh1780_suspend(struct device *dev)
{
struct bh1780_data *ddata;
int state, ret;
+ struct i2c_client *client = to_i2c_client(dev);
ddata = i2c_get_clientdata(client);
state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
@@ -217,14 +218,14 @@ static int bh1780_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int bh1780_resume(struct i2c_client *client)
+static int bh1780_resume(struct device *dev)
{
struct bh1780_data *ddata;
int state, ret;
+ struct i2c_client *client = to_i2c_client(dev);
ddata = i2c_get_clientdata(client);
state = ddata->power_state;
-
ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
"CONTROL");
@@ -233,9 +234,10 @@ static int bh1780_resume(struct i2c_client *client)
return 0;
}
+static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
+#define BH1780_PMOPS (&bh1780_pm)
#else
-#define bh1780_suspend NULL
-#define bh1780_resume NULL
+#define BH1780_PMOPS NULL
#endif /* CONFIG_PM */
static const struct i2c_device_id bh1780_id[] = {
@@ -247,11 +249,10 @@ static struct i2c_driver bh1780_driver = {
.probe = bh1780_probe,
.remove = bh1780_remove,
.id_table = bh1780_id,
- .suspend = bh1780_suspend,
- .resume = bh1780_resume,
.driver = {
- .name = "bh1780"
- },
+ .name = "bh1780",
+ .pm = BH1780_PMOPS,
+},
};
static int __init bh1780_init(void)
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index b6e1c9a6679e..ecd276ad6b19 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -402,7 +402,7 @@ exit:
return status;
}
-static int bmp085_probe(struct i2c_client *client,
+static int __devinit bmp085_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bmp085_data *data;
@@ -438,7 +438,7 @@ exit:
return err;
}
-static int bmp085_remove(struct i2c_client *client)
+static int __devexit bmp085_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &bmp085_attr_group);
kfree(i2c_get_clientdata(client));
@@ -458,7 +458,7 @@ static struct i2c_driver bmp085_driver = {
},
.id_table = bmp085_id,
.probe = bmp085_probe,
- .remove = bmp085_remove,
+ .remove = __devexit_p(bmp085_remove),
.detect = bmp085_detect,
.address_list = normal_i2c
diff --git a/drivers/misc/cb710/Makefile b/drivers/misc/cb710/Makefile
index 7b80cbf1a609..467c8e9ca3c9 100644
--- a/drivers/misc/cb710/Makefile
+++ b/drivers/misc/cb710/Makefile
@@ -1,6 +1,4 @@
-ifeq ($(CONFIG_CB710_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_CB710_DEBUG) := -DDEBUG
obj-$(CONFIG_CB710_CORE) += cb710.o
diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c
index 46b3439673e9..16d7179e2f9b 100644
--- a/drivers/misc/ep93xx_pwm.c
+++ b/drivers/misc/ep93xx_pwm.c
@@ -249,11 +249,11 @@ static ssize_t ep93xx_pwm_set_invert(struct device *dev,
static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL);
static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL);
-static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO,
ep93xx_pwm_get_freq, ep93xx_pwm_set_freq);
-static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO,
ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent);
-static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO,
ep93xx_pwm_get_invert, ep93xx_pwm_set_invert);
static struct attribute *ep93xx_pwm_attrs[] = {
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 234bfcaf2099..ca938fc8a8d6 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -75,7 +75,7 @@ static ssize_t compass_heading_data_show(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char i2c_data[2];
- unsigned int ret;
+ int ret;
mutex_lock(&compass_mutex);
ret = compass_command(client, 'A');
@@ -86,7 +86,7 @@ static ssize_t compass_heading_data_show(struct device *dev,
msleep(10); /* sending 'A' cmd we need to wait for 7-10 millisecs */
ret = i2c_master_recv(client, i2c_data, 2);
mutex_unlock(&compass_mutex);
- if (ret != 1) {
+ if (ret < 0) {
dev_warn(dev, "i2c read data cmd failed\n");
return ret;
}
diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig
new file mode 100644
index 000000000000..8f474e6fc7b4
--- /dev/null
+++ b/drivers/misc/lis3lv02d/Kconfig
@@ -0,0 +1,37 @@
+#
+# STMicroelectronics LIS3LV02D and similar accelerometers
+#
+
+config SENSORS_LIS3_SPI
+ tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
+ depends on !ACPI && SPI_MASTER && INPUT
+ select SENSORS_LIS3LV02D
+ default n
+ help
+ This driver provides support for the LIS3LV02Dx accelerometer connected
+ via SPI. The accelerometer data is readable via
+ /sys/devices/platform/lis3lv02d.
+
+ This driver also provides an absolute input class device, allowing
+ the laptop to act as a pinball machine-esque joystick.
+
+ This driver can also be built as modules. If so, the core module
+ will be called lis3lv02d and a specific module for the SPI transport
+ is called lis3lv02d_spi.
+
+config SENSORS_LIS3_I2C
+ tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
+ depends on I2C && INPUT
+ select SENSORS_LIS3LV02D
+ default n
+ help
+ This driver provides support for the LIS3LV02Dx accelerometer connected
+ via I2C. The accelerometer data is readable via
+ /sys/devices/platform/lis3lv02d.
+
+ This driver also provides an absolute input class device, allowing
+ the device to act as a pinball machine-esque joystick.
+
+ This driver can also be built as modules. If so, the core module
+ will be called lis3lv02d and a specific module for the I2C transport
+ is called lis3lv02d_i2c.
diff --git a/drivers/misc/lis3lv02d/Makefile b/drivers/misc/lis3lv02d/Makefile
new file mode 100644
index 000000000000..4bf58b16fcf8
--- /dev/null
+++ b/drivers/misc/lis3lv02d/Makefile
@@ -0,0 +1,7 @@
+#
+# STMicroelectronics LIS3LV02D and similar accelerometers
+#
+
+obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o
+obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d_spi.o
+obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d_i2c.o
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index d805e8e57967..b928bc14e97b 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -38,7 +38,7 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/pm_runtime.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "lis3lv02d.h"
#define DRIVER_NAME "lis3lv02d"
@@ -88,7 +88,6 @@
struct lis3lv02d lis3_dev = {
.misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait),
};
-
EXPORT_SYMBOL_GPL(lis3_dev);
/* just like param_set_int() but does sanity-check so that it won't point
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index a1939589eb2c..a1939589eb2c 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 8853afce85ce..b20dfb4522d2 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -33,7 +33,7 @@
#include <linux/delay.h>
#include "lis3lv02d.h"
-#define DRV_NAME "lis3lv02d_i2c"
+#define DRV_NAME "lis3lv02d_i2c"
static const char reg_vdd[] = "Vdd";
static const char reg_vdd_io[] = "Vdd_IO";
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index 2549de1de4e2..c1f8a8fbf694 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include "lis3lv02d.h"
@@ -88,9 +89,10 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi)
return lis3lv02d_remove_fs(&lis3_dev);
}
-#ifdef CONFIG_PM
-static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int lis3lv02d_spi_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -99,8 +101,9 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
return 0;
}
-static int lis3lv02d_spi_resume(struct spi_device *spi)
+static int lis3lv02d_spi_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -108,21 +111,19 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
return 0;
}
-
-#else
-#define lis3lv02d_spi_suspend NULL
-#define lis3lv02d_spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend,
+ lis3lv02d_spi_resume);
+
static struct spi_driver lis302dl_spi_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &lis3lv02d_spi_pm,
},
.probe = lis302dl_spi_probe,
.remove = __devexit_p(lis302dl_spi_remove),
- .suspend = lis3lv02d_spi_suspend,
- .resume = lis3lv02d_spi_resume,
};
static int __init lis302dl_init(void)
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 380ba806495d..a19cb710a246 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -735,6 +735,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
{ }
};
+MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
static struct pci_driver pch_phub_driver = {
.name = "pch_phub",
diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile
index 7c4c306dfa8a..0003a1d56f7f 100644
--- a/drivers/misc/sgi-gru/Makefile
+++ b/drivers/misc/sgi-gru/Makefile
@@ -1,6 +1,4 @@
-ifdef CONFIG_SGI_GRU_DEBUG
- EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_SGI_GRU_DEBUG) := -DDEBUG
obj-$(CONFIG_SGI_GRU) := gru.o
gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o grukdump.o
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
new file mode 100644
index 000000000000..ec3b8c911833
--- /dev/null
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -0,0 +1,908 @@
+/*
+ * drivers/misc/spear13xx_pcie_gadget.c
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci_regs.h>
+#include <linux/configfs.h>
+#include <mach/pcie.h>
+#include <mach/misc_regs.h>
+
+#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1)
+/*
+ * In the current implementation address translation is done using IN0
+ * only, so the IN1 start address and the IN0 end address are kept the
+ * same.
+ */
+#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1)
+#define IN_IO_SIZE (20 * 1024 * 1024 - 1)
+#define IN_CFG0_SIZE (12 * 1024 * 1024 - 1)
+#define IN_CFG1_SIZE (12 * 1024 * 1024 - 1)
+#define IN_MSG_SIZE (12 * 1024 * 1024 - 1)
+/* Keep default BAR size as 4K */
+/* AORAM would be mapped by default */
+#define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1)
+
+#define INT_TYPE_NO_INT 0
+#define INT_TYPE_INTX 1
+#define INT_TYPE_MSI 2
+struct spear_pcie_gadget_config {
+ void __iomem *base;
+ void __iomem *va_app_base;
+ void __iomem *va_dbi_base;
+ char int_type[10];
+ ulong requested_msi;
+ ulong configured_msi;
+ ulong bar0_size;
+ ulong bar0_rw_offset;
+ void __iomem *va_bar0_address;
+};
+
+struct pcie_gadget_target {
+ struct configfs_subsystem subsys;
+ struct spear_pcie_gadget_config config;
+};
+
+struct pcie_gadget_target_attr {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct spear_pcie_gadget_config *config,
+ char *buf);
+ ssize_t (*store)(struct spear_pcie_gadget_config *config,
+ const char *buf,
+ size_t count);
+};
+
+static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
+{
+ /* Enable DBI access */
+ writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
+ &app_reg->slv_armisc);
+ writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
+ &app_reg->slv_awmisc);
+
+}
+
+static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
+{
+ /* disable DBI access */
+ writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
+ &app_reg->slv_armisc);
+ writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
+ &app_reg->slv_awmisc);
+
+}
+
+static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
+ int where, int size, u32 *val)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong va_address;
+
+ /* Enable DBI access */
+ enable_dbi_access(app_reg);
+
+ va_address = (ulong)config->va_dbi_base + (where & ~0x3);
+
+ *val = readl(va_address);
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ /* Disable DBI access */
+ disable_dbi_access(app_reg);
+}
+
+static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
+ int where, int size, u32 val)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong va_address;
+
+ /* Enable DBI access */
+ enable_dbi_access(app_reg);
+
+ va_address = (ulong)config->va_dbi_base + (where & ~0x3);
+
+ if (size == 4)
+ writel(val, va_address);
+ else if (size == 2)
+ writew(val, va_address + (where & 2));
+ else if (size == 1)
+ writeb(val, va_address + (where & 3));
+
+ /* Disable DBI access */
+ disable_dbi_access(app_reg);
+}
+
+#define PCI_FIND_CAP_TTL 48
+
+static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
+ u32 pos, int cap, int *ttl)
+{
+ u32 id;
+
+ while ((*ttl)--) {
+ spear_dbi_read_reg(config, pos, 1, &pos);
+ if (pos < 0x40)
+ break;
+ pos &= ~3;
+ spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos += PCI_CAP_LIST_NEXT;
+ }
+ return 0;
+}
+
+static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
+ u32 pos, int cap)
+{
+ int ttl = PCI_FIND_CAP_TTL;
+
+ return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
+}
+
+static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
+ u8 hdr_type)
+{
+ u32 status;
+
+ spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ switch (hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ case PCI_HEADER_TYPE_BRIDGE:
+ return PCI_CAPABILITY_LIST;
+ case PCI_HEADER_TYPE_CARDBUS:
+ return PCI_CB_CAPABILITY_LIST;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Tell if a device supports a given PCI capability.
+ * Returns the address of the requested capability structure within the
+ * device's PCI configuration space or 0 in case the device does not
+ * support it. Possible values for @cap:
+ *
+ * %PCI_CAP_ID_PM Power Management
+ * %PCI_CAP_ID_AGP Accelerated Graphics Port
+ * %PCI_CAP_ID_VPD Vital Product Data
+ * %PCI_CAP_ID_SLOTID Slot Identification
+ * %PCI_CAP_ID_MSI Message Signalled Interrupts
+ * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
+ * %PCI_CAP_ID_PCIX PCI-X
+ * %PCI_CAP_ID_EXP PCI Express
+ */
+static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
+ int cap)
+{
+ u32 pos;
+ u32 hdr_type;
+
+ spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
+
+ pos = pci_find_own_cap_start(config, hdr_type);
+ if (pos)
+ pos = pci_find_own_next_cap(config, pos, cap);
+
+ return pos;
+}
+
+static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
+{
+ return 0;
+}
+
+/*
+ * configfs interfaces show/store functions
+ */
+static ssize_t pcie_gadget_show_link(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+
+ if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
+ return sprintf(buf, "UP");
+ else
+ return sprintf(buf, "DOWN");
+}
+
+static ssize_t pcie_gadget_store_link(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+
+ if (sysfs_streq(buf, "UP"))
+ writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
+ &app_reg->app_ctrl_0);
+ else if (sysfs_streq(buf, "DOWN"))
+ writel(readl(&app_reg->app_ctrl_0)
+ & ~(1 << APP_LTSSM_ENABLE_ID),
+ &app_reg->app_ctrl_0);
+ else
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t pcie_gadget_show_int_type(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ return sprintf(buf, "%s", config->int_type);
+}
+
+static ssize_t pcie_gadget_store_int_type(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ u32 cap, vec, flags;
+ ulong vector;
+
+ if (sysfs_streq(buf, "INTA"))
+ spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
+
+ else if (sysfs_streq(buf, "MSI")) {
+ vector = config->requested_msi;
+ vec = 0;
+ while (vector > 1) {
+ vector /= 2;
+ vec++;
+ }
+ spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
+ cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
+ spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
+ flags &= ~PCI_MSI_FLAGS_QMASK;
+ flags |= vec << 1;
+ spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
+ } else
+ return -EINVAL;
+
+ strcpy(config->int_type, buf);
+
+ return count;
+}
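The while loop above derives the multiple-message exponent from the requested vector count (for example, requested_msi = 8 yields vec = 3, i.e. 2^3 vectors advertised via PCI_MSI_FLAGS). An equivalent formulation using the kernel's ilog2() helper, shown only as an illustration:

/* Illustrative equivalent of the vector-to-exponent loop above:
 * floor(log2(requested_msi)) for values greater than 1, else 0. */
static inline u32 example_msi_vec_exponent(unsigned long requested_msi)
{
	return requested_msi > 1 ? ilog2(requested_msi) : 0;
}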
+
+static ssize_t pcie_gadget_show_no_of_msi(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ u32 cap, vec, flags;
+ ulong vector;
+
+ if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
+ != (1 << CFG_MSI_EN_ID))
+ vector = 0;
+ else {
+ cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
+ spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
+ flags &= ~PCI_MSI_FLAGS_QSIZE;
+ vec = flags >> 4;
+ vector = 1;
+ while (vec--)
+ vector *= 2;
+ }
+ config->configured_msi = vector;
+
+ return sprintf(buf, "%lu", vector);
+}
+
+static ssize_t pcie_gadget_store_no_of_msi(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ if (strict_strtoul(buf, 0, &config->requested_msi))
+ return -EINVAL;
+ if (config->requested_msi > 32)
+ config->requested_msi = 32;
+
+ return count;
+}
+
+static ssize_t pcie_gadget_store_inta(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong en;
+
+ if (strict_strtoul(buf, 0, &en))
+ return -EINVAL;
+
+ if (en)
+ writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
+ &app_reg->app_ctrl_0);
+ else
+ writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
+ &app_reg->app_ctrl_0);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_store_send_msi(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong vector;
+ u32 ven_msi;
+
+ if (strict_strtoul(buf, 0, &vector))
+ return -EINVAL;
+
+ if (!config->configured_msi)
+ return -EINVAL;
+
+ if (vector >= config->configured_msi)
+ return -EINVAL;
+
+ ven_msi = readl(&app_reg->ven_msi_1);
+ ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
+ ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
+ ven_msi &= ~VEN_MSI_TC_MASK;
+ ven_msi |= 0 << VEN_MSI_TC_ID;
+ ven_msi &= ~VEN_MSI_VECTOR_MASK;
+ ven_msi |= vector << VEN_MSI_VECTOR_ID;
+
+ /* generating interrupt for msi vector */
+ ven_msi |= VEN_MSI_REQ_EN;
+ writel(ven_msi, &app_reg->ven_msi_1);
+ udelay(1);
+ ven_msi &= ~VEN_MSI_REQ_EN;
+ writel(ven_msi, &app_reg->ven_msi_1);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_vendor_id(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ u32 id;
+
+ spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id);
+
+ return sprintf(buf, "%x", id);
+}
+
+static ssize_t pcie_gadget_store_vendor_id(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong id;
+
+ if (strict_strtoul(buf, 0, &id))
+ return -EINVAL;
+
+ spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_device_id(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ u32 id;
+
+ spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id);
+
+ return sprintf(buf, "%x", id);
+}
+
+static ssize_t pcie_gadget_store_device_id(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong id;
+
+ if (strict_strtoul(buf, 0, &id))
+ return -EINVAL;
+
+ spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_bar0_size(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ return sprintf(buf, "%lx", config->bar0_size);
+}
+
+static ssize_t pcie_gadget_store_bar0_size(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong size;
+ u32 pos, pos1;
+ u32 no_of_bit = 0;
+
+ if (strict_strtoul(buf, 0, &size))
+ return -EINVAL;
+ /* min bar size is 256 */
+ if (size <= 0x100)
+ size = 0x100;
+ /* max bar size is 1MB*/
+ else if (size >= 0x100000)
+ size = 0x100000;
+ else {
+ pos = 0;
+ pos1 = 0;
+ while (pos < 21) {
+ pos = find_next_bit((ulong *)&size, 21, pos);
+ if (pos != 21)
+ pos1 = pos + 1;
+ pos++;
+ no_of_bit++;
+ }
+ if (no_of_bit == 2)
+ pos1--;
+
+ size = 1 << pos1;
+ }
+ config->bar0_size = size;
+ spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
+
+ return count;
+}
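The bit-scanning logic above clamps the requested size to the [256 bytes, 1 MB] range and rounds anything in between up to the next power of two, leaving exact powers of two unchanged. An equivalent sketch using existing kernel helpers (illustrative only, assuming <linux/log2.h> and <linux/kernel.h>):

/* Illustrative equivalent of the BAR0 size rounding above. */
static inline ulong example_round_bar0_size(ulong size)
{
	size = clamp(size, 0x100UL, 0x100000UL);
	return roundup_pow_of_two(size);
}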
+
+static ssize_t pcie_gadget_show_bar0_address(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+
+ u32 address = readl(&app_reg->pim0_mem_addr_start);
+
+ return sprintf(buf, "%x", address);
+}
+
+static ssize_t pcie_gadget_store_bar0_address(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong address;
+
+ if (strict_strtoul(buf, 0, &address))
+ return -EINVAL;
+
+ address &= ~(config->bar0_size - 1);
+ if (config->va_bar0_address)
+ iounmap(config->va_bar0_address);
+ config->va_bar0_address = ioremap(address, config->bar0_size);
+ if (!config->va_bar0_address)
+ return -ENOMEM;
+
+ writel(address, &app_reg->pim0_mem_addr_start);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_bar0_rw_offset(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ return sprintf(buf, "%lx", config->bar0_rw_offset);
+}
+
+static ssize_t pcie_gadget_store_bar0_rw_offset(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong offset;
+
+ if (strict_strtoul(buf, 0, &offset))
+ return -EINVAL;
+
+ if (offset % 4)
+ return -EINVAL;
+
+ config->bar0_rw_offset = offset;
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_bar0_data(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ ulong data;
+
+ if (!config->va_bar0_address)
+ return -ENOMEM;
+
+ data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
+
+ return sprintf(buf, "%lx", data);
+}
+
+static ssize_t pcie_gadget_store_bar0_data(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong data;
+
+ if (strict_strtoul(buf, 0, &data))
+ return -EINVAL;
+
+ if (!config->va_bar0_address)
+ return -ENOMEM;
+
+ writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
+
+ return count;
+}
+
+/*
+ * Attribute definitions.
+ */
+
+#define PCIE_GADGET_TARGET_ATTR_RO(_name) \
+static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
+ __CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL)
+
+#define PCIE_GADGET_TARGET_ATTR_WO(_name) \
+static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
+ __CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name)
+
+#define PCIE_GADGET_TARGET_ATTR_RW(_name) \
+static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
+ __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \
+ pcie_gadget_store_##_name)
+PCIE_GADGET_TARGET_ATTR_RW(link);
+PCIE_GADGET_TARGET_ATTR_RW(int_type);
+PCIE_GADGET_TARGET_ATTR_RW(no_of_msi);
+PCIE_GADGET_TARGET_ATTR_WO(inta);
+PCIE_GADGET_TARGET_ATTR_WO(send_msi);
+PCIE_GADGET_TARGET_ATTR_RW(vendor_id);
+PCIE_GADGET_TARGET_ATTR_RW(device_id);
+PCIE_GADGET_TARGET_ATTR_RW(bar0_size);
+PCIE_GADGET_TARGET_ATTR_RW(bar0_address);
+PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset);
+PCIE_GADGET_TARGET_ATTR_RW(bar0_data);
+
+static struct configfs_attribute *pcie_gadget_target_attrs[] = {
+ &pcie_gadget_target_link.attr,
+ &pcie_gadget_target_int_type.attr,
+ &pcie_gadget_target_no_of_msi.attr,
+ &pcie_gadget_target_inta.attr,
+ &pcie_gadget_target_send_msi.attr,
+ &pcie_gadget_target_vendor_id.attr,
+ &pcie_gadget_target_device_id.attr,
+ &pcie_gadget_target_bar0_size.attr,
+ &pcie_gadget_target_bar0_address.attr,
+ &pcie_gadget_target_bar0_rw_offset.attr,
+ &pcie_gadget_target_bar0_data.attr,
+ NULL,
+};
+
+static struct pcie_gadget_target *to_target(struct config_item *item)
+{
+ return item ?
+ container_of(to_configfs_subsystem(to_config_group(item)),
+ struct pcie_gadget_target, subsys) : NULL;
+}
+
+/*
+ * Item operations and type for pcie_gadget_target.
+ */
+
+static ssize_t pcie_gadget_target_attr_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *buf)
+{
+ ssize_t ret = -EINVAL;
+ struct pcie_gadget_target *target = to_target(item);
+ struct pcie_gadget_target_attr *t_attr =
+ container_of(attr, struct pcie_gadget_target_attr, attr);
+
+ if (t_attr->show)
+ ret = t_attr->show(&target->config, buf);
+ return ret;
+}
+
+static ssize_t pcie_gadget_target_attr_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ ssize_t ret = -EINVAL;
+ struct pcie_gadget_target *target = to_target(item);
+ struct pcie_gadget_target_attr *t_attr =
+ container_of(attr, struct pcie_gadget_target_attr, attr);
+
+ if (t_attr->store)
+ ret = t_attr->store(&target->config, buf, count);
+ return ret;
+}
+
+static struct configfs_item_operations pcie_gadget_target_item_ops = {
+ .show_attribute = pcie_gadget_target_attr_show,
+ .store_attribute = pcie_gadget_target_attr_store,
+};
+
+static struct config_item_type pcie_gadget_target_type = {
+ .ct_attrs = pcie_gadget_target_attrs,
+ .ct_item_ops = &pcie_gadget_target_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+
+ /* setup registers for outbound translation */
+
+ writel(config->base, &app_reg->in0_mem_addr_start);
+ writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
+ &app_reg->in0_mem_addr_limit);
+ writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
+ writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
+ &app_reg->in1_mem_addr_limit);
+ writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
+ writel(app_reg->in_io_addr_start + IN_IO_SIZE,
+ &app_reg->in_io_addr_limit);
+ writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
+ writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
+ &app_reg->in_cfg0_addr_limit);
+ writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
+ writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
+ &app_reg->in_cfg1_addr_limit);
+ writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
+ writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
+ &app_reg->in_msg_addr_limit);
+
+ writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
+ writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
+ writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);
+
+ /* setup registers for inbound translation */
+
+ /* Keep AORAM mapped at BAR0 as default */
+ config->bar0_size = INBOUND_ADDR_MASK + 1;
+ spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
+ spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
+ config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
+ config->bar0_size);
+
+ writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
+ writel(0, &app_reg->pim1_mem_addr_start);
+ writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);
+
+ writel(0x0, &app_reg->pim_io_addr_start);
+ writel(0x0, &app_reg->pim_io_addr_start);
+ writel(0x0, &app_reg->pim_rom_addr_start);
+
+ writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
+ | ((u32)1 << REG_TRANSLATION_ENABLE),
+ &app_reg->app_ctrl_0);
+ /* disable all rx interrupts */
+ writel(0, &app_reg->int_mask);
+
+ /* Select INTA as default */
+ spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
+}
+
+static int __devinit spear_pcie_gadget_probe(struct platform_device *pdev)
+{
+ struct resource *res0, *res1;
+ int status = 0;
+ int irq;
+ struct clk *clk;
+ struct pcie_gadget_target *target;
+ struct spear_pcie_gadget_config *config;
+ struct config_item *cg_item;
+ struct configfs_subsystem *subsys;
+
+ /* get resource for application registers*/
+
+ res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res0) {
+ dev_err(&pdev->dev, "no resource defined\n");
+ return -EBUSY;
+ }
+ if (!request_mem_region(res0->start, resource_size(res0),
+ pdev->name)) {
+ dev_err(&pdev->dev, "pcie gadget region already claimed\n");
+ return -EBUSY;
+ }
+ /* get resource for dbi registers*/
+
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res1) {
+ dev_err(&pdev->dev, "no resource defined\n");
+ status = -EBUSY;
+ goto err_rel_res0;
+ }
+ if (!request_mem_region(res1->start, resource_size(res1),
+ pdev->name)) {
+ dev_err(&pdev->dev, "pcie gadget region already claimed\n");
+ status = -EBUSY;
+ goto err_rel_res0;
+ }
+
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target) {
+ dev_err(&pdev->dev, "out of memory\n");
+ status = -ENOMEM;
+ goto err_rel_res;
+ }
+
+ cg_item = &target->subsys.su_group.cg_item;
+ sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
+ cg_item->ci_type = &pcie_gadget_target_type;
+ config = &target->config;
+ config->va_app_base = (void __iomem *)ioremap(res0->start,
+ resource_size(res0));
+ if (!config->va_app_base) {
+ dev_err(&pdev->dev, "ioremap fail\n");
+ status = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ config->base = (void __iomem *)res1->start;
+
+ config->va_dbi_base = (void __iomem *)ioremap(res1->start,
+ resource_size(res1));
+ if (!config->va_dbi_base) {
+ dev_err(&pdev->dev, "ioremap fail\n");
+ status = -ENOMEM;
+ goto err_iounmap_app;
+ }
+
+ dev_set_drvdata(&pdev->dev, target);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no update irq?\n");
+ status = irq;
+ goto err_iounmap;
+ }
+
+ status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL);
+ if (status) {
+ dev_err(&pdev->dev,
+ "pcie gadget interrupt IRQ%d already claimed\n", irq);
+ goto err_iounmap;
+ }
+
+ /* Register configfs hooks */
+ subsys = &target->subsys;
+ config_group_init(&subsys->su_group);
+ mutex_init(&subsys->su_mutex);
+ status = configfs_register_subsystem(subsys);
+ if (status)
+ goto err_irq;
+
+ /*
+ * Init basic PCIe application registers.
+ * Do not enable the clock if this is PCIE0. Ideally, every controller
+ * would be clocked independently of the others, but PCIE1 and PCIE2
+ * depend on PCIE0, so the PCIE0 clock is enabled during board init.
+ */
+ if (pdev->id == 1) {
+ /*
+ * Ideally the CFG clock should also be enabled here, but it
+ * is currently done in the board init routine.
+ */
+ clk = clk_get_sys("pcie1", NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s:couldn't get clk for pcie1\n", __func__);
+ goto err_irq;
+ }
+ if (clk_enable(clk)) {
+ pr_err("%s:couldn't enable clk for pcie1\n", __func__);
+ goto err_irq;
+ }
+ } else if (pdev->id == 2) {
+ /*
+ * Ideally the CFG clock should also be enabled here, but it
+ * is currently done in the board init routine.
+ */
+ clk = clk_get_sys("pcie2", NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s:couldn't get clk for pcie2\n", __func__);
+ goto err_irq;
+ }
+ if (clk_enable(clk)) {
+ pr_err("%s:couldn't enable clk for pcie2\n", __func__);
+ goto err_irq;
+ }
+ }
+ spear13xx_pcie_device_init(config);
+
+ return 0;
+err_irq:
+ free_irq(irq, NULL);
+err_iounmap:
+ iounmap(config->va_dbi_base);
+err_iounmap_app:
+ iounmap(config->va_app_base);
+err_kzalloc:
+ kfree(target);
+err_rel_res:
+ release_mem_region(res1->start, resource_size(res1));
+err_rel_res0:
+ release_mem_region(res0->start, resource_size(res0));
+ return status;
+}
+
+static int __devexit spear_pcie_gadget_remove(struct platform_device *pdev)
+{
+ struct resource *res0, *res1;
+ struct pcie_gadget_target *target;
+ struct spear_pcie_gadget_config *config;
+ int irq;
+
+ res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ irq = platform_get_irq(pdev, 0);
+ target = dev_get_drvdata(&pdev->dev);
+ config = &target->config;
+
+ free_irq(irq, NULL);
+ iounmap(config->va_dbi_base);
+ iounmap(config->va_app_base);
+ release_mem_region(res1->start, resource_size(res1));
+ release_mem_region(res0->start, resource_size(res0));
+ configfs_unregister_subsystem(&target->subsys);
+ kfree(target);
+
+ return 0;
+}
+
+static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
+{
+}
+
+static struct platform_driver spear_pcie_gadget_driver = {
+ .probe = spear_pcie_gadget_probe,
+ .remove = spear_pcie_gadget_remove,
+ .shutdown = spear_pcie_gadget_shutdown,
+ .driver = {
+ .name = "pcie-gadget-spear",
+ .bus = &platform_bus_type
+ },
+};
+
+static int __init spear_pcie_gadget_init(void)
+{
+ return platform_driver_register(&spear_pcie_gadget_driver);
+}
+module_init(spear_pcie_gadget_init);
+
+static void __exit spear_pcie_gadget_exit(void)
+{
+ platform_driver_unregister(&spear_pcie_gadget_driver);
+}
+module_exit(spear_pcie_gadget_exit);
+
+MODULE_ALIAS("pcie-gadget-spear");
+MODULE_AUTHOR("Pratyush Anand");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 2a876c4099cd..3b1f783bf924 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -58,12 +58,11 @@ config SDIO_UART
config MMC_TEST
tristate "MMC host test driver"
- default n
help
Development driver that performs a series of reads and writes
to a memory card in order to expose certain well known bugs
in host controllers. The tests are executed by writing to the
- "test" file in sysfs under each card. Note that whatever is
+ "test" file in debugfs under each card. Note that whatever is
on your card will be overwritten by these tests.
This driver is only of interest to those developing or
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index bfc8a8ae55df..61d233a7c118 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -621,6 +621,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = &card->dev;
+ set_disk_ro(md->disk, md->read_only);
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 21adc27f4132..5ec8eddfcf6e 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -88,6 +88,7 @@ struct mmc_test_area {
* @sectors: amount of sectors to check in one group
* @ts: time values of transfer
* @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
*/
struct mmc_test_transfer_result {
struct list_head link;
@@ -95,6 +96,7 @@ struct mmc_test_transfer_result {
unsigned int sectors;
struct timespec ts;
unsigned int rate;
+ unsigned int iops;
};
/**
@@ -226,9 +228,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
if (!busy && mmc_test_busy(&cmd)) {
busy = 1;
- printk(KERN_INFO "%s: Warning: Host did not "
- "wait for busy state to end.\n",
- mmc_hostname(test->card->host));
+ if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ printk(KERN_INFO "%s: Warning: Host did not "
+ "wait for busy state to end.\n",
+ mmc_hostname(test->card->host));
}
} while (mmc_test_busy(&cmd));
@@ -494,7 +497,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
unsigned int count, unsigned int sectors, struct timespec ts,
- unsigned int rate)
+ unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
@@ -509,6 +512,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
tr->sectors = sectors;
tr->ts = ts;
tr->rate = rate;
+ tr->iops = iops;
list_add_tail(&tr->link, &test->gr->tr_lst);
}
@@ -519,20 +523,22 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
struct timespec *ts1, struct timespec *ts2)
{
- unsigned int rate, sectors = bytes >> 9;
+ unsigned int rate, iops, sectors = bytes >> 9;
struct timespec ts;
ts = timespec_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
+ iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
- "seconds (%u kB/s, %u KiB/s)\n",
+ "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+ (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+ iops / 100, iops % 100);
- mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
+ mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
/*
@@ -542,22 +548,24 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
unsigned int count, struct timespec *ts1,
struct timespec *ts2)
{
- unsigned int rate, sectors = bytes >> 9;
+ unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
struct timespec ts;
ts = timespec_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
+ iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+ "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+ "%u.%02u IOPS)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
- rate / 1000, rate / 1024);
+ rate / 1000, rate / 1024, iops / 100, iops % 100);
- mmc_test_save_transfer_result(test, count, sectors, ts, rate);
+ mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
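The iops value carried around above is a fixed-point count of I/O operations per second times 100, which is why it is printed as iops / 100 and iops % 100. A tiny stand-alone example of the arithmetic; the 300 transfers in 2.5 seconds are made-up figures:

#include <stdio.h>

int main(void)
{
	unsigned long long count = 300;			/* transfers */
	unsigned long long ns = 2500000000ULL;		/* 2.5 seconds */
	unsigned int iops = count * 100 * 1000000000ULL / ns;

	printf("%u.%02u IOPS\n", iops / 100, iops % 100);	/* 120.00 IOPS */
	return 0;
}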
/*
@@ -1425,28 +1433,29 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
}
/*
- * Initialize an area for testing large transfers. The size of the area is the
- * preferred erase size which is a good size for optimal transfer speed. Note
- * that is typically 4MiB for modern cards. The test area is set to the middle
- * of the card because cards may have different charateristics at the front
- * (for FAT file system optimization). Optionally, the area is erased (if the
- * card supports it) which may improve write performance. Optionally, the area
- * is filled with data for subsequent read tests.
+ * Initialize an area for testing large transfers. The test area is set to the
+ * middle of the card because cards may have different characteristics at the
+ * front (for FAT file system optimization). Optionally, the area is erased
+ * (if the card supports it) which may improve write performance. Optionally,
+ * the area is filled with data for subsequent read tests.
*/
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
struct mmc_test_area *t = &test->area;
- unsigned long min_sz = 64 * 1024;
+ unsigned long min_sz = 64 * 1024, sz;
int ret;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
- if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
- t->max_sz = TEST_AREA_MAX_SIZE;
- else
- t->max_sz = (unsigned long)test->card->pref_erase << 9;
+ /* Make the test area size about 4MiB */
+ sz = (unsigned long)test->card->pref_erase << 9;
+ t->max_sz = sz;
+ while (t->max_sz < 4 * 1024 * 1024)
+ t->max_sz += sz;
+ while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
+ t->max_sz -= sz;
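The sizing loop above grows the test area in pref_erase-sized steps until it reaches at least 4 MiB, then trims it back if it overshoots TEST_AREA_MAX_SIZE. A stand-alone sketch of that rule; the 3 MiB preferred erase size and the 128 MiB cap are assumptions for the demo:

#include <stdio.h>

#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)	/* assumed cap for the demo */

int main(void)
{
	unsigned long sz = 3UL * 1024 * 1024;	/* assumed 3 MiB pref_erase */
	unsigned long max_sz = sz;

	while (max_sz < 4 * 1024 * 1024)
		max_sz += sz;
	while (max_sz > TEST_AREA_MAX_SIZE && max_sz > sz)
		max_sz -= sz;

	printf("test area: %lu MiB\n", max_sz >> 20);	/* prints 6 */
	return 0;
}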
t->max_segs = test->card->host->max_segs;
t->max_seg_sz = test->card->host->max_seg_size;
@@ -1766,6 +1775,188 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return 0;
}
+static unsigned int rnd_next = 1;
+
+static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
+{
+ uint64_t r;
+
+ rnd_next = rnd_next * 1103515245 + 12345;
+ r = (rnd_next >> 16) & 0x7fff;
+ return (r * rnd_cnt) >> 15;
+}
+
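mmc_test_rnd_num() above is a plain linear congruential generator: the 15 bits taken from the state are scaled into [0, rnd_cnt) by a multiply and shift instead of a modulo. A small user-space sketch of the same scaling, using the same constants and an arbitrary seed:

#include <stdio.h>
#include <stdint.h>

static unsigned int rnd_state = 1;	/* arbitrary seed */

static unsigned int rnd(unsigned int cnt)
{
	uint64_t r;

	rnd_state = rnd_state * 1103515245 + 12345;	/* same LCG as above */
	r = (rnd_state >> 16) & 0x7fff;			/* 15 random bits */
	return (r * cnt) >> 15;				/* scaled into [0, cnt) */
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("%u\n", rnd(1000));	/* always below 1000 */
	return 0;
}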
+static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
+ unsigned long sz)
+{
+ unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
+ unsigned int ssz;
+ struct timespec ts1, ts2, ts;
+ int ret;
+
+ ssz = sz >> 9;
+
+ rnd_addr = mmc_test_capacity(test->card) / 4;
+ range1 = rnd_addr / test->card->pref_erase;
+ range2 = range1 / ssz;
+
+ getnstimeofday(&ts1);
+ for (cnt = 0; cnt < UINT_MAX; cnt++) {
+ getnstimeofday(&ts2);
+ ts = timespec_sub(ts2, ts1);
+ if (ts.tv_sec >= 10)
+ break;
+ ea = mmc_test_rnd_num(range1);
+ if (ea == last_ea)
+ ea -= 1;
+ last_ea = ea;
+ dev_addr = rnd_addr + test->card->pref_erase * ea +
+ ssz * mmc_test_rnd_num(range2);
+ ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
+ if (ret)
+ return ret;
+ }
+ if (print)
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ return 0;
+}
+
+static int mmc_test_random_perf(struct mmc_test_card *test, int write)
+{
+ unsigned int next;
+ unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+ /*
+ * When writing, try to get more consistent results by running
+ * the test twice with exactly the same I/O but outputting the
+ * results only for the 2nd run.
+ */
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ ret = mmc_test_rnd_perf(test, write, 1, sz);
+ if (ret)
+ return ret;
+ }
+ sz = test->area.max_tfr;
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ return mmc_test_rnd_perf(test, write, 1, sz);
+}
+
+/*
+ * Random read performance by transfer size.
+ */
+static int mmc_test_random_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 0);
+}
+
+/*
+ * Random write performance by transfer size.
+ */
+static int mmc_test_random_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 1);
+}
+
+static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
+ unsigned int tot_sz, int max_scatter)
+{
+ unsigned int dev_addr, i, cnt, sz, ssz;
+ struct timespec ts1, ts2, ts;
+ int ret;
+
+ sz = test->area.max_tfr;
+ /*
+ * In the case of a maximally scattered transfer, the maximum transfer
+ * size is further limited by using PAGE_SIZE segments.
+ */
+ if (max_scatter) {
+ struct mmc_test_area *t = &test->area;
+ unsigned long max_tfr;
+
+ if (t->max_seg_sz >= PAGE_SIZE)
+ max_tfr = t->max_segs * PAGE_SIZE;
+ else
+ max_tfr = t->max_segs * t->max_seg_sz;
+ if (sz > max_tfr)
+ sz = max_tfr;
+ }
+
+ ssz = sz >> 9;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if (tot_sz > dev_addr << 9)
+ tot_sz = dev_addr << 9;
+ cnt = tot_sz / sz;
+ dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, write,
+ max_scatter, 0);
+ if (ret)
+ return ret;
+ dev_addr += ssz;
+ }
+ getnstimeofday(&ts2);
+
+ ts = timespec_sub(ts2, ts1);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+
+ return 0;
+}
+
+static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
+{
+ int ret, i;
+
+ for (i = 0; i < 10; i++) {
+ ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 5; i++) {
+ ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Large sequential read performance.
+ */
+static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 0);
+}
+
+/*
+ * Large sequential write performance.
+ */
+static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 1);
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -2005,6 +2196,34 @@ static const struct mmc_test_case mmc_test_cases[] = {
.cleanup = mmc_test_area_cleanup,
},
+ {
+ .name = "Random read performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Random write performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential read into scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential write from scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
};
static DEFINE_MUTEX(mmc_test_lock);
@@ -2148,11 +2367,11 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %lu.%09lu %u\n",
+ seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
tr->count, tr->sectors,
(unsigned long)tr->ts.tv_sec,
(unsigned long)tr->ts.tv_nsec,
- tr->rate);
+ tr->rate, tr->iops / 100, tr->iops % 100);
}
}
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 86b479119332..639501970b41 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC) += mmc_core.o
mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
- sdio_cis.o sdio_io.o sdio_irq.o
+ sdio_cis.o sdio_io.o sdio_irq.o \
+ quirks.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 150b5f3cd401..1f453acc8682 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -167,8 +167,6 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
WARN_ON(!host->claimed);
- led_trigger_event(host->led, LED_FULL);
-
mrq->cmd->error = 0;
mrq->cmd->mrq = mrq;
if (mrq->data) {
@@ -194,6 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
}
mmc_host_clk_ungate(host);
+ led_trigger_event(host->led, LED_FULL);
host->ops->request(host, mrq);
}
@@ -528,7 +527,14 @@ int mmc_try_claim_host(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_try_claim_host);
-static void mmc_do_release_host(struct mmc_host *host)
+/**
+ * mmc_do_release_host - release a claimed host
+ * @host: mmc host to release
+ *
+ * If you successfully claimed a host, this function will
+ * release it again.
+ */
+void mmc_do_release_host(struct mmc_host *host)
{
unsigned long flags;
@@ -543,6 +549,7 @@ static void mmc_do_release_host(struct mmc_host *host)
wake_up(&host->wq);
}
}
+EXPORT_SYMBOL(mmc_do_release_host);
void mmc_host_deeper_disable(struct work_struct *work)
{
@@ -1002,6 +1009,13 @@ static void mmc_power_off(struct mmc_host *host)
{
host->ios.clock = 0;
host->ios.vdd = 0;
+
+ /*
+ * Reset ocr mask to be the highest possible voltage supported for
+ * this mmc host. This value will be used at next power up.
+ */
+ host->ocr = 1 << (fls(host->ocr_avail) - 1);
+
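fls() returns the position of the highest set bit, so the assignment above collapses ocr_avail to its single highest supported voltage window for the next power-up. A user-space sketch with an assumed 2.7V-3.6V mask:

#include <stdio.h>

static int fls32(unsigned int x)	/* same semantics as the kernel's fls() */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int ocr_avail = 0x00ff8000;	/* assumed 2.7V-3.6V mask */
	unsigned int ocr = 1u << (fls32(ocr_avail) - 1);

	printf("ocr = 0x%08x\n", ocr);	/* 0x00800000: highest voltage bit */
	return 0;
}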
if (!mmc_host_is_spi(host)) {
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
host->ios.chip_select = MMC_CS_DONTCARE;
@@ -1495,6 +1509,12 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
mmc_hostname(host), __func__, host->f_init);
#endif
mmc_power_up(host);
+
+ /*
+ * sdio_reset sends CMD52 to reset card. Since we do not know
+ * if the card is being re-initialized, just send it. CMD52
+ * should be ignored by SD/eMMC cards.
+ */
sdio_reset(host);
mmc_go_idle(host);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ca1fdde29df6..20b1c0831eac 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -61,6 +61,8 @@ int mmc_attach_mmc(struct mmc_host *host);
int mmc_attach_sd(struct mmc_host *host);
int mmc_attach_sdio(struct mmc_host *host);
+void mmc_fixup_device(struct mmc_card *card);
+
/* Module parameters */
extern int use_spi_crc;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b3ac6c5bc5c6..461e6a17fb90 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -160,10 +160,7 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
* gate the clock, because there is somebody out there that may still
* be using it.
*/
- if (mmc_card_sdio(card))
- return false;
-
- return true;
+ return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}
/**
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 16006ef153fe..14e95f39a7bf 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -302,6 +302,44 @@ static int mmc_read_ext_csd(struct mmc_card *card)
}
if (card->ext_csd.rev >= 4) {
+ /*
+ * Enhanced area feature support -- check whether the eMMC
+ * card has the enhanced area enabled. If so, export the enhanced
+ * area offset and size to user space via a sysfs interface.
+ */
+ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ u8 hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ u8 hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ card->ext_csd.enhanced_area_en = 1;
+ /*
+ * calculate the enhanced data area offset, in bytes
+ */
+ card->ext_csd.enhanced_area_offset =
+ (ext_csd[139] << 24) + (ext_csd[138] << 16) +
+ (ext_csd[137] << 8) + ext_csd[136];
+ if (mmc_card_blockaddr(card))
+ card->ext_csd.enhanced_area_offset <<= 9;
+ /*
+ * calculate the enhanced data area size, in kilobytes
+ */
+ card->ext_csd.enhanced_area_size =
+ (ext_csd[142] << 16) + (ext_csd[141] << 8) +
+ ext_csd[140];
+ card->ext_csd.enhanced_area_size *=
+ (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
+ card->ext_csd.enhanced_area_size <<= 9;
+ } else {
+ /*
+ * If the enhanced area is not enabled, disable these
+ * device attributes.
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+ }
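A worked example of the size computation above, using made-up EXT_CSD values (ENH_SIZE_MULT = 4, HC_ERASE_GRP_SIZE = 4, HC_WP_GRP_SIZE = 16):

#include <stdio.h>

int main(void)
{
	unsigned int mult = 4, hc_erase_grp_sz = 4, hc_wp_grp_sz = 16;
	unsigned long long size_kb =
		(unsigned long long)mult * hc_erase_grp_sz * hc_wp_grp_sz << 9;

	/* 4 * 64 write-protect groups of 512 KiB each */
	printf("enhanced area size: %llu KiB (%llu MiB)\n",
	       size_kb, size_kb >> 10);	/* 131072 KiB, 128 MiB */
	return 0;
}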
card->ext_csd.sec_trim_mult =
ext_csd[EXT_CSD_SEC_TRIM_MULT];
card->ext_csd.sec_erase_mult =
@@ -336,6 +374,9 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
+ card->ext_csd.enhanced_area_offset);
+MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
@@ -349,6 +390,8 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_serial.attr,
+ &dev_attr_enhanced_area_offset.attr,
+ &dev_attr_enhanced_area_size.attr,
NULL,
};
@@ -378,6 +421,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
int err, ddr = 0;
u32 cid[4];
unsigned int max_dtr;
+ u32 rocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -391,7 +435,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_go_idle(host);
/* The extra bit indicates that we support high capacity */
- err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
+ err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
if (err)
goto err;
@@ -479,11 +523,51 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_read_ext_csd(card);
if (err)
goto free_card;
+
+ /* If doing byte addressing, check if required to do sector
+ * addressing. Handle the case of <2GB cards needing sector
+ * addressing. See section 8.1 JEDEC Standard JED84-A441;
+ * ocr register has bit 30 set for sector addressing.
+ */
+ if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
+ mmc_card_set_blockaddr(card);
+
/* Erase size depends on CSD and Extended CSD */
mmc_set_erase_size(card);
}
/*
+ * If enhanced_area_en is TRUE, the host needs to set the ERASE_GRP_DEF
+ * bit. This bit is lost after every reset or power off.
+ */
+ if (card->ext_csd.enhanced_area_en) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_ERASE_GROUP_DEF, 1);
+
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ if (err) {
+ err = 0;
+ /*
+ * Just invalidate the enhanced area offset & size;
+ * enabling ERASE_GROUP_DEF will be retried at the
+ * next reinit.
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+ } else {
+ card->ext_csd.erase_group_def = 1;
+ /*
+ * ERASE_GRP_DEF was enabled successfully.
+ * This affects the erase size, so
+ * recalculate it here.
+ */
+ mmc_set_erase_size(card);
+ }
+ }
+
+ /*
* Activate high speed (if supported)
*/
if ((card->ext_csd.hs_max_dtr != 0) &&
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
new file mode 100644
index 000000000000..11118b74eb20
--- /dev/null
+++ b/drivers/mmc/core/quirks.c
@@ -0,0 +1,84 @@
+/*
+ * This file contains work-arounds for many known sdio hardware
+ * bugs.
+ *
+ * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
+ * Inspired from pci fixup code:
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mmc/card.h>
+#include <linux/mod_devicetable.h>
+
+/*
+ * The world is not perfect and supplies us with broken mmc/sdio devices.
+ * For at least some of these bugs we need a work-around.
+ */
+
+struct mmc_fixup {
+ u16 vendor, device; /* You can use SDIO_ANY_ID here of course */
+ void (*vendor_fixup)(struct mmc_card *card, int data);
+ int data;
+};
+
+/*
+ * This hook just adds a quirk unconditionally
+ */
+static void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+ card->quirks |= data;
+}
+
+/*
+ * This hook just removes a quirk unconditionally
+ */
+static void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+ card->quirks &= ~data;
+}
+
+/*
+ * This hook just adds a quirk for all sdio devices
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+ if (mmc_card_sdio(card))
+ card->quirks |= data;
+}
+
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI 0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#endif
+
+static const struct mmc_fixup mmc_fixup_methods[] = {
+ /* by default sdio devices are considered CLK_GATING broken */
+ /* good cards will be whitelisted as they are tested */
+ { SDIO_ANY_ID, SDIO_ANY_ID,
+ add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING },
+ { SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING },
+ { 0 }
+};
+
+void mmc_fixup_device(struct mmc_card *card)
+{
+ const struct mmc_fixup *f;
+
+ for (f = mmc_fixup_methods; f->vendor_fixup; f++) {
+ if ((f->vendor == card->cis.vendor
+ || f->vendor == (u16) SDIO_ANY_ID) &&
+ (f->device == card->cis.device
+ || f->device == (u16) SDIO_ANY_ID)) {
+ dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
+ }
+ }
+}
+EXPORT_SYMBOL(mmc_fixup_device);
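The table walk in mmc_fixup_device() treats SDIO_ANY_ID as a wildcard and runs every matching hook in order, so the blanket "broken clock gating" entry applies first and the WL1271 entry then clears the quirk again. A stripped-down user-space sketch of that matching; the struct and IDs are reduced for the demo and are not the kernel API:

#include <stdio.h>
#include <stdint.h>

#define ANY_ID			0xffff
#define QUIRK_BROKEN_CLK_GATING	(1 << 0)

struct fixup {
	uint16_t vendor, device;
	void (*hook)(unsigned int *quirks, int data);
	int data;
};

static void add_quirk(unsigned int *quirks, int data)    { *quirks |= data; }
static void remove_quirk(unsigned int *quirks, int data) { *quirks &= ~data; }

static const struct fixup fixups[] = {
	{ ANY_ID, ANY_ID, add_quirk, QUIRK_BROKEN_CLK_GATING },
	{ 0x0097, 0x4076, remove_quirk, QUIRK_BROKEN_CLK_GATING }, /* TI WL1271 */
	{ 0 }
};

int main(void)
{
	unsigned int quirks = 0;
	uint16_t vendor = 0x0097, device = 0x4076;	/* pretend card IDs */
	const struct fixup *f;

	for (f = fixups; f->hook; f++)
		if ((f->vendor == vendor || f->vendor == ANY_ID) &&
		    (f->device == device || f->device == ANY_ID))
			f->hook(&quirks, f->data);

	printf("quirks = 0x%x\n", quirks);	/* 0x0: gating allowed again */
	return 0;
}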
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d18c32bca99b..6dac89fe0535 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -21,6 +21,7 @@
#include "core.h"
#include "bus.h"
#include "mmc_ops.h"
+#include "sd.h"
#include "sd_ops.h"
static const unsigned int tran_exp[] = {
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index ebc62ad4cc56..db0f0b44d684 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -395,6 +395,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto remove;
+ /*
+ * Update oldcard with the new RCA received from the SDIO
+ * device -- we're doing this so that it's updated in the
+ * "card" struct when oldcard overwrites that later.
+ */
+ if (oldcard)
+ oldcard->rca = card->rca;
+
mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
}
@@ -458,6 +466,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
card = oldcard;
}
+ mmc_fixup_device(card);
if (card->type == MMC_TYPE_SD_COMBO) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index afe8c6fa166a..1a21c6427a19 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -225,7 +225,7 @@ config MMC_OMAP
config MMC_OMAP_HS
tristate "TI OMAP High Speed Multimedia Card Interface support"
- depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
+ depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
help
This selects the TI OMAP High Speed Multimedia card Interface.
If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
@@ -311,7 +311,7 @@ config MMC_MSM
config MMC_MXC
tristate "Freescale i.MX2/3 Multimedia Card Interface support"
- depends on ARCH_MXC
+ depends on MACH_MX21 || MACH_MX27 || ARCH_MX31
help
This selects the Freescale i.MX2/3 Multimedia card Interface.
If you have a i.MX platform with a Multimedia Card slot,
@@ -319,6 +319,15 @@ config MMC_MXC
If unsure, say N.
+config MMC_MXS
+ tristate "Freescale MXS Multimedia Card Interface support"
+ depends on ARCH_MXS && MXS_DMA
+ help
+ This selects the Freescale SSP MMC controller found on MXS based
+ platforms like mx23/28.
+
+ If unsure, say N.
+
config MMC_TIFM_SD
tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
depends on EXPERIMENTAL && PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e834fb223e9a..30aa6867745f 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
+obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index ad2a7a032cdf..80bc9a5c25cc 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -578,7 +578,8 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
struct mmc_data *data = host->data;
if (data)
- dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+ dma_unmap_sg(host->dma.chan->device->dev,
+ data->sg, data->sg_len,
((data->flags & MMC_DATA_WRITE)
? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
@@ -588,7 +589,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
struct dma_chan *chan = host->data_chan;
if (chan) {
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(chan);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
@@ -684,11 +685,11 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
else
direction = DMA_TO_DEVICE;
- sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction);
- if (sglen != data->sg_len)
- goto unmap_exit;
+ sglen = dma_map_sg(chan->device->dev, data->sg,
+ data->sg_len, direction);
+
desc = chan->device->device_prep_slave_sg(chan,
- data->sg, data->sg_len, direction,
+ data->sg, sglen, direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
@@ -699,7 +700,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
return 0;
unmap_exit:
- dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction);
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
return -ENOMEM;
}
@@ -709,8 +710,8 @@ static void atmci_submit_data(struct atmel_mci *host)
struct dma_async_tx_descriptor *desc = host->dma.data_desc;
if (chan) {
- desc->tx_submit(desc);
- chan->device->device_issue_pending(chan);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
}
}
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 66b4ce587f4b..ce2a47b71dd6 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -205,7 +205,7 @@ static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
"WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
limit, mask, e, x);
#endif
- return 0;
+ return err;
}
static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 2fcc82577c1b..5a614069cb00 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -32,6 +32,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
#include "dw_mmc.h"
@@ -562,7 +563,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* enable clock */
- mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
+ mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE |
+ SDMMC_CLKEN_LOW_PWR);
/* inform CIU */
mci_send_cmd(slot,
@@ -661,6 +663,7 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
+ u32 regs;
/* set default 1 bit mode */
slot->ctype = SDMMC_CTYPE_1BIT;
@@ -672,6 +675,16 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
slot->ctype = SDMMC_CTYPE_4BIT;
break;
+ case MMC_BUS_WIDTH_8:
+ slot->ctype = SDMMC_CTYPE_8BIT;
+ break;
+ }
+
+ /* DDR mode set */
+ if (ios->ddr) {
+ regs = mci_readl(slot->host, UHS_REG);
+ regs |= (0x1 << slot->id) << 16;
+ mci_writel(slot->host, UHS_REG, regs);
}
if (ios->clock) {
@@ -717,7 +730,9 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_cd function, else try onboard card detect */
- if (brd->get_cd)
+ if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
+ present = 1;
+ else if (brd->get_cd)
present = !brd->get_cd(slot->id);
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
@@ -1019,13 +1034,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
- unsigned int nbytes = 0, len, old_len, count = 0;
+ unsigned int nbytes = 0, len;
do {
len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
- if (count == 0)
- old_len = len;
-
if (offset + len <= sg->length) {
host->pull_data(host, (void *)(buf + offset), len);
@@ -1070,7 +1082,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
tasklet_schedule(&host->tasklet);
return;
}
- count++;
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
host->pio_offset = offset;
@@ -1395,7 +1406,11 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->setpower)
host->pdata->setpower(id, 0);
- mmc->caps = 0;
+ if (host->pdata->caps)
+ mmc->caps = host->pdata->caps;
+ else
+ mmc->caps = 0;
+
if (host->pdata->get_bus_wd)
if (host->pdata->get_bus_wd(slot->id) >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
@@ -1426,6 +1441,13 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
}
#endif /* CONFIG_MMC_DW_IDMAC */
+ host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+ if (IS_ERR(host->vmmc)) {
+ printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+ host->vmmc = NULL;
+ } else
+ regulator_enable(host->vmmc);
+
if (dw_mci_get_cd(mmc))
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
else
@@ -1441,6 +1463,12 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
/* Card initially undetected */
slot->last_detect_state = 0;
+ /*
+ * Card may have been plugged in prior to boot so we
+ * need to run the detect tasklet
+ */
+ tasklet_schedule(&host->card_tasklet);
+
return 0;
}
@@ -1619,8 +1647,9 @@ static int dw_mci_probe(struct platform_device *pdev)
*/
fifo_size = mci_readl(host, FIFOTH);
fifo_size = (fifo_size >> 16) & 0x7ff;
- mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
- ((fifo_size/2) << 0)));
+ host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
+ ((fifo_size/2) << 0));
+ mci_writel(host, FIFOTH, host->fifoth_val);
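Caching fifoth_val here lets the resume path reprogram the watermarks later. Assuming the usual Synopsys FIFOTH layout (burst size in bits 30:28, RX watermark in bits 27:16, TX watermark in bits 11:0), a small sketch of the encoding with an assumed 32-entry FIFO:

#include <stdio.h>

int main(void)
{
	unsigned int fifo_size = 32;	/* assumed FIFO depth, in entries */
	unsigned int fifoth = (0x2 << 28) | ((fifo_size / 2 - 1) << 16) |
			      ((fifo_size / 2) << 0);

	printf("FIFOTH = 0x%08x\n", fifoth);	/* 0x200f0010 */
	return 0;
}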
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
@@ -1683,6 +1712,12 @@ err_dmaunmap:
host->sg_cpu, host->sg_dma);
iounmap(host->regs);
+ if (host->vmmc) {
+ regulator_disable(host->vmmc);
+ regulator_put(host->vmmc);
+ }
+
+
err_freehost:
kfree(host);
return ret;
@@ -1714,6 +1749,11 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
+ if (host->vmmc) {
+ regulator_disable(host->vmmc);
+ regulator_put(host->vmmc);
+ }
+
iounmap(host->regs);
kfree(host);
@@ -1729,6 +1769,9 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
int i, ret;
struct dw_mci *host = platform_get_drvdata(pdev);
+ if (host->vmmc)
+ regulator_enable(host->vmmc);
+
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
@@ -1744,6 +1787,9 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
}
}
+ if (host->vmmc)
+ regulator_disable(host->vmmc);
+
return 0;
}
@@ -1752,6 +1798,23 @@ static int dw_mci_resume(struct platform_device *pdev)
int i, ret;
struct dw_mci *host = platform_get_drvdata(pdev);
+ if (host->dma_ops->init)
+ host->dma_ops->init(host);
+
+ if (!mci_wait_reset(&pdev->dev, host)) {
+ ret = -ENODEV;
+ return ret;
+ }
+
+ /* Restore the old value at FIFOTH register */
+ mci_writel(host, FIFOTH, host->fifoth_val);
+
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
+
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 5dd55a75233d..23c662af5616 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -43,6 +43,7 @@
#define SDMMC_USRID 0x068
#define SDMMC_VERID 0x06c
#define SDMMC_HCON 0x070
+#define SDMMC_UHS_REG 0x074
#define SDMMC_BMOD 0x080
#define SDMMC_PLDMND 0x084
#define SDMMC_DBADDR 0x088
@@ -51,7 +52,6 @@
#define SDMMC_DSCADDR 0x094
#define SDMMC_BUFADDR 0x098
#define SDMMC_DATA 0x100
-#define SDMMC_DATA_ADR 0x100
/* shift bit field */
#define _SBF(f, v) ((v) << (f))
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2d6de3e03e2d..5bbb87d10251 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,10 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
-#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -142,9 +144,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->mrq = NULL;
host->cmd = NULL;
- if (mrq->data)
- mrq->data->bytes_xfered = host->data_xfered;
-
/*
* Need to drop the host lock here; mmc_request_done may call
* back into the driver...
@@ -189,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+ const char *rxname, *txname;
+ dma_cap_mask_t mask;
+
+ if (!plat || !plat->dma_filter) {
+ dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+ return;
+ }
+
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /*
+ * If only an RX channel is specified, the driver will
+ * attempt to use it bidirectionally; however, if it is
+ * specified but cannot be located, DMA will be disabled.
+ */
+ if (plat->dma_rx_param) {
+ host->dma_rx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_rx_param);
+ /* E.g if no DMA hardware is present */
+ if (!host->dma_rx_channel)
+ dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+ }
+
+ if (plat->dma_tx_param) {
+ host->dma_tx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_tx_param);
+ if (!host->dma_tx_channel)
+ dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+ } else {
+ host->dma_tx_channel = host->dma_rx_channel;
+ }
+
+ if (host->dma_rx_channel)
+ rxname = dma_chan_name(host->dma_rx_channel);
+ else
+ rxname = "none";
+
+ if (host->dma_tx_channel)
+ txname = dma_chan_name(host->dma_tx_channel);
+ else
+ txname = "none";
+
+ dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+ rxname, txname);
+
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (host->dma_tx_channel) {
+ struct device *dev = host->dma_tx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+ if (host->dma_rx_channel) {
+ struct device *dev = host->dma_rx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+}
+
+/*
+ * This is used in __devinit or __devexit, so inline it
+ * so that it can be discarded along with those sections.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+
+ if (host->dma_rx_channel)
+ dma_release_channel(host->dma_rx_channel);
+ if (host->dma_tx_channel && plat->dma_tx_param)
+ dma_release_channel(host->dma_tx_channel);
+ host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+ struct dma_chan *chan = host->dma_current;
+ enum dma_data_direction dir;
+ u32 status;
+ int i;
+
+ /* Wait up to 1ms for the DMA to complete */
+ for (i = 0; ; i++) {
+ status = readl(host->base + MMCISTATUS);
+ if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * Check to see whether we still have some data left in the FIFO -
+ * this catches DMA controllers which are unable to monitor the
+ * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+ * contiguous buffers. On TX, we'll get a FIFO underrun error.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dmaengine_terminate_all(chan);
+ if (!data->error)
+ data->error = -EIO;
+ }
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dir = DMA_TO_DEVICE;
+ } else {
+ dir = DMA_FROM_DEVICE;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+ /*
+ * Use of DMA with scatter-gather is impossible.
+ * Give up with DMA and switch back to PIO mode.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+ mmci_dma_release(host);
+ }
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ struct variant_data *variant = host->variant;
+ struct dma_slave_config conf = {
+ .src_addr = host->phybase + MMCIFIFO,
+ .dst_addr = host->phybase + MMCIFIFO,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ };
+ struct mmc_data *data = host->data;
+ struct dma_chan *chan;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor *desc;
+ int nr_sg;
+
+ host->dma_current = NULL;
+
+ if (data->flags & MMC_DATA_READ) {
+ conf.direction = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ conf.direction = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ /* If there's no DMA channel, fall back to PIO */
+ if (!chan)
+ return -EINVAL;
+
+ /* If less than or equal to the fifo size, don't bother with DMA */
+ if (host->size <= variant->fifosize)
+ return -EINVAL;
+
+ device = chan->device;
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ if (nr_sg == 0)
+ return -EINVAL;
+
+ dmaengine_slave_config(chan, &conf);
+ desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+ conf.direction, DMA_CTRL_ACK);
+ if (!desc)
+ goto unmap_exit;
+
+ /* Okay, go for it. */
+ host->dma_current = chan;
+
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ datactrl |= MCI_DPSM_DMAENABLE;
+
+ /* Trigger the DMA transfer */
+ writel(datactrl, host->base + MMCIDATACTRL);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+
+unmap_exit:
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ return -ENOSYS;
+}
+#endif
+
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
struct variant_data *variant = host->variant;
@@ -202,9 +443,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
host->data = data;
host->size = data->blksz * data->blocks;
- host->data_xfered = 0;
-
- mmci_init_sg(host, data);
+ data->bytes_xfered = 0;
clks = (unsigned long long)data->timeout_ns * host->cclk;
do_div(clks, 1000000000UL);
@@ -219,15 +458,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
BUG_ON(1 << blksz_bits != data->blksz);
datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
- if (data->flags & MMC_DATA_READ) {
+
+ if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
+
+ /*
+ * Attempt to use DMA operation mode; if this
+ * fails, fall back to PIO mode.
+ */
+ if (!mmci_dma_start_data(host, datactrl))
+ return;
+
+ /* IRQ mode, map the SG list for CPU reading/writing */
+ mmci_init_sg(host, data);
+
+ if (data->flags & MMC_DATA_READ) {
irqmask = MCI_RXFIFOHALFFULLMASK;
/*
- * If we have less than a FIFOSIZE of bytes to transfer,
- * trigger a PIO interrupt as soon as any data is available.
+ * If we have less than the fifo 'half-full' threshold to
+ * transfer, trigger a PIO interrupt as soon as any data
+ * is available.
*/
- if (host->size < variant->fifosize)
+ if (host->size < variant->fifohalfsize)
irqmask |= MCI_RXDATAAVLBLMASK;
} else {
/*
@@ -283,49 +536,51 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
- /* Calculate how far we are into the transfer */
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
+
+ /*
+ * Calculate how far we are into the transfer. Note that
+ * the data counter gives the number of bytes transferred
+ * on the MMC bus, not on the host side. On reads, this
+ * can be as much as a FIFO-worth of data ahead. This
+ * matters for FIFO overruns only.
+ */
remain = readl(host->base + MMCIDATACNT);
success = data->blksz * data->blocks - remain;
- dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+ dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+ status, success);
if (status & MCI_DATACRCFAIL) {
/* Last block was not successful */
- host->data_xfered = round_down(success - 1, data->blksz);
+ success -= 1;
data->error = -EILSEQ;
} else if (status & MCI_DATATIMEOUT) {
- host->data_xfered = round_down(success, data->blksz);
data->error = -ETIMEDOUT;
- } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
- host->data_xfered = round_down(success, data->blksz);
+ } else if (status & MCI_TXUNDERRUN) {
+ data->error = -EIO;
+ } else if (status & MCI_RXOVERRUN) {
+ if (success > host->variant->fifosize)
+ success -= host->variant->fifosize;
+ else
+ success = 0;
data->error = -EIO;
}
-
- /*
- * We hit an error condition. Ensure that any data
- * partially written to a page is properly coherent.
- */
- if (data->flags & MMC_DATA_READ) {
- struct sg_mapping_iter *sg_miter = &host->sg_miter;
- unsigned long flags;
-
- local_irq_save(flags);
- if (sg_miter_next(sg_miter)) {
- flush_dcache_page(sg_miter->page);
- sg_miter_stop(sg_miter);
- }
- local_irq_restore(flags);
- }
+ data->bytes_xfered = round_down(success, data->blksz);
}
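A worked example of the error-path accounting above, with made-up numbers: blksz = 512, blocks = 8, 1000 bytes still reported by the data counter, and an RX overrun on a 64-byte FIFO:

#include <stdio.h>

int main(void)
{
	unsigned int blksz = 512, blocks = 8, remain = 1000, fifosize = 64;
	unsigned int success = blksz * blocks - remain;	/* 3096 bytes */

	success -= fifosize;	/* RX overrun: discount one FIFO-worth */
	/* round down to a whole number of blocks, as the driver does */
	printf("bytes_xfered = %u\n", success - (success % blksz)); /* 2560 */
	return 0;
}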
if (status & MCI_DATABLOCKEND)
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
if (status & MCI_DATAEND || data->error) {
+ if (dma_inprogress(host))
+ mmci_dma_unmap(host, data);
mmci_stop_data(host);
if (!data->error)
/* The error clause is handled above, success! */
- host->data_xfered += data->blksz * data->blocks;
+ data->bytes_xfered = data->blksz * data->blocks;
if (!data->stop) {
mmci_request_end(host, data->mrq);
@@ -498,9 +753,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
if (remain)
break;
- if (status & MCI_RXACTIVE)
- flush_dcache_page(sg_miter->page);
-
status = readl(base + MMCISTATUS);
} while (1);
@@ -509,10 +761,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
local_irq_restore(flags);
/*
- * If we're nearing the end of the read, switch to
- * "any data available" mode.
+ * If we have less than the fifo 'half-full' threshold to transfer,
+ * trigger a PIO interrupt as soon as any data is available.
*/
- if (status & MCI_RXACTIVE && host->size < variant->fifosize)
+ if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
/*
@@ -713,7 +965,8 @@ static const struct mmc_host_ops mmci_ops = {
.get_cd = mmci_get_cd,
};
-static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit mmci_probe(struct amba_device *dev,
+ const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
struct variant_data *variant = id->data;
@@ -776,6 +1029,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
+ host->phybase = dev->res.start;
host->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!host->base) {
ret = -ENOMEM;
@@ -903,9 +1157,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
amba_set_drvdata(dev, mmc);
- dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
- mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
- (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+ dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+ mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+ amba_rev(dev), (unsigned long long)dev->res.start,
+ dev->irq[0], dev->irq[1]);
+
+ mmci_dma_setup(host);
mmc_add_host(mmc);
@@ -952,6 +1209,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
+ mmci_dma_release(host);
free_irq(dev->irq[0], host);
if (!host->singleirq)
free_irq(dev->irq[1], host);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index c1df7b82d36c..ec9a7bc6d0df 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -148,8 +148,10 @@
struct clk;
struct variant_data;
+struct dma_chan;
struct mmci_host {
+ phys_addr_t phybase;
void __iomem *base;
struct mmc_request *mrq;
struct mmc_command *cmd;
@@ -161,8 +163,6 @@ struct mmci_host {
int gpio_cd_irq;
bool singleirq;
- unsigned int data_xfered;
-
spinlock_t lock;
unsigned int mclk;
@@ -181,5 +181,16 @@ struct mmci_host {
struct sg_mapping_iter sg_miter;
unsigned int size;
struct regulator *vcc;
+
+#ifdef CONFIG_DMA_ENGINE
+ /* DMA stuff */
+ struct dma_chan *dma_current;
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+
+#define dma_inprogress(host) ((host)->dma_current)
+#else
+#define dma_inprogress(host) (0)
+#endif
};
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 153ab977a013..a4c865a5286b 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/memory.h>
#include <linux/gfp.h>
+#include <linux/gpio.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
@@ -266,14 +267,6 @@ msmsdcc_dma_complete_tlet(unsigned long data)
dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
host->dma.dir);
- if (host->curr.user_pages) {
- struct scatterlist *sg = host->dma.sg;
- int i;
-
- for (i = 0; i < host->dma.num_ents; i++)
- flush_dcache_page(sg_page(sg++));
- }
-
host->dma.sg = NULL;
host->dma.busy = 0;
@@ -941,6 +934,38 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
+{
+ struct msm_mmc_gpio_data *curr;
+ int i, rc = 0;
+
+ if (!host->plat->gpio_data || host->gpio_config_status == enable)
+ return;
+
+ curr = host->plat->gpio_data;
+ for (i = 0; i < curr->size; i++) {
+ if (enable) {
+ rc = gpio_request(curr->gpio[i].no,
+ curr->gpio[i].name);
+ if (rc) {
+ pr_err("%s: gpio_request(%d, %s) failed %d\n",
+ mmc_hostname(host->mmc),
+ curr->gpio[i].no,
+ curr->gpio[i].name, rc);
+ goto free_gpios;
+ }
+ } else {
+ gpio_free(curr->gpio[i].no);
+ }
+ }
+ host->gpio_config_status = enable;
+ return;
+
+free_gpios:
+ for (i--; i >= 0; i--)
+ gpio_free(curr->gpio[i].no);
+}
+
static void
msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
@@ -953,6 +978,8 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
msmsdcc_enable_clocks(host);
+ spin_unlock_irqrestore(&host->lock, flags);
+
if (ios->clock) {
if (ios->clock != host->clk_rate) {
rc = clk_set_rate(host->clk, ios->clock);
@@ -979,9 +1006,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
+ msmsdcc_setup_gpio(host, false);
break;
case MMC_POWER_UP:
pwr |= MCI_PWR_UP;
+ msmsdcc_setup_gpio(host, true);
break;
case MMC_POWER_ON:
pwr |= MCI_PWR_ON;
@@ -998,9 +1027,10 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
msmsdcc_writel(host, pwr, MMCIPOWER);
}
#if BUSCLK_PWRSAVE
+ spin_lock_irqsave(&host->lock, flags);
msmsdcc_disable_clocks(host, 1);
-#endif
spin_unlock_irqrestore(&host->lock, flags);
+#endif
}
static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 939557af266d..42d7bbc977c5 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -243,6 +243,7 @@ struct msmsdcc_host {
unsigned int cmd_datactrl;
struct mmc_command *cmd_cmd;
u32 cmd_c;
+ bool gpio_config_status;
bool prog_scan;
bool prog_enable;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 4428594261c5..cc20e0259325 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -32,16 +32,14 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <mach/mmc.h>
-#ifdef CONFIG_ARCH_MX2
-#include <mach/dma-mx1-mx2.h>
-#define HAS_DMA
-#endif
+#include <mach/dma.h>
#define DRIVER_NAME "mxc-mmc"
@@ -118,7 +116,8 @@ struct mxcmci_host {
void __iomem *base;
int irq;
int detect_irq;
- int dma;
+ struct dma_chan *dma;
+ struct dma_async_tx_descriptor *desc;
int do_dma;
int default_irq_mask;
int use_sdio;
@@ -129,7 +128,6 @@ struct mxcmci_host {
struct mmc_command *cmd;
struct mmc_data *data;
- unsigned int dma_nents;
unsigned int datasize;
unsigned int dma_dir;
@@ -144,6 +142,11 @@ struct mxcmci_host {
spinlock_t lock;
struct regulator *vcc;
+
+ int burstlen;
+ int dmareq;
+ struct dma_slave_config dma_slave_config;
+ struct imx_dma_data dma_data;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -206,17 +209,16 @@ static void mxcmci_softreset(struct mxcmci_host *host)
writew(0xff, host->base + MMC_REG_RES_TO);
}
+static int mxcmci_setup_dma(struct mmc_host *mmc);
static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
unsigned int blksz = data->blksz;
unsigned int datasize = nob * blksz;
-#ifdef HAS_DMA
struct scatterlist *sg;
- int i;
- int ret;
-#endif
+ int i, nents;
+
if (data->flags & MMC_DATA_STREAM)
nob = 0xffff;
@@ -227,7 +229,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
writew(blksz, host->base + MMC_REG_BLK_LEN);
host->datasize = datasize;
-#ifdef HAS_DMA
+ if (!mxcmci_use_dma(host))
+ return 0;
+
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3) {
host->do_dma = 0;
@@ -235,34 +239,30 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
}
}
- if (data->flags & MMC_DATA_READ) {
+ if (data->flags & MMC_DATA_READ)
host->dma_dir = DMA_FROM_DEVICE;
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
-
- ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
- datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_READ);
- } else {
+ else
host->dma_dir = DMA_TO_DEVICE;
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
- ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
- datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_WRITE);
- }
+ nents = dma_map_sg(host->dma->device->dev, data->sg,
+ data->sg_len, host->dma_dir);
+ if (nents != data->sg_len)
+ return -EINVAL;
+
+ host->desc = host->dma->device->device_prep_slave_sg(host->dma,
+ data->sg, data->sg_len, host->dma_dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (ret) {
- dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret);
- return ret;
+ if (!host->desc) {
+ dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
+ host->dma_dir);
+ host->do_dma = 0;
+ return 0; /* Fall back to PIO */
}
wmb();
- imx_dma_enable(host->dma);
-#endif /* HAS_DMA */
+ dmaengine_submit(host->desc);
+
return 0;
}
@@ -337,13 +337,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
struct mmc_data *data = host->data;
int data_error;
-#ifdef HAS_DMA
if (mxcmci_use_dma(host)) {
- imx_dma_disable(host->dma);
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
+ dmaengine_terminate_all(host->dma);
+ dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
host->dma_dir);
}
-#endif
if (stat & STATUS_ERR_MASK) {
dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -545,7 +543,6 @@ static void mxcmci_datawork(struct work_struct *work)
}
}
-#ifdef HAS_DMA
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
{
struct mmc_data *data = host->data;
@@ -568,7 +565,6 @@ static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
mxcmci_finish_request(host, host->req);
}
}
-#endif /* HAS_DMA */
static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
{
@@ -606,12 +602,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
spin_unlock_irqrestore(&host->lock, flags);
-#ifdef HAS_DMA
if (mxcmci_use_dma(host) &&
(stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
host->base + MMC_REG_STATUS);
-#endif
if (sdio_irq) {
writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
@@ -621,14 +615,14 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
if (stat & STATUS_END_CMD_RESP)
mxcmci_cmd_done(host, stat);
-#ifdef HAS_DMA
if (mxcmci_use_dma(host) &&
(stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
mxcmci_data_done(host, stat);
-#endif
+
if (host->default_irq_mask &&
(stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+
return IRQ_HANDLED;
}
@@ -642,9 +636,10 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
host->req = req;
host->cmdat &= ~CMD_DAT_CONT_INIT;
-#ifdef HAS_DMA
- host->do_dma = 1;
-#endif
+
+ if (host->dma)
+ host->do_dma = 1;
+
if (req->data) {
error = mxcmci_setup_data(host, req->data);
if (error) {
@@ -660,6 +655,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
}
error = mxcmci_start_cmd(host, req->cmd, cmdat);
+
out:
if (error)
mxcmci_finish_request(host, req);
@@ -698,22 +694,46 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
prescaler, divider, clk_in, clk_ios);
}
+static int mxcmci_setup_dma(struct mmc_host *mmc)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ struct dma_slave_config *config = &host->dma_slave_config;
+
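+	/*
+	 * The SDHC exposes a single FIFO register for both directions,
+	 * so source and destination address are identical; the slave
+	 * channel uses the one matching each descriptor's direction.
+	 */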
+ config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
+ config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
+ config->dst_addr_width = 4;
+ config->src_addr_width = 4;
+ config->dst_maxburst = host->burstlen;
+ config->src_maxburst = host->burstlen;
+
+ return dmaengine_slave_config(host->dma, config);
+}
+
static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mxcmci_host *host = mmc_priv(mmc);
-#ifdef HAS_DMA
- unsigned int blen;
+ int burstlen, ret;
+
/*
* use burstlen of 64 in 4 bit mode (--> reg value 0)
* use burstlen of 16 in 1 bit mode (--> reg value 16)
*/
if (ios->bus_width == MMC_BUS_WIDTH_4)
- blen = 0;
+ burstlen = 64;
else
- blen = 16;
+ burstlen = 16;
+
+ if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
+ host->burstlen = burstlen;
+ ret = mxcmci_setup_dma(mmc);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to config DMA channel. Falling back to PIO\n");
+ dma_release_channel(host->dma);
+ host->do_dma = 0;
+ }
+ }
- imx_dma_config_burstlen(host->dma, blen);
-#endif
if (ios->bus_width == MMC_BUS_WIDTH_4)
host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
else
@@ -794,6 +814,18 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
host->caps |= MMC_CAP_4_BIT_DATA;
}
+static bool filter(struct dma_chan *chan, void *param)
+{
+ struct mxcmci_host *host = param;
+
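+	/*
+	 * Accept only a general purpose i.MX DMA channel and pass it the
+	 * SDHC request line and priority through chan->private.
+	 */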
+ if (!imx_dma_is_general_purpose(chan))
+ return false;
+
+ chan->private = &host->dma_data;
+
+ return true;
+}
+
static const struct mmc_host_ops mxcmci_ops = {
.request = mxcmci_request,
.set_ios = mxcmci_set_ios,
@@ -808,6 +840,7 @@ static int mxcmci_probe(struct platform_device *pdev)
struct mxcmci_host *host = NULL;
struct resource *iores, *r;
int ret = 0, irq;
+ dma_cap_mask_t mask;
printk(KERN_INFO "i.MX SDHC driver\n");
@@ -883,29 +916,23 @@ static int mxcmci_probe(struct platform_device *pdev)
writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);
-#ifdef HAS_DMA
- host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
- if (host->dma < 0) {
- dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
- ret = -EBUSY;
- goto out_clk_put;
- }
-
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- ret = -EINVAL;
- goto out_free_dma;
+ if (r) {
+ host->dmareq = r->start;
+ host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
+ host->dma_data.priority = DMA_PRIO_LOW;
+ host->dma_data.dma_request = host->dmareq;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma = dma_request_channel(mask, filter, host);
+ if (host->dma)
+ mmc->max_seg_size = dma_get_max_seg_size(
+ host->dma->device->dev);
}
- ret = imx_dma_config_channel(host->dma,
- IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO,
- IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
- r->start, 0);
- if (ret) {
- dev_err(mmc_dev(host->mmc), "failed to config DMA channel\n");
- goto out_free_dma;
- }
-#endif
+ if (!host->dma)
+ dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
+
INIT_WORK(&host->datawork, mxcmci_datawork);
ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
@@ -928,9 +955,8 @@ static int mxcmci_probe(struct platform_device *pdev)
out_free_irq:
free_irq(host->irq, host);
out_free_dma:
-#ifdef HAS_DMA
- imx_dma_free(host->dma);
-#endif
+ if (host->dma)
+ dma_release_channel(host->dma);
out_clk_put:
clk_disable(host->clk);
clk_put(host->clk);
@@ -960,9 +986,10 @@ static int mxcmci_remove(struct platform_device *pdev)
free_irq(host->irq, host);
iounmap(host->base);
-#ifdef HAS_DMA
- imx_dma_free(host->dma);
-#endif
+
+ if (host->dma)
+ dma_release_channel(host->dma);
+
clk_disable(host->clk);
clk_put(host->clk);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
new file mode 100644
index 000000000000..99d39a6a1032
--- /dev/null
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -0,0 +1,874 @@
+/*
+ * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
+ * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
+ *
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/mxs.h>
+#include <mach/common.h>
+#include <mach/dma.h>
+#include <mach/mmc.h>
+
+#define DRIVER_NAME "mxs-mmc"
+
+/* card detect polling timeout */
+#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
+
+#define SSP_VERSION_LATEST 4
+#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
+
+/* SSP registers */
+#define HW_SSP_CTRL0 0x000
+#define BM_SSP_CTRL0_RUN (1 << 29)
+#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28)
+#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26)
+#define BM_SSP_CTRL0_READ (1 << 25)
+#define BM_SSP_CTRL0_DATA_XFER (1 << 24)
+#define BP_SSP_CTRL0_BUS_WIDTH (22)
+#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22)
+#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21)
+#define BM_SSP_CTRL0_LONG_RESP (1 << 19)
+#define BM_SSP_CTRL0_GET_RESP (1 << 17)
+#define BM_SSP_CTRL0_ENABLE (1 << 16)
+#define BP_SSP_CTRL0_XFER_COUNT (0)
+#define BM_SSP_CTRL0_XFER_COUNT (0xffff)
+#define HW_SSP_CMD0 0x010
+#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25)
+#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22)
+#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21)
+#define BM_SSP_CMD0_APPEND_8CYC (1 << 20)
+#define BP_SSP_CMD0_BLOCK_SIZE (16)
+#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16)
+#define BP_SSP_CMD0_BLOCK_COUNT (8)
+#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8)
+#define BP_SSP_CMD0_CMD (0)
+#define BM_SSP_CMD0_CMD (0xff)
+#define HW_SSP_CMD1 0x020
+#define HW_SSP_XFER_SIZE 0x030
+#define HW_SSP_BLOCK_SIZE 0x040
+#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4)
+#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
+#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
+#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
+#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070)
+#define BP_SSP_TIMING_TIMEOUT (16)
+#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
+#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
+#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
+#define BP_SSP_TIMING_CLOCK_RATE (0)
+#define BM_SSP_TIMING_CLOCK_RATE (0xff)
+#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080)
+#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
+#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
+#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
+#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28)
+#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27)
+#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26)
+#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25)
+#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24)
+#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23)
+#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22)
+#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21)
+#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20)
+#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17)
+#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16)
+#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15)
+#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14)
+#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13)
+#define BM_SSP_CTRL1_POLARITY (1 << 9)
+#define BP_SSP_CTRL1_WORD_LENGTH (4)
+#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
+#define BP_SSP_CTRL1_SSP_MODE (0)
+#define BM_SSP_CTRL1_SSP_MODE (0xf)
+#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100)
+#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
+#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
+#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
+#define BP_SSP_VERSION_MAJOR (24)
+
+#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
+
+#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
+ BM_SSP_CTRL1_RESP_ERR_IRQ | \
+ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_DATA_CRC_IRQ | \
+ BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
+
+#define SSP_PIO_NUM 3
+
+struct mxs_mmc_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ void __iomem *base;
+ int irq;
+ struct resource *res;
+ struct resource *dma_res;
+ struct clk *clk;
+ unsigned int clk_rate;
+
+ struct dma_chan *dmach;
+ struct mxs_dma_data dma_data;
+ unsigned int dma_dir;
+ u32 ssp_pio_words[SSP_PIO_NUM];
+
+ unsigned int version;
+ unsigned char bus_width;
+ spinlock_t lock;
+ int sdio_irq_en;
+};
+
+static int mxs_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct mxs_mmc_platform_data *pdata =
+ mmc_dev(host->mmc)->platform_data;
+
+ if (!pdata)
+ return -EFAULT;
+
+ if (!gpio_is_valid(pdata->wp_gpio))
+ return -EINVAL;
+
+ return gpio_get_value(pdata->wp_gpio);
+}
+
+static int mxs_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ return !(readl(host->base + HW_SSP_STATUS) &
+ BM_SSP_STATUS_CARD_DETECT);
+}
+
+static void mxs_mmc_reset(struct mxs_mmc_host *host)
+{
+ u32 ctrl0, ctrl1;
+
+ mxs_reset_block(host->base);
+
+ ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
+ ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
+ BF_SSP(0x7, CTRL1_WORD_LENGTH) |
+ BM_SSP_CTRL1_DMA_ENABLE |
+ BM_SSP_CTRL1_POLARITY |
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
+ BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
+
+ writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
+ BF_SSP(2, TIMING_CLOCK_DIVIDE) |
+ BF_SSP(0, TIMING_CLOCK_RATE),
+ host->base + HW_SSP_TIMING);
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
+ }
+
+ writel(ctrl0, host->base + HW_SSP_CTRL0);
+ writel(ctrl1, host->base + HW_SSP_CTRL1);
+}
+
+static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
+ struct mmc_command *cmd);
+
+static void mxs_mmc_request_done(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = host->data;
+ struct mmc_request *mrq = host->mrq;
+
+ if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
+ if (mmc_resp_type(cmd) & MMC_RSP_136) {
+ cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
+ cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
+ cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
+ } else {
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
+ }
+ }
+
+ if (data) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma_dir);
+ /*
+ * If there was an error on any block, we mark all
+ * data blocks as being in error.
+ */
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ host->data = NULL;
+ if (mrq->stop) {
+ mxs_mmc_start_cmd(host, mrq->stop);
+ return;
+ }
+ }
+
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void mxs_mmc_dma_irq_callback(void *param)
+{
+ struct mxs_mmc_host *host = param;
+
+ mxs_mmc_request_done(host);
+}
+
+static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
+{
+ struct mxs_mmc_host *host = dev_id;
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = host->data;
+ u32 stat;
+
+ spin_lock(&host->lock);
+
+ stat = readl(host->base + HW_SSP_CTRL1);
+ writel(stat & MXS_MMC_IRQ_BITS,
+ host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+
+ if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
+ mmc_signal_sdio_irq(host->mmc);
+
+ spin_unlock(&host->lock);
+
+ if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
+ cmd->error = -EIO;
+
+ if (data) {
+ if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
+ data->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
+ data->error = -EILSEQ;
+ else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
+ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
+ data->error = -EIO;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
+ struct mxs_mmc_host *host, unsigned int append)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct mmc_data *data = host->data;
+	struct scatterlist *sgl;
+ unsigned int sg_len;
+
+ if (data) {
+ /* data */
+ dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma_dir);
+ sgl = data->sg;
+ sg_len = data->sg_len;
+ } else {
+ /* pio */
+ sgl = (struct scatterlist *) host->ssp_pio_words;
+ sg_len = SSP_PIO_NUM;
+ }
+
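+	/*
+	 * With dma_dir set to DMA_NONE the mxs-dma driver is expected to
+	 * treat sgl as an array of PIO words (the SSP register values
+	 * above) rather than as a real scatterlist.
+	 */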
+ desc = host->dmach->device->device_prep_slave_sg(host->dmach,
+ sgl, sg_len, host->dma_dir, append);
+ if (desc) {
+ desc->callback = mxs_mmc_dma_irq_callback;
+ desc->callback_param = host;
+ } else {
+ if (data)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma_dir);
+ }
+
+ return desc;
+}
+
+static void mxs_mmc_bc(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct dma_async_tx_descriptor *desc;
+ u32 ctrl0, cmd0, cmd1;
+
+ ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ host->ssp_pio_words[0] = ctrl0;
+ host->ssp_pio_words[1] = cmd0;
+ host->ssp_pio_words[2] = cmd1;
+ host->dma_dir = DMA_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ return;
+
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static void mxs_mmc_ac(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct dma_async_tx_descriptor *desc;
+ u32 ignore_crc, get_resp, long_resp;
+ u32 ctrl0, cmd0, cmd1;
+
+ ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
+ 0 : BM_SSP_CTRL0_IGNORE_CRC;
+ get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
+ BM_SSP_CTRL0_GET_RESP : 0;
+ long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
+ BM_SSP_CTRL0_LONG_RESP : 0;
+
+ ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ host->ssp_pio_words[0] = ctrl0;
+ host->ssp_pio_words[1] = cmd0;
+ host->ssp_pio_words[2] = cmd1;
+ host->dma_dir = DMA_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ return;
+
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
+{
+ const unsigned int ssp_timeout_mul = 4096;
+ /*
+ * Calculate ticks in ms since ns are large numbers
+ * and might overflow
+ */
+ const unsigned int clock_per_ms = clock_rate / 1000;
+ const unsigned int ms = ns / 1000;
+ const unsigned int ticks = ms * clock_per_ms;
+ const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
+
+ WARN_ON(ssp_ticks == 0);
+ return ssp_ticks;
+}
+
+static void mxs_mmc_adtc(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = cmd->data;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl = data->sg, *sg;
+ unsigned int sg_len = data->sg_len;
+ int i;
+
+ unsigned short dma_data_dir, timeout;
+ unsigned int data_size = 0, log2_blksz;
+ unsigned int blocks = data->blocks;
+
+ u32 ignore_crc, get_resp, long_resp, read;
+ u32 ctrl0, cmd0, cmd1, val;
+
+ ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
+ 0 : BM_SSP_CTRL0_IGNORE_CRC;
+ get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
+ BM_SSP_CTRL0_GET_RESP : 0;
+ long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
+ BM_SSP_CTRL0_LONG_RESP : 0;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_data_dir = DMA_TO_DEVICE;
+ read = 0;
+ } else {
+ dma_data_dir = DMA_FROM_DEVICE;
+ read = BM_SSP_CTRL0_READ;
+ }
+
+ ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
+ ignore_crc | get_resp | long_resp |
+ BM_SSP_CTRL0_DATA_XFER | read |
+ BM_SSP_CTRL0_WAIT_FOR_IRQ |
+ BM_SSP_CTRL0_ENABLE;
+
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+
+ /* get logarithm to base 2 of block size for setting register */
+ log2_blksz = ilog2(data->blksz);
+
+ /*
+	 * handle the case where the total size of data->sg is not
+	 * equal to blocks * blksz
+ */
+ for_each_sg(sgl, sg, sg_len, i)
+ data_size += sg->length;
+
+ if (data_size != data->blocks * data->blksz)
+ blocks = 1;
+
+	/* xfer count, block size and count are set differently on old and new SSP versions */
+ if (ssp_is_old()) {
+ ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
+ cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
+ BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
+ } else {
+ writel(data_size, host->base + HW_SSP_XFER_SIZE);
+ writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
+ BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
+ host->base + HW_SSP_BLOCK_SIZE);
+ }
+
+ if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
+ (cmd->opcode == SD_IO_RW_EXTENDED))
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ /* set the timeout count */
+ timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
+ val = readl(host->base + HW_SSP_TIMING);
+ val &= ~(BM_SSP_TIMING_TIMEOUT);
+ val |= BF_SSP(timeout, TIMING_TIMEOUT);
+ writel(val, host->base + HW_SSP_TIMING);
+
+ /* pio */
+ host->ssp_pio_words[0] = ctrl0;
+ host->ssp_pio_words[1] = cmd0;
+ host->ssp_pio_words[2] = cmd1;
+ host->dma_dir = DMA_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ /* append data sg */
+ WARN_ON(host->data != NULL);
+ host->data = data;
+ host->dma_dir = dma_data_dir;
+ desc = mxs_mmc_prep_dma(host, 1);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ return;
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = cmd;
+
+ switch (mmc_cmd_type(cmd)) {
+ case MMC_CMD_BC:
+ mxs_mmc_bc(host);
+ break;
+ case MMC_CMD_BCR:
+ mxs_mmc_ac(host);
+ break;
+ case MMC_CMD_AC:
+ mxs_mmc_ac(host);
+ break;
+ case MMC_CMD_ADTC:
+ mxs_mmc_adtc(host);
+ break;
+ default:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: unknown MMC command\n", __func__);
+ break;
+ }
+}
+
+static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+ host->mrq = mrq;
+ mxs_mmc_start_cmd(host, mrq->cmd);
+}
+
+static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
+{
+ unsigned int ssp_rate, bit_rate;
+ u32 div1, div2;
+ u32 val;
+
+ ssp_rate = clk_get_rate(host->clk);
+
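+	/*
+	 * Find the smallest even pre-divider div1 for which the second
+	 * divider div2 = ssp_rate / rate / div1 fits the 8-bit
+	 * CLOCK_RATE field.  E.g. ssp_rate = 96 MHz and rate = 400 kHz
+	 * give div1 = 2, div2 = 120 and a 400 kHz bit clock.
+	 */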
+ for (div1 = 2; div1 < 254; div1 += 2) {
+ div2 = ssp_rate / rate / div1;
+ if (div2 < 0x100)
+ break;
+ }
+
+ if (div1 >= 254) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: cannot set clock to %d\n", __func__, rate);
+ return;
+ }
+
+ if (div2 == 0)
+ bit_rate = ssp_rate / div1;
+ else
+ bit_rate = ssp_rate / div1 / div2;
+
+ val = readl(host->base + HW_SSP_TIMING);
+ val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
+ val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE);
+ val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE);
+ writel(val, host->base + HW_SSP_TIMING);
+
+ host->clk_rate = bit_rate;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n",
+ __func__, div1, div2, ssp_rate, bit_rate, rate);
+}
+
+static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ host->bus_width = 2;
+ else if (ios->bus_width == MMC_BUS_WIDTH_4)
+ host->bus_width = 1;
+ else
+ host->bus_width = 0;
+
+ if (ios->clock)
+ mxs_mmc_set_clk_rate(host, ios->clock);
+}
+
+static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->sdio_irq_en = enable;
+
+ if (enable) {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+
+ if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+ mmc_signal_sdio_irq(host->mmc);
+
+ } else {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static const struct mmc_host_ops mxs_mmc_ops = {
+ .request = mxs_mmc_request,
+ .get_ro = mxs_mmc_get_ro,
+ .get_cd = mxs_mmc_get_cd,
+ .set_ios = mxs_mmc_set_ios,
+ .enable_sdio_irq = mxs_mmc_enable_sdio_irq,
+};
+
+static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct mxs_mmc_host *host = param;
+
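+	/*
+	 * Match the APBH DMA channel whose id equals the platform DMA
+	 * resource and pass it the channel IRQ through chan->private.
+	 */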
+ if (!mxs_dma_is_apbh(chan))
+ return false;
+
+ if (chan->chan_id != host->dma_res->start)
+ return false;
+
+ chan->private = &host->dma_data;
+
+ return true;
+}
+
+static int mxs_mmc_probe(struct platform_device *pdev)
+{
+ struct mxs_mmc_host *host;
+ struct mmc_host *mmc;
+ struct resource *iores, *dmares, *r;
+ struct mxs_mmc_platform_data *pdata;
+ int ret = 0, irq_err, irq_dma;
+ dma_cap_mask_t mask;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ irq_err = platform_get_irq(pdev, 0);
+ irq_dma = platform_get_irq(pdev, 1);
+ if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
+ return -EINVAL;
+
+ r = request_mem_region(iores->start, resource_size(iores), pdev->name);
+ if (!r)
+ return -EBUSY;
+
+ mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out_release_mem;
+ }
+
+ host = mmc_priv(mmc);
+ host->base = ioremap(r->start, resource_size(r));
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto out_mmc_free;
+ }
+
+	/* only the major version matters */
+ host->version = readl(host->base + HW_SSP_VERSION) >>
+ BP_SSP_VERSION_MAJOR;
+
+ host->mmc = mmc;
+ host->res = r;
+ host->dma_res = dmares;
+ host->irq = irq_err;
+ host->sdio_irq_en = 0;
+
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto out_iounmap;
+ }
+ clk_enable(host->clk);
+
+ mxs_mmc_reset(host);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_data.chan_irq = irq_dma;
+ host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
+ if (!host->dmach) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: failed to request dma\n", __func__);
+ goto out_clk_put;
+ }
+
+ /* set mmc core parameters */
+ mmc->ops = &mxs_mmc_ops;
+ mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
+
+ pdata = mmc_dev(host->mmc)->platform_data;
+ if (pdata) {
+ if (pdata->flags & SLOTF_8_BIT_CAPABLE)
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ if (pdata->flags & SLOTF_4_BIT_CAPABLE)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ }
+
+ mmc->f_min = 400000;
+ mmc->f_max = 288000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ mmc->max_segs = 52;
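+	/*
+	 * Up to 2^15 byte blocks; block count and maximum request size
+	 * depend on the SSP block version.
+	 */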
+ mmc->max_blk_size = 1 << 0xf;
+ mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
+ mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
+ mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
+
+ platform_set_drvdata(pdev, mmc);
+
+ ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
+ if (ret)
+ goto out_free_dma;
+
+ spin_lock_init(&host->lock);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out_free_irq;
+
+ dev_info(mmc_dev(host->mmc), "initialized\n");
+
+ return 0;
+
+out_free_irq:
+ free_irq(host->irq, host);
+out_free_dma:
+ if (host->dmach)
+ dma_release_channel(host->dmach);
+out_clk_put:
+ clk_disable(host->clk);
+ clk_put(host->clk);
+out_iounmap:
+ iounmap(host->base);
+out_mmc_free:
+ mmc_free_host(mmc);
+out_release_mem:
+ release_mem_region(iores->start, resource_size(iores));
+ return ret;
+}
+
+static int mxs_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct resource *res = host->res;
+
+ mmc_remove_host(mmc);
+
+ free_irq(host->irq, host);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (host->dmach)
+ dma_release_channel(host->dmach);
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+
+ iounmap(host->base);
+
+ mmc_free_host(mmc);
+
+ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mxs_mmc_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ ret = mmc_suspend_host(mmc);
+
+ clk_disable(host->clk);
+
+ return ret;
+}
+
+static int mxs_mmc_resume(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ clk_enable(host->clk);
+
+ ret = mmc_resume_host(mmc);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mxs_mmc_pm_ops = {
+ .suspend = mxs_mmc_suspend,
+ .resume = mxs_mmc_resume,
+};
+#endif
+
+static struct platform_driver mxs_mmc_driver = {
+ .probe = mxs_mmc_probe,
+ .remove = mxs_mmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &mxs_mmc_pm_ops,
+#endif
+ },
+};
+
+static int __init mxs_mmc_init(void)
+{
+ return platform_driver_register(&mxs_mmc_driver);
+}
+
+static void __exit mxs_mmc_exit(void)
+{
+ platform_driver_unregister(&mxs_mmc_driver);
+}
+
+module_init(mxs_mmc_init);
+module_exit(mxs_mmc_exit);
+
+MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 1247e5de9faa..5530def54e5b 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -34,6 +34,7 @@ enum {
struct of_mmc_spi {
int gpios[NUM_GPIOS];
bool alow_gpios[NUM_GPIOS];
+ int detect_irq;
struct mmc_spi_platform_data pdata;
};
@@ -61,6 +62,22 @@ static int of_mmc_spi_get_ro(struct device *dev)
return of_mmc_spi_read_gpio(dev, WP_GPIO);
}
+static int of_mmc_spi_init(struct device *dev,
+ irqreturn_t (*irqhandler)(int, void *), void *mmc)
+{
+ struct of_mmc_spi *oms = to_of_mmc_spi(dev);
+
+ return request_threaded_irq(oms->detect_irq, NULL, irqhandler, 0,
+ dev_name(dev), mmc);
+}
+
+static void of_mmc_spi_exit(struct device *dev, void *mmc)
+{
+ struct of_mmc_spi *oms = to_of_mmc_spi(dev);
+
+ free_irq(oms->detect_irq, mmc);
+}
+
struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
{
struct device *dev = &spi->dev;
@@ -121,8 +138,13 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
if (gpio_is_valid(oms->gpios[WP_GPIO]))
oms->pdata.get_ro = of_mmc_spi_get_ro;
- /* We don't support interrupts yet, let's poll. */
- oms->pdata.caps |= MMC_CAP_NEEDS_POLL;
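+	/*
+	 * Prefer a card-detect interrupt from the device tree and fall
+	 * back to polling when none is specified.
+	 */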
+ oms->detect_irq = irq_of_parse_and_map(np, 0);
+ if (oms->detect_irq != NO_IRQ) {
+ oms->pdata.init = of_mmc_spi_init;
+ oms->pdata.exit = of_mmc_spi_exit;
+ } else {
+ oms->pdata.caps |= MMC_CAP_NEEDS_POLL;
+ }
dev->platform_data = &oms->pdata;
return dev->platform_data;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 379d2ffe4c87..2e032f0e8cf4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1417,7 +1417,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
if (res == NULL || irq < 0)
return -ENXIO;
- res = request_mem_region(res->start, res->end - res->start + 1,
+ res = request_mem_region(res->start, resource_size(res),
pdev->name);
if (res == NULL)
return -EBUSY;
@@ -1457,7 +1457,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
host->irq = irq;
host->phys_base = host->mem_res->start;
- host->virt_base = ioremap(res->start, res->end - res->start + 1);
+ host->virt_base = ioremap(res->start, resource_size(res));
if (!host->virt_base)
goto err_ioremap;
@@ -1514,7 +1514,7 @@ err_free_mmc_host:
err_ioremap:
kfree(host);
err_free_mem_region:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
return ret;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 078fdf11af03..259ece047afc 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -118,7 +118,7 @@
#define MMC_TIMEOUT_MS 20
#define OMAP_MMC_MASTER_CLOCK 96000000
-#define DRIVER_NAME "mmci-omap-hs"
+#define DRIVER_NAME "omap_hsmmc"
/* Timeouts for entering power saving states on inactivity, msec */
#define OMAP_MMC_DISABLED_TIMEOUT 100
@@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
return ret;
}
-static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
+static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on,
int vdd)
{
struct omap_hsmmc_host *host =
@@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
return ret;
}
+static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on,
+ int vdd)
+{
+ return 0;
+}
+
static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
int vdd, int cardsleep)
{
@@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
return regulator_set_mode(host->vcc, mode);
}
-static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
+static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep,
int vdd, int cardsleep)
{
struct omap_hsmmc_host *host =
@@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
return regulator_enable(host->vcc_aux);
}
+static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep,
+ int vdd, int cardsleep)
+{
+ return 0;
+}
+
static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
struct regulator *reg;
@@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
break;
case OMAP_MMC2_DEVID:
case OMAP_MMC3_DEVID:
+ case OMAP_MMC5_DEVID:
/* Off-chip level shifting, or none */
- mmc_slot(host).set_power = omap_hsmmc_23_set_power;
- mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep;
+ mmc_slot(host).set_power = omap_hsmmc_235_set_power;
+ mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep;
break;
+ case OMAP_MMC4_DEVID:
+ mmc_slot(host).set_power = omap_hsmmc_4_set_power;
+		mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep;
+		break;
default:
pr_err("MMC%d configuration not supported!\n", host->id);
return -EINVAL;
@@ -1555,7 +1571,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
}
- if (host->id == OMAP_MMC1_DEVID) {
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
/* Only MMC1 can interface at 3V without some flavor
* of external transceiver; but they all handle 1.8V.
*/
@@ -1647,7 +1663,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
u32 hctl, capa, value;
/* Only MMC1 supports 3.0V */
- if (host->id == OMAP_MMC1_DEVID) {
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
hctl = SDVS30;
capa = VS30 | VS18;
} else {
@@ -2031,8 +2047,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
res->start += pdata->reg_offset;
res->end += pdata->reg_offset;
- res = request_mem_region(res->start, res->end - res->start + 1,
- pdev->name);
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
if (res == NULL)
return -EBUSY;
@@ -2101,14 +2116,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
/* we start off in DISABLED state */
host->dpm_state = DISABLED;
- if (mmc_host_enable(host->mmc) != 0) {
+ if (clk_enable(host->iclk) != 0) {
clk_put(host->iclk);
clk_put(host->fclk);
goto err1;
}
- if (clk_enable(host->iclk) != 0) {
- mmc_host_disable(host->mmc);
+ if (mmc_host_enable(host->mmc) != 0) {
+ clk_disable(host->iclk);
clk_put(host->iclk);
clk_put(host->fclk);
goto err1;
@@ -2271,7 +2286,7 @@ err1:
err_alloc:
omap_hsmmc_gpio_free(pdata);
err:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -2308,7 +2323,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9b82910b9dbb..3b5248567973 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -15,9 +15,11 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdhci-pltfm.h>
#include <mach/hardware.h>
+#include <mach/esdhc.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -30,6 +32,39 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
}
+static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
+{
+ /* fake CARD_PRESENT flag on mx25/35 */
+ u32 val = readl(host->ioaddr + reg);
+
+ if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+ struct esdhc_platform_data *boarddata =
+ host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->cd_gpio)
+ && gpio_get_value(boarddata->cd_gpio))
+ /* no card, if a valid gpio says so... */
+			val &= ~SDHCI_CARD_PRESENT;
+ else
+ /* ... in all other cases assume card is present */
+ val |= SDHCI_CARD_PRESENT;
+ }
+
+ return val;
+}
+
+static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
+{
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE))
+ /*
+ * these interrupts won't work with a custom card_detect gpio
+ * (only applied to mx25/35)
+ */
+ val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+
+ writel(val, host->ioaddr + reg);
+}
+
static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
if (unlikely(reg == SDHCI_HOST_VERSION))
@@ -100,10 +135,39 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
return clk_get_rate(pltfm_host->clk) / 256 / 16;
}
+static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
+{
+ struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->wp_gpio))
+ return gpio_get_value(boarddata->wp_gpio);
+ else
+ return -ENOSYS;
+}
+
+static struct sdhci_ops sdhci_esdhc_ops = {
+ .read_w = esdhc_readw_le,
+ .write_w = esdhc_writew_le,
+ .write_b = esdhc_writeb_le,
+ .set_clock = esdhc_set_clock,
+ .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_min_clock = esdhc_pltfm_get_min_clock,
+};
+
+static irqreturn_t cd_irq(int irq, void *data)
+{
+ struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+ tasklet_schedule(&sdhost->card_tasklet);
+ return IRQ_HANDLED;
+};
+
static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
struct clk *clk;
+ int err;
clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
@@ -116,32 +180,78 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
if (cpu_is_mx35() || cpu_is_mx51())
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */
- if (cpu_is_mx25() || cpu_is_mx35())
+ if (cpu_is_mx25() || cpu_is_mx35()) {
+ /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
+ /* write_protect can't be routed to controller, use gpio */
+ sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
+ }
+
+ if (boarddata) {
+ err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
+ if (err) {
+ dev_warn(mmc_dev(host->mmc),
+ "no write-protect pin available!\n");
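+			/* store the errno so gpio_is_valid() fails later */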
+ boarddata->wp_gpio = err;
+ }
+
+ err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
+ if (err) {
+ dev_warn(mmc_dev(host->mmc),
+ "no card-detect pin available!\n");
+ goto no_card_detect_pin;
+ }
+
+ /* i.MX5x has issues to be researched */
+ if (!cpu_is_mx25() && !cpu_is_mx35())
+ goto not_supported;
+
+ err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ mmc_hostname(host->mmc), host);
+ if (err) {
+ dev_warn(mmc_dev(host->mmc), "request irq error\n");
+ goto no_card_detect_irq;
+ }
+
+ sdhci_esdhc_ops.write_l = esdhc_writel_le;
+ sdhci_esdhc_ops.read_l = esdhc_readl_le;
+ /* Now we have a working card_detect again */
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ }
+
+ return 0;
+ no_card_detect_irq:
+ gpio_free(boarddata->cd_gpio);
+ no_card_detect_pin:
+ boarddata->cd_gpio = err;
+ not_supported:
return 0;
}
static void esdhc_pltfm_exit(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->wp_gpio))
+ gpio_free(boarddata->wp_gpio);
+
+ if (boarddata && gpio_is_valid(boarddata->cd_gpio)) {
+ gpio_free(boarddata->cd_gpio);
+
+ if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
+ free_irq(gpio_to_irq(boarddata->cd_gpio), host);
+ }
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
}
-static struct sdhci_ops sdhci_esdhc_ops = {
- .read_w = esdhc_readw_le,
- .write_w = esdhc_writew_le,
- .write_b = esdhc_writeb_le,
- .set_clock = esdhc_set_clock,
- .get_max_clock = esdhc_pltfm_get_max_clock,
- .get_min_clock = esdhc_pltfm_get_min_clock,
-};
-
struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA,
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
+ | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
/* ADMA has issues. Might be fixable */
.ops = &sdhci_esdhc_ops,
.init = esdhc_pltfm_init,
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index afaf1bc4913a..c55aae828aac 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -19,7 +19,6 @@
*/
#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
- SDHCI_QUIRK_BROKEN_CARD_DETECTION | \
SDHCI_QUIRK_NO_BUSY_IRQ | \
SDHCI_QUIRK_NONSTANDARD_CLOCK | \
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index fcd0e1fcba44..08161f690ae8 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -73,7 +73,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
}
struct sdhci_of_data sdhci_esdhc = {
- .quirks = ESDHC_DEFAULT_QUIRKS,
+ /* card detection could be handled via GPIO */
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
.ops = {
.read_l = sdhci_be32bs_readl,
.read_w = esdhc_readw,
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 0dc905b20eee..2f8d46854acd 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -547,6 +547,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
},
{
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = 0xe823,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_ENE,
.device = PCI_DEVICE_ID_ENE_CB712_SD,
.subvendor = PCI_ANY_ID,
@@ -900,9 +908,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
{
struct sdhci_pci_slot *slot;
struct sdhci_host *host;
-
- resource_size_t addr;
-
int ret;
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
@@ -949,7 +954,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
goto free;
}
- addr = pci_resource_start(pdev, bar);
host->ioaddr = pci_ioremap_bar(pdev, bar);
if (!host->ioaddr) {
dev_err(&pdev->dev, "failed to remap registers\n");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 5309ab95aada..69e3ee321eb5 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -499,6 +499,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
* SDHCI block, or a missing configuration that needs to be set. */
host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
+	/* This host supports Auto CMD12 */
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 4823ee94a63f..f7e1f964395f 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -169,7 +169,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate wp gpio\n");
- goto out_cd;
+ goto out_irq;
}
tegra_gpio_enable(plat->wp_gpio);
gpio_direction_input(plat->wp_gpio);
@@ -195,6 +195,9 @@ out_wp:
gpio_free(plat->wp_gpio);
}
+out_irq:
+ if (gpio_is_valid(plat->cd_gpio))
+ free_irq(gpio_to_irq(plat->cd_gpio), host);
out_cd:
if (gpio_is_valid(plat->cd_gpio)) {
tegra_gpio_disable(plat->cd_gpio);
@@ -225,6 +228,7 @@ static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
}
if (gpio_is_valid(plat->cd_gpio)) {
+ free_irq(gpio_to_irq(plat->cd_gpio), host);
tegra_gpio_disable(plat->cd_gpio);
gpio_free(plat->cd_gpio);
}
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 12884c270171..af97015a2fc7 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -169,7 +169,7 @@ struct sh_mmcif_host {
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
struct completion dma_complete;
- unsigned int dma_sglen;
+ bool dma_active;
};
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
@@ -194,10 +194,12 @@ static void mmcif_dma_complete(void *arg)
return;
if (host->data->flags & MMC_DATA_READ)
- dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+ dma_unmap_sg(host->chan_rx->device->dev,
+ host->data->sg, host->data->sg_len,
DMA_FROM_DEVICE);
else
- dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+ dma_unmap_sg(host->chan_tx->device->dev,
+ host->data->sg, host->data->sg_len,
DMA_TO_DEVICE);
complete(&host->dma_complete);
@@ -211,9 +213,10 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_FROM_DEVICE);
+ ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ DMA_FROM_DEVICE);
if (ret > 0) {
- host->dma_sglen = ret;
+ host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
@@ -221,14 +224,9 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
if (desc) {
desc->callback = mmcif_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- } else {
- sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
- chan->device->device_issue_pending(chan);
- }
+ cookie = dmaengine_submit(desc);
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
+ dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
__func__, host->data->sg_len, ret, cookie);
@@ -238,7 +236,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
if (ret >= 0)
ret = -EIO;
host->chan_rx = NULL;
- host->dma_sglen = 0;
+ host->dma_active = false;
dma_release_channel(chan);
/* Free the Tx channel too */
chan = host->chan_tx;
@@ -263,9 +261,10 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_TO_DEVICE);
+ ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ DMA_TO_DEVICE);
if (ret > 0) {
- host->dma_sglen = ret;
+ host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
@@ -273,14 +272,9 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
if (desc) {
desc->callback = mmcif_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- } else {
- sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
- chan->device->device_issue_pending(chan);
- }
+ cookie = dmaengine_submit(desc);
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
+ dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
__func__, host->data->sg_len, ret, cookie);
@@ -290,7 +284,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
if (ret >= 0)
ret = -EIO;
host->chan_tx = NULL;
- host->dma_sglen = 0;
+ host->dma_active = false;
dma_release_channel(chan);
/* Free the Rx channel too */
chan = host->chan_rx;
@@ -317,7 +311,7 @@ static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
struct sh_mmcif_plat_data *pdata)
{
- host->dma_sglen = 0;
+ host->dma_active = false;
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) {
@@ -364,7 +358,7 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
dma_release_channel(chan);
}
- host->dma_sglen = 0;
+ host->dma_active = false;
}
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
@@ -753,7 +747,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
}
sh_mmcif_get_response(host, cmd);
if (host->data) {
- if (!host->dma_sglen) {
+ if (!host->dma_active) {
ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
} else {
long time =
@@ -765,7 +759,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
ret = time;
sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
- host->dma_sglen = 0;
+ host->dma_active = false;
}
if (ret < 0)
mrq->data->bytes_xfered = 0;
@@ -850,15 +844,15 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct sh_mmcif_host *host = mmc_priv(mmc);
struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
- if (ios->power_mode == MMC_POWER_OFF) {
+ if (ios->power_mode == MMC_POWER_UP) {
+ if (p->set_pwr)
+ p->set_pwr(host->pd, ios->power_mode);
+ } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* clock stop */
sh_mmcif_clock_control(host, 0);
- if (p->down_pwr)
+ if (ios->power_mode == MMC_POWER_OFF && p->down_pwr)
p->down_pwr(host->pd);
return;
- } else if (ios->power_mode == MMC_POWER_UP) {
- if (p->set_pwr)
- p->set_pwr(host->pd, ios->power_mode);
}
if (ios->clock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e3c6ef208391..ab1adeabdd22 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -152,7 +152,6 @@ struct tmio_mmc_host {
struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
#ifdef CONFIG_TMIO_MMC_DMA
- unsigned int dma_sglen;
u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
struct scatterlist bounce_sg;
#endif
@@ -220,44 +219,48 @@ static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
-static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
+static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt)
{
- kunmap_atomic(virt, KM_BIO_SRC_IRQ);
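+	/*
+	 * tmio_mmc_kmap_atomic() returned the mapping plus sg->offset,
+	 * so subtract it again to unmap the page-aligned address.
+	 */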
+ kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
#ifdef CONFIG_MMC_DEBUG
-#define STATUS_TO_TEXT(a) \
+#define STATUS_TO_TEXT(a, status, i) \
do { \
- if (status & TMIO_STAT_##a) \
+ if (status & TMIO_STAT_##a) { \
+ if (i++) \
+ printk(" | "); \
printk(#a); \
+ } \
} while (0)
void pr_debug_status(u32 status)
{
+ int i = 0;
printk(KERN_DEBUG "status: %08x = ", status);
- STATUS_TO_TEXT(CARD_REMOVE);
- STATUS_TO_TEXT(CARD_INSERT);
- STATUS_TO_TEXT(SIGSTATE);
- STATUS_TO_TEXT(WRPROTECT);
- STATUS_TO_TEXT(CARD_REMOVE_A);
- STATUS_TO_TEXT(CARD_INSERT_A);
- STATUS_TO_TEXT(SIGSTATE_A);
- STATUS_TO_TEXT(CMD_IDX_ERR);
- STATUS_TO_TEXT(STOPBIT_ERR);
- STATUS_TO_TEXT(ILL_FUNC);
- STATUS_TO_TEXT(CMD_BUSY);
- STATUS_TO_TEXT(CMDRESPEND);
- STATUS_TO_TEXT(DATAEND);
- STATUS_TO_TEXT(CRCFAIL);
- STATUS_TO_TEXT(DATATIMEOUT);
- STATUS_TO_TEXT(CMDTIMEOUT);
- STATUS_TO_TEXT(RXOVERFLOW);
- STATUS_TO_TEXT(TXUNDERRUN);
- STATUS_TO_TEXT(RXRDY);
- STATUS_TO_TEXT(TXRQ);
- STATUS_TO_TEXT(ILL_ACCESS);
+ STATUS_TO_TEXT(CARD_REMOVE, status, i);
+ STATUS_TO_TEXT(CARD_INSERT, status, i);
+ STATUS_TO_TEXT(SIGSTATE, status, i);
+ STATUS_TO_TEXT(WRPROTECT, status, i);
+ STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
+ STATUS_TO_TEXT(CARD_INSERT_A, status, i);
+ STATUS_TO_TEXT(SIGSTATE_A, status, i);
+ STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
+ STATUS_TO_TEXT(STOPBIT_ERR, status, i);
+ STATUS_TO_TEXT(ILL_FUNC, status, i);
+ STATUS_TO_TEXT(CMD_BUSY, status, i);
+ STATUS_TO_TEXT(CMDRESPEND, status, i);
+ STATUS_TO_TEXT(DATAEND, status, i);
+ STATUS_TO_TEXT(CRCFAIL, status, i);
+ STATUS_TO_TEXT(DATATIMEOUT, status, i);
+ STATUS_TO_TEXT(CMDTIMEOUT, status, i);
+ STATUS_TO_TEXT(RXOVERFLOW, status, i);
+ STATUS_TO_TEXT(TXUNDERRUN, status, i);
+ STATUS_TO_TEXT(RXRDY, status, i);
+ STATUS_TO_TEXT(TXRQ, status, i);
+ STATUS_TO_TEXT(ILL_ACCESS, status, i);
printk("\n");
}
@@ -300,8 +303,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
/*
* Testing on sh-mobile showed that SDIO IRQs are unmasked when
@@ -324,8 +326,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
@@ -507,7 +508,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
host->sg_off += count;
- tmio_mmc_kunmap_atomic(sg_virt, &flags);
+ tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
@@ -666,8 +667,7 @@ out:
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
unsigned int ireg, irq_mask, status;
unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
@@ -767,7 +767,7 @@ static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
unsigned long flags;
void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
- tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+ tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
}
}
@@ -796,8 +796,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
@@ -825,23 +824,16 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
sg = host->sg_ptr;
}
- ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
- if (ret > 0) {
- host->dma_sglen = ret;
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+ if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- }
if (desc) {
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- } else {
- chan->device->device_issue_pending(chan);
- }
+ cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
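dmaengine_submit() and dma_async_issue_pending() used above are the generic inline wrappers from <linux/dmaengine.h> around the operations this driver previously open-coded, roughly:

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	/* hand the descriptor to the DMA driver; returns a tracking cookie */
	return desc->tx_submit(desc);
}

static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	/* kick the channel so queued descriptors start executing */
	chan->device->device_issue_pending(chan);
}

Note that the check for a negative cookie is dropped here; dmaengine_submit() still returns the cookie, so the dev_dbg() below reports whatever value came back.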
@@ -873,8 +865,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
@@ -901,26 +892,20 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
- tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+ tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
- ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
- if (ret > 0) {
- host->dma_sglen = ret;
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
+ if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- }
if (desc) {
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- }
+ cookie = dmaengine_submit(desc);
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
@@ -964,7 +949,7 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
struct dma_chan *chan = host->chan_tx;
- chan->device->device_issue_pending(chan);
+ dma_async_issue_pending(chan);
}
static void tmio_tasklet_fn(unsigned long arg)
@@ -978,10 +963,12 @@ static void tmio_tasklet_fn(unsigned long arg)
goto out;
if (host->data->flags & MMC_DATA_READ)
- dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ dma_unmap_sg(host->chan_rx->device->dev,
+ host->sg_ptr, host->sg_len,
DMA_FROM_DEVICE);
else
- dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ dma_unmap_sg(host->chan_tx->device->dev,
+ host->sg_ptr, host->sg_len,
DMA_TO_DEVICE);
tmio_mmc_do_data_irq(host);
@@ -1071,8 +1058,7 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
struct mmc_data *data)
{
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
data->blksz, data->blocks);
@@ -1177,8 +1163,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
@@ -1187,8 +1172,7 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
if (!pdata->get_cd)
return -ENOSYS;
@@ -1207,7 +1191,7 @@ static const struct mmc_host_ops tmio_mmc_ops = {
#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct mmc_host *mmc = platform_get_drvdata(dev);
int ret;
@@ -1222,7 +1206,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_mmc_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct mmc_host *mmc = platform_get_drvdata(dev);
int ret = 0;
@@ -1245,7 +1229,7 @@ out:
static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct tmio_mmc_data *pdata;
struct resource *res_ctl;
struct tmio_mmc_host *host;
@@ -1260,7 +1244,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (!res_ctl)
goto out;
- pdata = cell->driver_data;
+ pdata = mfd_get_data(dev);
if (!pdata || !pdata->hclk)
goto out;
@@ -1360,7 +1344,7 @@ out:
static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct mmc_host *mmc = platform_get_drvdata(dev);
platform_set_drvdata(dev, NULL);
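The conversions throughout this file replace open-coded platform_data dereferences with the MFD core accessors. Their definitions are not part of this diff; conceptually they reduce to something like the following sketch (the pdev->mfd_cell and mfd_data field names are assumptions about the mfd core of this kernel series, not taken from this patch):

static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
{
	/* assumption: the mfd core attaches its cell to the platform device */
	return pdev->mfd_cell;
}

static inline void *mfd_get_data(struct platform_device *pdev)
{
	/* assumption: per-cell driver data hangs off the cell */
	return mfd_get_cell(pdev)->mfd_data;
}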
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 9ed84ddb4780..8c5b4881ccd6 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -802,12 +802,9 @@ static const struct mmc_host_ops via_sdc_ops = {
static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
{
- void __iomem *addrbase;
unsigned long flags;
u8 gatt;
- addrbase = host->pcictrl_mmiobase;
-
spin_lock_irqsave(&host->lock, flags);
via_save_pcictrlreg(host);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index c89592239bc7..4f6c06f16328 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -106,23 +106,6 @@ config MTD_NAND_OMAP2
help
Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
-config MTD_NAND_OMAP_PREFETCH
- bool "GPMC prefetch support for NAND Flash device"
- depends on MTD_NAND_OMAP2
- default y
- help
- The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
- to improve the performance.
-
-config MTD_NAND_OMAP_PREFETCH_DMA
- depends on MTD_NAND_OMAP_PREFETCH
- bool "DMA mode"
- default n
- help
- The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
- or in DMA interrupt mode.
- Say y for DMA mode or MPU mode will be used
-
config MTD_NAND_IDS
tristate
@@ -476,7 +459,7 @@ config MTD_NAND_MPC5121_NFC
config MTD_NAND_MXC
tristate "MXC NAND support"
- depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX51
+ depends on IMX_HAVE_PLATFORM_MXC_NAND
help
This enables the driver for the NAND flash controller on the
MXC processors.
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index ef932ba55a0b..5ae1d9ee2cf1 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -722,9 +722,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
/*
* MXC NANDFC can only perform full page+spare or
* spare-only read/write. When the upper layers
- * layers perform a read/write buf operation,
- * we will used the saved column address to index into
- * the full page.
+ * perform a read/write buf operation, the saved column
+ * address is used to index into the full page.
*/
host->send_addr(host, 0, page_addr == -1);
if (mtd->writesize > 512)
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 28af71c61834..7b8f1fffc528 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
#include <plat/nand.h>
#define DRIVER_NAME "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS 5000
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
@@ -96,26 +98,19 @@
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
-static int use_prefetch = 1;
-
-/* "modprobe ... use_prefetch=0" etc */
-module_param(use_prefetch, bool, 0);
-MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
-
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
-static int use_dma = 1;
+/* oob info generated at runtime depending on the ecc algorithm and layout selected */
+static struct nand_ecclayout omap_oobinfo;
+/* Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks
+ */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr bb_descrip_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
-/* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
-MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
-#else
-static const int use_dma;
-#endif
-#else
-const int use_prefetch;
-static const int use_dma;
-#endif
struct omap_nand_info {
struct nand_hw_control controller;
@@ -129,6 +124,13 @@ struct omap_nand_info {
unsigned long phys_base;
struct completion comp;
int dma_ch;
+ int gpmc_irq;
+ enum {
+ OMAP_NAND_IO_READ = 0, /* read */
+ OMAP_NAND_IO_WRITE, /* write */
+ } iomode;
+ u_char *buf;
+ int buf_len;
};
/**
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t pref_count = 0, w_count = 0;
+ uint32_t w_count = 0;
int i = 0, ret = 0;
u16 *p;
+ unsigned long tim, limit;
/* take care of subpage writes */
if (len % 2 != 0) {
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
iowrite16(*p++, info->nand.IO_ADDR_W);
}
/* wait for data to flushed-out before reset the prefetch */
- do {
- pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
- } while (pref_count);
+ tim = 0;
+ limit = (loops_per_jiffy *
+ msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
}
}
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
/*
* omap_nand_dma_cb: callback on the completion of dma transfer
* @lch: logical channel
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t prefetch_status = 0;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
dma_addr_t dma_addr;
int ret;
+ unsigned long tim, limit;
- /* The fifo depth is 64 bytes. We have a sync at each frame and frame
- * length is 64 bytes.
+ /* The fifo depth is 64 bytes max.
+ * But configure the FIFO-threshold to 32 to get a sync at each frame
+ * and frame length is 32 bytes.
*/
int buf_len = len >> 6;
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
if (ret)
- /* PFPW engine is busy, use cpu copy methode */
+ /* PFPW engine is busy, use cpu copy method */
goto out_copy;
init_completion(&info->comp);
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
/* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
- do {
- prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
- } while (prefetch_status);
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
@@ -426,14 +436,6 @@ out_copy:
: omap_write_buf8(mtd, (u_char *) addr, len);
return 0;
}
-#else
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
- unsigned int len, int is_write)
-{
- return 0;
-}
-#endif
/**
* omap_read_buf_dma_pref - read data from NAND controller into buffer
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}
+/*
+ * omap_nand_irq - GPMC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+ struct omap_nand_info *info = (struct omap_nand_info *) dev;
+ u32 bytes;
+ u32 irq_stat;
+
+ irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
+ if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+ if (irq_stat & 0x2)
+ goto done;
+
+ if (info->buf_len && (info->buf_len < bytes))
+ bytes = info->buf_len;
+ else if (!info->buf_len)
+ bytes = 0;
+ iowrite32_rep(info->nand.IO_ADDR_W,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+ info->buf_len -= bytes;
+
+ } else {
+ ioread32_rep(info->nand.IO_ADDR_R,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+
+ if (irq_stat & 0x2)
+ goto done;
+ }
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+
+done:
+ complete(&info->comp);
+ /* disable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+ /* clear status */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+
+ if (len <= mtd->oobsize) {
+ omap_read_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_READ;
+ info->buf = buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for read to complete */
+ wait_for_completion(&info->comp);
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len);
+ else
+ omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+ unsigned long tim, limit;
+
+ if (len <= mtd->oobsize) {
+ omap_write_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_WRITE;
+ info->buf = (u_char *) buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer : size=24 */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for write to complete */
+ wait_for_completion(&info->comp);
+ /* wait for data to be flushed out before resetting the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, buf, len);
+ else
+ omap_write_buf8(mtd, buf, len);
+}
+
/**
* omap_verify_buf - Verify chip data against buffer
* @mtd: MTD device structure
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
return 0;
}
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-
/**
* gen_true_ecc - This function will generate true ECC value
* @ecc_buf: buffer to store ecc code
@@ -708,8 +859,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
}
-#endif
-
/**
* omap_wait - wait until the command is done
* @mtd: MTD device structure
@@ -779,6 +928,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
int err;
+ int i, offset;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -804,7 +954,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->mtd.name = dev_name(&pdev->dev);
info->mtd.owner = THIS_MODULE;
- info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
+ info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
/* NAND write protect off */
@@ -842,28 +992,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.chip_delay = 50;
}
- if (use_prefetch) {
-
+ switch (pdata->xfer_type) {
+ case NAND_OMAP_PREFETCH_POLLED:
info->nand.read_buf = omap_read_buf_pref;
info->nand.write_buf = omap_write_buf_pref;
- if (use_dma) {
- err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
- omap_nand_dma_cb, &info->comp, &info->dma_ch);
- if (err < 0) {
- info->dma_ch = -1;
- printk(KERN_WARNING "DMA request failed."
- " Non-dma data transfer mode\n");
- } else {
- omap_set_dma_dest_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
- omap_set_dma_src_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
-
- info->nand.read_buf = omap_read_buf_dma_pref;
- info->nand.write_buf = omap_write_buf_dma_pref;
- }
- }
- } else {
+ break;
+
+ case NAND_OMAP_POLLED:
if (info->nand.options & NAND_BUSWIDTH_16) {
info->nand.read_buf = omap_read_buf16;
info->nand.write_buf = omap_write_buf16;
@@ -871,20 +1006,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.read_buf = omap_read_buf8;
info->nand.write_buf = omap_write_buf8;
}
+ break;
+
+ case NAND_OMAP_PREFETCH_DMA:
+ err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+ omap_nand_dma_cb, &info->comp, &info->dma_ch);
+ if (err < 0) {
+ info->dma_ch = -1;
+ dev_err(&pdev->dev, "DMA request failed!\n");
+ goto out_release_mem_region;
+ } else {
+ omap_set_dma_dest_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+
+ info->nand.read_buf = omap_read_buf_dma_pref;
+ info->nand.write_buf = omap_write_buf_dma_pref;
+ }
+ break;
+
+ case NAND_OMAP_PREFETCH_IRQ:
+ err = request_irq(pdata->gpmc_irq,
+ omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ pdata->gpmc_irq, err);
+ goto out_release_mem_region;
+ } else {
+ info->gpmc_irq = pdata->gpmc_irq;
+ info->nand.read_buf = omap_read_buf_irq_pref;
+ info->nand.write_buf = omap_write_buf_irq_pref;
+ }
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "xfer_type(%d) not supported!\n", pdata->xfer_type);
+ err = -EINVAL;
+ goto out_release_mem_region;
}
- info->nand.verify_buf = omap_verify_buf;
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
- info->nand.ecc.bytes = 3;
- info->nand.ecc.size = 512;
- info->nand.ecc.calculate = omap_calculate_ecc;
- info->nand.ecc.hwctl = omap_enable_hwecc;
- info->nand.ecc.correct = omap_correct_data;
- info->nand.ecc.mode = NAND_ECC_HW;
+ info->nand.verify_buf = omap_verify_buf;
-#else
- info->nand.ecc.mode = NAND_ECC_SOFT;
-#endif
+ /* select the ECC type */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+ else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+ (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+ }
/* DIP switches on some boards change between 8 and 16 bit
* bus widths for flash. Try the other width if the first try fails.
@@ -897,6 +1073,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
}
}
+ /* rom code layout */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ offset = 2;
+ else {
+ offset = 1;
+ info->nand.badblock_pattern = &bb_descrip_flashbased;
+ }
+ omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
+ for (i = 0; i < omap_oobinfo.eccbytes; i++)
+ omap_oobinfo.eccpos[i] = i+offset;
+
+ omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+ omap_oobinfo.oobfree->length = info->mtd.oobsize -
+ (offset + omap_oobinfo.eccbytes);
+
+ info->nand.ecc.layout = &omap_oobinfo;
+ }
+
#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
@@ -926,9 +1122,12 @@ static int omap_nand_remove(struct platform_device *pdev)
mtd);
platform_set_drvdata(pdev, NULL);
- if (use_dma)
+ if (info->dma_ch != -1)
omap_free_dma(info->dma_ch);
+ if (info->gpmc_irq)
+ free_irq(info->gpmc_irq, info);
+
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
iounmap(info->nand.IO_ADDR_R);
@@ -947,16 +1146,8 @@ static struct platform_driver omap_nand_driver = {
static int __init omap_nand_init(void)
{
- printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+ pr_info("%s driver initializing\n", DRIVER_NAME);
- /* This check is required if driver is being
- * loaded run time as a module
- */
- if ((1 == use_dma) && (0 == use_prefetch)) {
- printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 "
- "without use_prefetch'. Prefetch will not be"
- " used in either mode (mpu or dma)\n");
- }
return platform_driver_register(&omap_nand_driver);
}
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 3041d1f7ae3f..38fb16771f85 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int ret;
if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@@ -372,8 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
- struct tmio_nand_data *data = cell->driver_data;
+ struct tmio_nand_data *data = mfd_get_data(dev);
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
struct resource *ccr = platform_get_resource(dev,
@@ -516,7 +515,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
if (cell->suspend)
cell->suspend(dev);
@@ -527,7 +526,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4dbd0f58eebf..4f426195f8db 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -32,7 +32,7 @@ config MTD_ONENAND_OMAP2
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
- depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5PV310
+ depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4
help
Support for a OneNAND flash device connected to an Samsung SOC.
S3C64XX/S5PC100 use command mapping method.
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index c849cacf4b2f..14a49abe057e 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -63,7 +63,7 @@ struct omap2_onenand {
struct completion dma_done;
int dma_channel;
int freq;
- int (*setup)(void __iomem *base, int freq);
+ int (*setup)(void __iomem *base, int *freq_ptr);
struct regulator *regulator;
};
@@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
wait_err("controller error", state, ctrl, intr);
return -EIO;
}
- if ((intr & intr_flags) != intr_flags) {
- wait_err("timeout", state, ctrl, intr);
- return -EIO;
- }
- return 0;
+ if ((intr & intr_flags) == intr_flags)
+ return 0;
+ /* continue in the wait-for-interrupt loop */
}
if (state != FL_READING) {
@@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data)
/* DMA is not in use so this is all that is needed */
/* Revisit for OMAP3! */
- ret = c->setup(c->onenand.base, c->freq);
+ ret = c->setup(c->onenand.base, &c->freq);
return ret;
}
@@ -673,7 +671,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
if (pdata->onenand_setup != NULL) {
- r = pdata->onenand_setup(c->onenand.base, c->freq);
+ r = pdata->onenand_setup(c->onenand.base, &c->freq);
if (r < 0) {
dev_err(&pdev->dev, "Onenand platform setup failed: "
"%d\n", r);
@@ -718,8 +716,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
- "base %p\n", c->gpmc_cs, c->phys_base,
- c->onenand.base);
+ "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
+ c->onenand.base, c->freq);
c->pdev = pdev;
c->mtd.name = dev_name(&pdev->dev);
@@ -754,24 +752,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- switch ((c->onenand.version_id >> 4) & 0xf) {
- case 0:
- c->freq = 40;
- break;
- case 1:
- c->freq = 54;
- break;
- case 2:
- c->freq = 66;
- break;
- case 3:
- c->freq = 83;
- break;
- case 4:
- c->freq = 104;
- break;
- }
-
#ifdef CONFIG_MTD_PARTITIONS
r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
if (r > 0)
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 3cf193fb5e00..6abeb4f13403 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -52,6 +52,12 @@ config MTD_UBI_GLUEBI
work on top of UBI. Do not enable this unless you use legacy
software.
-source "drivers/mtd/ubi/Kconfig.debug"
+config MTD_UBI_DEBUG
+ bool "UBI debugging"
+ depends on SYSFS
+ select DEBUG_FS
+ select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
+ help
+ This option enables UBI debugging.
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
deleted file mode 100644
index fad4adc0fe2c..000000000000
--- a/drivers/mtd/ubi/Kconfig.debug
+++ /dev/null
@@ -1,73 +0,0 @@
-comment "UBI debugging options"
-
-config MTD_UBI_DEBUG
- bool "UBI debugging"
- depends on SYSFS
- select DEBUG_FS
- select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
- help
- This option enables UBI debugging.
-
-if MTD_UBI_DEBUG
-
-config MTD_UBI_DEBUG_MSG
- bool "UBI debugging messages"
- help
- This option enables UBI debugging messages.
-
-config MTD_UBI_DEBUG_PARANOID
- bool "Extra self-checks"
- help
- This option enables extra checks in UBI code. Note this slows UBI down
- significantly.
-
-config MTD_UBI_DEBUG_DISABLE_BGT
- bool "Do not enable the UBI background thread"
- help
- This option switches the background thread off by default. The thread
- may be also be enabled/disabled via UBI sysfs.
-
-config MTD_UBI_DEBUG_EMULATE_BITFLIPS
- bool "Emulate flash bit-flips"
- help
- This option emulates bit-flips with probability 1/50, which in turn
- causes scrubbing. Useful for debugging and stressing UBI.
-
-config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
- bool "Emulate flash write failures"
- help
- This option emulates write failures with probability 1/100. Useful for
- debugging and testing how UBI handlines errors.
-
-config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
- bool "Emulate flash erase failures"
- help
- This option emulates erase failures with probability 1/100. Useful for
- debugging and testing how UBI handlines errors.
-
-comment "Additional UBI debugging messages"
-
-config MTD_UBI_DEBUG_MSG_BLD
- bool "Additional UBI initialization and build messages"
- help
- This option enables detailed UBI initialization and device build
- debugging messages.
-
-config MTD_UBI_DEBUG_MSG_EBA
- bool "Eraseblock association unit messages"
- help
- This option enables debugging messages from the UBI eraseblock
- association unit.
-
-config MTD_UBI_DEBUG_MSG_WL
- bool "Wear-leveling unit messages"
- help
- This option enables debugging messages from the UBI wear-leveling
- unit.
-
-config MTD_UBI_DEBUG_MSG_IO
- bool "Input/output unit messages"
- help
- This option enables debugging messages from the UBI input/output unit.
-
-endif # MTD_UBI_DEBUG
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 5ebe280225d6..65626c1c446d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -690,11 +690,25 @@ static int io_init(struct ubi_device *ubi)
ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
+ ubi->max_write_size = ubi->mtd->writebufsize;
+ /*
+ * Maximum write size has to be greater than or equal to the min. I/O
+ * size, a multiple of it, and a power of 2.
+ */
+ if (ubi->max_write_size < ubi->min_io_size ||
+ ubi->max_write_size % ubi->min_io_size ||
+ !is_power_of_2(ubi->max_write_size)) {
+ ubi_err("bad write buffer size %d for %d min. I/O unit",
+ ubi->max_write_size, ubi->min_io_size);
+ return -EINVAL;
+ }
+
/* Calculate default aligned sizes of EC and VID headers */
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
dbg_msg("min_io_size %d", ubi->min_io_size);
+ dbg_msg("max_write_size %d", ubi->max_write_size);
dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
@@ -711,7 +725,7 @@ static int io_init(struct ubi_device *ubi)
}
/* Similar for the data offset */
- ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
+ ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
@@ -923,6 +937,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
spin_lock_init(&ubi->volumes_lock);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+ dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
+ dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
err = io_init(ubi);
if (err)
@@ -937,13 +953,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi->peb_buf2)
goto out_free;
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
- mutex_init(&ubi->dbg_buf_mutex);
- ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
- if (!ubi->dbg_peb_buf)
- goto out_free;
-#endif
-
err = attach_by_scanning(ubi);
if (err) {
dbg_err("failed to attach by scanning, error %d", err);
@@ -991,8 +1000,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
* checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
*/
spin_lock(&ubi->wl_lock);
- if (!DBG_DISABLE_BGT)
- ubi->thread_enabled = 1;
+ ubi->thread_enabled = 1;
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
@@ -1009,9 +1017,6 @@ out_detach:
out_free:
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
- vfree(ubi->dbg_peb_buf);
-#endif
if (ref)
put_device(&ubi->dev);
else
@@ -1082,9 +1087,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
put_mtd_device(ubi->mtd);
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
- vfree(ubi->dbg_peb_buf);
-#endif
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 4876977e52cb..d4d07e5f138f 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -27,6 +27,20 @@
#ifdef CONFIG_MTD_UBI_DEBUG
#include "ubi.h"
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+unsigned int ubi_msg_flags;
+unsigned int ubi_chk_flags;
+unsigned int ubi_tst_flags;
+
+module_param_named(debug_msgs, ubi_msg_flags, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug_chks, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug_tsts, ubi_tst_flags, uint, S_IRUGO | S_IWUSR);
+
+MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
+MODULE_PARM_DESC(debug_chks, "Debug check flags");
+MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
/**
* ubi_dbg_dump_ec_hdr - dump an erase counter header.
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 9eca95074bc2..0b0c2888c656 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -38,6 +38,11 @@
printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
current->pid, __func__, ##__VA_ARGS__)
+#define dbg_do_msg(typ, fmt, ...) do { \
+ if (ubi_msg_flags & typ) \
+ dbg_msg(fmt, ##__VA_ARGS__); \
+} while (0)
+
#define ubi_dbg_dump_stack() dump_stack()
struct ubi_ec_hdr;
@@ -57,62 +62,88 @@ void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+extern unsigned int ubi_msg_flags;
+
+/*
+ * Debugging message type flags (must match msg_type_names in debug.c).
+ *
+ * UBI_MSG_GEN: general messages
+ * UBI_MSG_EBA: eraseblock association messages
+ * UBI_MSG_WL: wear-leveling messages
+ * UBI_MSG_IO: input/output messages
+ * UBI_MSG_BLD: initialization and build messages
+ */
+enum {
+ UBI_MSG_GEN = 0x1,
+ UBI_MSG_EBA = 0x2,
+ UBI_MSG_WL = 0x4,
+ UBI_MSG_IO = 0x8,
+ UBI_MSG_BLD = 0x10,
+};
+
#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
print_hex_dump(l, ps, pt, r, g, b, len, a)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG
/* General debugging messages */
-#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_gen(fmt, ...) ({})
-#endif
+#define dbg_gen(fmt, ...) dbg_do_msg(UBI_MSG_GEN, fmt, ##__VA_ARGS__)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
/* Messages from the eraseblock association sub-system */
-#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_eba(fmt, ...) ({})
-#endif
+#define dbg_eba(fmt, ...) dbg_do_msg(UBI_MSG_EBA, fmt, ##__VA_ARGS__)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
/* Messages from the wear-leveling sub-system */
-#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_wl(fmt, ...) ({})
-#endif
+#define dbg_wl(fmt, ...) dbg_do_msg(UBI_MSG_WL, fmt, ##__VA_ARGS__)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
/* Messages from the input/output sub-system */
-#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_io(fmt, ...) ({})
-#endif
+#define dbg_io(fmt, ...) dbg_do_msg(UBI_MSG_IO, fmt, ##__VA_ARGS__)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
/* Initialization and build messages */
-#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#define UBI_IO_DEBUG 1
-#else
-#define dbg_bld(fmt, ...) ({})
-#define UBI_IO_DEBUG 0
-#endif
+#define dbg_bld(fmt, ...) dbg_do_msg(UBI_MSG_BLD, fmt, ##__VA_ARGS__)
+
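With this scheme the per-subsystem message macros are always compiled in and gated at run time by ubi_msg_flags. For illustration (the message text and pnum are made up), a call such as dbg_wl("PEB %d moved", pnum) effectively expands to:

	if (ubi_msg_flags & UBI_MSG_WL)
		printk(KERN_DEBUG "UBI DBG (pid %d): %s: PEB %d moved\n",
		       current->pid, __func__, pnum);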
+extern unsigned int ubi_chk_flags;
+
+/*
+ * Debugging check flags.
+ *
+ * UBI_CHK_GEN: general checks
+ * UBI_CHK_IO: check writes and erases
+ */
+enum {
+ UBI_CHK_GEN = 0x1,
+ UBI_CHK_IO = 0x2,
+};
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len);
-#else
-#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
-#define ubi_dbg_check_write(ubi, buf, pnum, offset, len) 0
-#endif
-#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
-#define DBG_DISABLE_BGT 1
-#else
-#define DBG_DISABLE_BGT 0
-#endif
+extern unsigned int ubi_tst_flags;
+
+/*
+ * Special testing flags.
+ *
+ * UBI_TST_DISABLE_BGT: disable the background thread
+ * UBI_TST_EMULATE_BITFLIPS: emulate bit-flips
+ * UBI_TST_EMULATE_WRITE_FAILURES: emulate write failures
+ * UBI_TST_EMULATE_ERASE_FAILURES: emulate erase failures
+ */
+enum {
+ UBI_TST_DISABLE_BGT = 0x1,
+ UBI_TST_EMULATE_BITFLIPS = 0x2,
+ UBI_TST_EMULATE_WRITE_FAILURES = 0x4,
+ UBI_TST_EMULATE_ERASE_FAILURES = 0x8,
+};
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(void)
+{
+ return ubi_tst_flags & UBI_TST_DISABLE_BGT;
+}
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
/**
* ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
*
@@ -120,13 +151,11 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
*/
static inline int ubi_dbg_is_bitflip(void)
{
- return !(random32() % 200);
+ if (ubi_tst_flags & UBI_TST_EMULATE_BITFLIPS)
+ return !(random32() % 200);
+ return 0;
}
-#else
-#define ubi_dbg_is_bitflip() 0
-#endif
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
/**
* ubi_dbg_is_write_failure - if it is time to emulate a write failure.
*
@@ -135,13 +164,11 @@ static inline int ubi_dbg_is_bitflip(void)
*/
static inline int ubi_dbg_is_write_failure(void)
{
- return !(random32() % 500);
+ if (ubi_tst_flags & UBI_TST_EMULATE_WRITE_FAILURES)
+ return !(random32() % 500);
+ return 0;
}
-#else
-#define ubi_dbg_is_write_failure() 0
-#endif
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
/**
* ubi_dbg_is_erase_failure - if its time to emulate an erase failure.
*
@@ -150,11 +177,10 @@ static inline int ubi_dbg_is_write_failure(void)
*/
static inline int ubi_dbg_is_erase_failure(void)
{
+ if (ubi_tst_flags & UBI_TST_EMULATE_ERASE_FAILURES)
return !(random32() % 400);
+ return 0;
}
-#else
-#define ubi_dbg_is_erase_failure() 0
-#endif
#else
@@ -177,8 +203,7 @@ static inline int ubi_dbg_is_erase_failure(void)
#define ubi_dbg_dump_flash(ubi, pnum, offset, len) ({})
#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) ({})
-#define UBI_IO_DEBUG 0
-#define DBG_DISABLE_BGT 0
+#define ubi_dbg_is_bgt_disabled() 0
#define ubi_dbg_is_bitflip() 0
#define ubi_dbg_is_write_failure() 0
#define ubi_dbg_is_erase_failure() 0
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 811775aa8ee8..aaa6e1e83b29 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -91,7 +91,7 @@
#include <linux/slab.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
@@ -146,6 +146,28 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
if (err)
return err;
+ /*
+ * Deliberately corrupt the buffer to improve robustness. Indeed, if we
+ * do not do this, the following may happen:
+ * 1. The buffer contains data from a previous operation, e.g., read from
+ * another PEB previously. The data looks as expected, e.g., if we
+ * just do not read anything and return - the caller would not
+ * notice this. E.g., if we are reading a VID header, the buffer may
+ * contain a valid VID header from another PEB.
+ * 2. The driver is buggy and returns success or -EBADMSG or
+ * -EUCLEAN, but it does not actually put any data to the buffer.
+ *
+ * This may confuse UBI or upper layers - they may think the buffer
+ * contains valid data while in fact it is just old data. This is
+ * especially possible because UBI (and UBIFS) relies on CRC, and
+ * treats data as correct even in case of ECC errors if the CRC is
+ * correct.
+ *
+ * Try to prevent this situation by changing the first byte of the
+ * buffer.
+ */
+ *((uint8_t *)buf) ^= 0xFF;
+
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
@@ -166,7 +188,7 @@ retry:
return UBI_IO_BITFLIPS;
}
- if (read != len && retries++ < UBI_IO_RETRIES) {
+ if (retries++ < UBI_IO_RETRIES) {
dbg_io("error %d%s while reading %d bytes from PEB %d:%d,"
" read only %zd bytes, retry",
err, errstr, len, pnum, offset, read);
@@ -480,6 +502,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
size_t written;
loff_t addr;
uint32_t data = 0;
+ /*
+ * Note, we cannot generally define VID header buffers on stack,
+ * because of the way we deal with these buffers (see the header
+ * comment in this file). But we know this is a NOR-specific piece of
+ * code, so we can do this. But yes, this is error-prone and we should
+ * (pre-)allocate VID header buffer instead.
+ */
struct ubi_vid_hdr vid_hdr;
/*
@@ -507,11 +536,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
* PEB.
*/
err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) {
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+ err1 == UBI_IO_FF) {
struct ubi_ec_hdr ec_hdr;
err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR)
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+ err1 == UBI_IO_FF)
/*
* Both VID and EC headers are corrupted, so we can
* safely erase this PEB and not afraid that it will be
@@ -752,9 +783,8 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (verbose)
ubi_warn("no EC header found at PEB %d, "
"only 0xFF bytes", pnum);
- else if (UBI_IO_DEBUG)
- dbg_msg("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ dbg_bld("no EC header found at PEB %d, "
+ "only 0xFF bytes", pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -769,9 +799,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_EC_HDR_MAGIC);
ubi_dbg_dump_ec_hdr(ec_hdr);
- } else if (UBI_IO_DEBUG)
- dbg_msg("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of "
+ "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -783,9 +813,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad EC header CRC at PEB %d, calculated "
"%#08x, read %#08x", pnum, crc, hdr_crc);
ubi_dbg_dump_ec_hdr(ec_hdr);
- } else if (UBI_IO_DEBUG)
- dbg_msg("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
+ }
+ dbg_bld("bad EC header CRC at PEB %d, calculated "
+ "%#08x, read %#08x", pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
@@ -1008,9 +1038,8 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (verbose)
ubi_warn("no VID header found at PEB %d, "
"only 0xFF bytes", pnum);
- else if (UBI_IO_DEBUG)
- dbg_msg("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ dbg_bld("no VID header found at PEB %d, "
+ "only 0xFF bytes", pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -1021,9 +1050,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_VID_HDR_MAGIC);
ubi_dbg_dump_vid_hdr(vid_hdr);
- } else if (UBI_IO_DEBUG)
- dbg_msg("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of "
+ "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -1035,9 +1064,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad CRC at PEB %d, calculated %#08x, "
"read %#08x", pnum, crc, hdr_crc);
ubi_dbg_dump_vid_hdr(vid_hdr);
- } else if (UBI_IO_DEBUG)
- dbg_msg("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
+ }
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, "
+ "read %#08x", pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
else
@@ -1097,7 +1126,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
@@ -1111,6 +1140,9 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
err = ubi_io_is_bad(ubi, pnum);
if (!err)
return err;
@@ -1135,6 +1167,9 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
ubi_err("bad magic %#08x, must be %#08x",
@@ -1170,6 +1205,9 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
@@ -1211,6 +1249,9 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
@@ -1249,6 +1290,9 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr *vid_hdr;
void *p;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
@@ -1294,15 +1338,26 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len)
{
int err, i;
+ size_t read;
+ void *buf1;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- mutex_lock(&ubi->dbg_buf_mutex);
- err = ubi_io_read(ubi, ubi->dbg_peb_buf, pnum, offset, len);
- if (err)
- goto out_unlock;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
+ buf1 = __vmalloc(len, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ if (!buf1) {
+ ubi_err("cannot allocate memory to check writes");
+ return 0;
+ }
+
+ err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
+ if (err && err != -EUCLEAN)
+ goto out_free;
for (i = 0; i < len; i++) {
uint8_t c = ((uint8_t *)buf)[i];
- uint8_t c1 = ((uint8_t *)ubi->dbg_peb_buf)[i];
+ uint8_t c1 = ((uint8_t *)buf1)[i];
int dump_len;
if (c == c1)
@@ -1319,17 +1374,17 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
ubi_msg("hex dump of the read buffer from %d to %d",
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
- ubi->dbg_peb_buf + i, dump_len, 1);
+ buf1 + i, dump_len, 1);
ubi_dbg_dump_stack();
err = -EINVAL;
- goto out_unlock;
+ goto out_free;
}
- mutex_unlock(&ubi->dbg_buf_mutex);
+ vfree(buf1);
return 0;
-out_unlock:
- mutex_unlock(&ubi->dbg_buf_mutex);
+out_free:
+ vfree(buf1);
return err;
}
@@ -1348,36 +1403,44 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
+ void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- mutex_lock(&ubi->dbg_buf_mutex);
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf);
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
+ buf = __vmalloc(len, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ if (!buf) {
+ ubi_err("cannot allocate memory to check for 0xFFs");
+ return 0;
+ }
+
+ err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
goto error;
}
- err = ubi_check_pattern(ubi->dbg_peb_buf, 0xFF, len);
+ err = ubi_check_pattern(buf, 0xFF, len);
if (err == 0) {
ubi_err("flash region at PEB %d:%d, length %d does not "
"contain all 0xFF bytes", pnum, offset, len);
goto fail;
}
- mutex_unlock(&ubi->dbg_buf_mutex);
+ vfree(buf);
return 0;
fail:
ubi_err("paranoid check failed for PEB %d", pnum);
ubi_msg("hex dump of the %d-%d region", offset, offset + len);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
- ubi->dbg_peb_buf, len, 1);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
err = -EINVAL;
error:
ubi_dbg_dump_stack();
- mutex_unlock(&ubi->dbg_buf_mutex);
+ vfree(buf);
return err;
}
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 69fa4ef03c53..d39716e5b204 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -40,7 +40,9 @@ void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
{
di->ubi_num = ubi->ubi_num;
di->leb_size = ubi->leb_size;
+ di->leb_start = ubi->leb_start;
di->min_io_size = ubi->min_io_size;
+ di->max_write_size = ubi->max_write_size;
di->ro_mode = ubi->ro_mode;
di->cdev = ubi->cdev.dev;
}
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 79ca304fc4db..11eb8ef12485 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -39,32 +39,46 @@
* eraseblocks are put to the @free list and the physical eraseblock to be
* erased are put to the @erase list.
*
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
* UBI tries to distinguish between 2 types of corruptions.
- * 1. Corruptions caused by power cuts. These are harmless and expected
- * corruptions and UBI tries to handle them gracefully, without printing too
- * many warnings and error messages. The idea is that we do not lose
- * important data in these case - we may lose only the data which was being
- * written to the media just before the power cut happened, and the upper
- * layers (e.g., UBIFS) are supposed to handle these situations. UBI puts
- * these PEBs to the head of the @erase list and they are scheduled for
- * erasure.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in these cases
+ * - we may lose only the data which was being written to the media just before
+ * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to
+ * handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
*
* 2. Unexpected corruptions which are not caused by power cuts. During
- * scanning, such PEBs are put to the @corr list and UBI preserves them.
- * Obviously, this lessens the amount of available PEBs, and if at some
- * point UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly
- * informs about such PEBs every time the MTD device is attached.
+ * scanning, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this lessens the amount of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
*
* However, it is difficult to reliably distinguish between these types of
- * corruptions and UBI's strategy is as follows. UBI assumes (2.) if the VID
- * header is corrupted and the data area does not contain all 0xFFs, and there
- * were not bit-flips or integrity errors while reading the data area. Otherwise
- * UBI assumes (1.). The assumptions are:
- * o if the data area contains only 0xFFs, there is no data, and it is safe
- * to just erase this PEB.
- * o if the data area has bit-flips and data integrity errors (ECC errors on
+ * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2
+ * if the VID header is corrupted and the data area does not contain all 0xFFs,
+ * and there were no bit-flips or integrity errors while reading the data area.
+ * Otherwise UBI assumes corruption type 1. So the decision criteria are as
+ * follows.
+ * o If the data area contains only 0xFFs, there is no data, and it is safe
+ * to just erase this PEB - this is corruption type 1.
+ * o If the data area has bit-flips or data integrity errors (ECC errors on
* NAND), it is probably a PEB which was being erased when power cut
- * happened.
+ * happened, so this is corruption type 1. However, this is just a guess,
+ * which might be wrong.
+ * o Otherwise this is corruption type 2.
*/
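A hypothetical sketch of the decision rule described above (the helper name and parameters are made up for illustration; this is not a function from scan.c):

/* Returns 1 for power-cut (type 1) corruption, 2 for preserved (type 2). */
static int classify_corrupted_peb(int data_is_all_ff, int data_read_had_errors)
{
	if (data_is_all_ff)
		return 1;	/* no payload at all: safe to erase */
	if (data_read_had_errors)
		return 1;	/* bit-flips/ECC errors: likely an interrupted erase */
	return 2;		/* real-looking data under a bad header: preserve it */
}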
#include <linux/err.h>
@@ -74,7 +88,7 @@
#include <linux/random.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
#else
#define paranoid_check_si(ubi, si) 0
@@ -115,7 +129,7 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
} else
BUG();
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
if (!seb)
return -ENOMEM;
@@ -144,7 +158,7 @@ static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
if (!seb)
return -ENOMEM;
@@ -553,7 +567,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
if (err)
return err;
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
if (!seb)
return -ENOMEM;
@@ -1152,9 +1166,15 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
si->volumes = RB_ROOT;
err = -ENOMEM;
+ si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab",
+ sizeof(struct ubi_scan_leb),
+ 0, 0, NULL);
+ if (!si->scan_leb_slab)
+ goto out_si;
+
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out_si;
+ goto out_slab;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
@@ -1215,6 +1235,8 @@ out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
+out_slab:
+ kmem_cache_destroy(si->scan_leb_slab);
out_si:
ubi_scan_destroy_si(si);
return ERR_PTR(err);
@@ -1223,11 +1245,12 @@ out_si:
/**
* destroy_sv - free the scanning volume information
* @sv: scanning volume information
+ * @si: scanning information
*
* This function destroys the volume RB-tree (@sv->root) and the scanning
* volume information.
*/
-static void destroy_sv(struct ubi_scan_volume *sv)
+static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
{
struct ubi_scan_leb *seb;
struct rb_node *this = sv->root.rb_node;
@@ -1247,7 +1270,7 @@ static void destroy_sv(struct ubi_scan_volume *sv)
this->rb_right = NULL;
}
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
}
kfree(sv);
@@ -1265,19 +1288,19 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
/* Destroy the volume RB-tree */
@@ -1298,14 +1321,15 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
rb->rb_right = NULL;
}
- destroy_sv(sv);
+ destroy_sv(si, sv);
}
}
+ kmem_cache_destroy(si->scan_leb_slab);
kfree(si);
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_check_si - check the scanning information.
@@ -1323,6 +1347,9 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
struct ubi_scan_leb *seb, *last_seb;
uint8_t *buf;
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
/*
* At first, check that scanning information is OK.
*/
@@ -1575,4 +1602,4 @@ out:
return -EINVAL;
}
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+#endif /* CONFIG_MTD_UBI_DEBUG */
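
The scan.c hunks above replace per-object kmalloc()/kfree() of struct ubi_scan_leb with a dedicated slab cache that lives for the duration of the scan. A minimal sketch of the same kmem_cache lifecycle, using hypothetical names and assuming a plain object with no constructor or special alignment:

#include <linux/slab.h>
#include <linux/errno.h>

struct scan_obj {                       /* stand-in for struct ubi_scan_leb */
	int pnum;
	int ec;
};

static struct kmem_cache *scan_slab;

static int scan_slab_init(void)
{
	/* One cache for all scan objects; created once per scan. */
	scan_slab = kmem_cache_create("scan_obj_slab", sizeof(struct scan_obj),
				      0, 0, NULL);
	return scan_slab ? 0 : -ENOMEM;
}

static struct scan_obj *scan_obj_alloc(void)
{
	/* Replaces kmalloc(sizeof(struct scan_obj), GFP_KERNEL). */
	return kmem_cache_alloc(scan_slab, GFP_KERNEL);
}

static void scan_obj_free(struct scan_obj *obj)
{
	/* Objects from a cache must be returned to the same cache. */
	kmem_cache_free(scan_slab, obj);
}

static void scan_slab_exit(void)
{
	/* Every object must be freed before the cache is destroyed. */
	kmem_cache_destroy(scan_slab);
}

Since the scan allocates one such object per PEB, a slab cache avoids the per-allocation overhead and fragmentation of generic kmalloc() during attach.
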
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index a3264f0bef2b..d48aef15ab5d 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -109,6 +109,7 @@ struct ubi_scan_volume {
* @mean_ec: mean erase counter value
* @ec_sum: a temporary variable used when calculating @mean_ec
* @ec_count: a temporary variable used when calculating @mean_ec
+ * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects
*
* This data structure contains the result of scanning and may be used by other
* UBI sub-systems to build final UBI data structures, further error-recovery
@@ -134,6 +135,7 @@ struct ubi_scan_info {
int mean_ec;
uint64_t ec_sum;
int ec_count;
+ struct kmem_cache *scan_leb_slab;
};
struct ubi_device;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 0b0149c41fe3..f1be8b79663c 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -40,6 +40,7 @@
#include <linux/notifier.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/ubi.h>
+#include <asm/pgtable.h>
#include "ubi-media.h"
#include "scan.h"
@@ -381,14 +382,14 @@ struct ubi_wl_entry;
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
* not
* @nor_flash: non-zero if working on top of NOR flash
+ * @max_write_size: maximum amount of bytes the underlying flash can write at a
+ * time (MTD write buffer size)
* @mtd: MTD device descriptor
*
* @peb_buf1: a buffer of PEB size used for different purposes
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf1 and @peb_buf2
* @ckvol_mutex: serializes static volume checking when opening
- * @dbg_peb_buf: buffer of PEB size used for debugging
- * @dbg_buf_mutex: protects @dbg_peb_buf
*/
struct ubi_device {
struct cdev cdev;
@@ -464,16 +465,13 @@ struct ubi_device {
int vid_hdr_shift;
unsigned int bad_allowed:1;
unsigned int nor_flash:1;
+ int max_write_size;
struct mtd_info *mtd;
void *peb_buf1;
void *peb_buf2;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
- void *dbg_peb_buf;
- struct mutex dbg_buf_mutex;
-#endif
};
extern struct kmem_cache *ubi_wl_entry_slab;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index c47620dfc722..b79e0dea3632 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_volumes(struct ubi_device *ubi);
#else
#define paranoid_check_volumes(ubi) 0
@@ -711,7 +711,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
volume_sysfs_close(vol);
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_check_volume - check volume information.
@@ -876,6 +876,9 @@ static int paranoid_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
for (i = 0; i < ubi->vtbl_slots; i++) {
err = paranoid_check_volume(ubi, i);
if (err)
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 0b8141fc5c26..fd3bf770f518 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -62,7 +62,7 @@
#include <asm/div64.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static void paranoid_vtbl_check(const struct ubi_device *ubi);
#else
#define paranoid_vtbl_check(ubi)
@@ -868,7 +868,7 @@ out_free:
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_vtbl_check - check volume table.
@@ -876,10 +876,13 @@ out_free:
*/
static void paranoid_vtbl_check(const struct ubi_device *ubi)
{
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return;
+
if (vtbl_check(ubi, ubi->vtbl)) {
ubi_err("paranoid check failed");
BUG();
}
}
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 655bbbe415d9..b4cf57db2556 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -161,7 +161,7 @@ struct ubi_work {
int torture;
};
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
struct rb_root *root);
@@ -613,7 +613,7 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
list_add_tail(&wrk->list, &ubi->works);
ubi_assert(ubi->works_count >= 0);
ubi->works_count += 1;
- if (ubi->thread_enabled)
+ if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled())
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
}
@@ -1364,7 +1364,7 @@ int ubi_thread(void *u)
spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works) || ubi->ro_mode ||
- !ubi->thread_enabled) {
+ !ubi->thread_enabled || ubi_dbg_is_bgt_disabled()) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&ubi->wl_lock);
schedule();
@@ -1561,7 +1561,7 @@ void ubi_wl_close(struct ubi_device *ubi)
kfree(ubi->lookuptbl);
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_check_ec - make sure that the erase counter of a PEB is correct.
@@ -1578,6 +1578,9 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
@@ -1614,6 +1617,9 @@ out_free:
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
struct rb_root *root)
{
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
if (in_wl_tree(e, root))
return 0;
@@ -1636,6 +1642,9 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
struct ubi_wl_entry *p;
int i;
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
list_for_each_entry(p, &ubi->pq[i], u.list)
if (p == e)
@@ -1646,4 +1655,5 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
ubi_dbg_dump_stack();
return -EINVAL;
}
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+
+#endif /* CONFIG_MTD_UBI_DEBUG */
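
The recurring shape in the UBI hunks above is that checks which used to be compiled in only under CONFIG_MTD_UBI_DEBUG_PARANOID are now built under CONFIG_MTD_UBI_DEBUG and gated at run time on ubi_chk_flags. A condensed sketch of that pattern (the check name and body are placeholders; ubi_chk_flags and UBI_CHK_GEN come from the patched debug header):

#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_something(struct ubi_device *ubi)
{
	/* Runtime switch: skip the expensive check unless it was requested. */
	if (!(ubi_chk_flags & UBI_CHK_GEN))
		return 0;

	/* ... the actual consistency check would go here ... */
	return 0;
}
#else
#define paranoid_check_something(ubi) 0
#endif

The same idea drives the wl.c background-thread hunks: ubi_dbg_is_bgt_disabled() lets the thread be paused at run time instead of requiring a rebuild.
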
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 9ab58097fa2e..7cb375e0e29c 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -265,7 +265,7 @@ struct atl1c_recv_ret_status {
__le32 word3;
};
-/* RFD desciptor */
+/* RFD descriptor */
struct atl1c_rx_free_desc {
__le64 buffer_addr;
};
@@ -531,7 +531,7 @@ struct atl1c_rfd_ring {
struct atl1c_buffer *buffer_info;
};
-/* receive return desciptor (rrd) ring */
+/* receive return descriptor (rrd) ring */
struct atl1c_rrd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 9d48659e3b28..32e64cc85d2c 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -145,13 +145,6 @@ static struct {
{ "Broadcom NetXtreme II BCM57712E XGb" }
};
-#ifndef PCI_DEVICE_ID_NX2_57712
-#define PCI_DEVICE_ID_NX2_57712 0x1662
-#endif
-#ifndef PCI_DEVICE_ID_NX2_57712E
-#define PCI_DEVICE_ID_NX2_57712E 0x1663
-#endif
-
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1a6e9eb7af43..338bea147c64 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2130,7 +2130,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
}
/*
-* First release a slave and than destroy the bond if no more slaves are left.
+* First release a slave and then destroy the bond if no more slaves are left.
* Must be under rtnl_lock when this function is called.
*/
static int bond_release_and_destroy(struct net_device *bond_dev,
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index b38d987da67d..9560b9d624bd 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -1,6 +1,4 @@
-ifeq ($(CONFIG_CAIF_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 14050786218a..110eda01843c 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -633,9 +633,6 @@ static void c_can_start(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
- /* enable status change, error and module interrupts */
- c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
-
/* basic c_can configuration */
c_can_chip_config(dev);
@@ -643,6 +640,9 @@ static void c_can_start(struct net_device *dev)
/* reset tx helper pointers */
priv->tx_next = priv->tx_echo = 0;
+
+ /* enable status change, error and module interrupts */
+ c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
}
static void c_can_stop(struct net_device *dev)
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 366f5cc050ae..102b16c6cc97 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/netdevice.h>
#include <linux/can.h>
@@ -1643,7 +1644,7 @@ static int __devinit ican3_probe(struct platform_device *pdev)
struct device *dev;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (!pdata)
return -ENXIO;
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
index 1d6f4b8d393a..a31661948c42 100644
--- a/drivers/net/ftmac100.c
+++ b/drivers/net/ftmac100.c
@@ -1102,7 +1102,7 @@ static int ftmac100_probe(struct platform_device *pdev)
goto err_req_mem;
}
- priv->base = ioremap(res->start, res->end - res->start);
+ priv->base = ioremap(res->start, resource_size(res));
if (!priv->base) {
dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
err = -EIO;
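
The ftmac100 fix is an off-by-one: struct resource describes an inclusive [start, end] range, so res->end - res->start is one byte short of the window, while resource_size() includes the final byte. A small illustration with a hypothetical 4 KiB register window:

#include <linux/ioport.h>
#include <linux/kernel.h>

/* Hypothetical 4 KiB register window at 0x40000000. */
static struct resource regs = {
	.start = 0x40000000,
	.end   = 0x40000fff,            /* inclusive end address */
};

static void show_sizes(void)
{
	/* 0xfff: one byte short - an ioremap() of this misses the last register. */
	unsigned long long wrong = regs.end - regs.start;

	/* 0x1000: what ioremap() should be given. */
	unsigned long long right = resource_size(&regs);

	pr_info("wrong=%#llx right=%#llx\n", wrong, right);
}
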
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ccb231c4d933..2a0ad9a501bb 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -949,6 +949,11 @@ static void gfar_detect_errata(struct gfar_private *priv)
(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_A002;
+ /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
+ if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
+ (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+ priv->errata |= GFAR_ERRATA_12;
+
if (priv->errata)
dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
priv->errata);
@@ -2154,8 +2159,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set up checksumming */
if (CHECKSUM_PARTIAL == skb->ip_summed) {
fcb = gfar_add_fcb(skb);
- lstatus |= BD_LFLAG(TXBD_TOE);
- gfar_tx_checksum(skb, fcb);
+ /* as specified by errata */
+ if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
+ && ((unsigned long)fcb % 0x20) > 0x18)) {
+ __skb_pull(skb, GMAC_FCB_LEN);
+ skb_checksum_help(skb);
+ } else {
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ gfar_tx_checksum(skb, fcb);
+ }
}
if (vlan_tx_tag_present(skb)) {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 54de4135e932..ec5d595ce2e2 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1039,6 +1039,7 @@ enum gfar_errata {
GFAR_ERRATA_74 = 0x01,
GFAR_ERRATA_76 = 0x02,
GFAR_ERRATA_A002 = 0x04,
+ GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
};
/* Struct stolen almost completely (and shamelessly) from the FCC enet source
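
The new GFAR_ERRATA_12 workaround in gianfar.c falls back to software checksumming whenever the frame control block starts in the last 8 bytes of a 32-byte block, since the FCB would then straddle the boundary the erratum cares about; the 0x18 threshold is 0x20 minus the 8-byte FCB that the fallback path pulls off the skb. A tiny restatement of that predicate (names hypothetical):

#include <linux/types.h>

#define FCB_LEN 8	/* assumed from the GMAC_FCB_LEN pull in the hunk above */

/*
 * True if an FCB_LEN-byte FCB starting at addr would cross a 32-byte
 * boundary - the condition the eTSEC49 workaround checks for.
 */
static bool fcb_crosses_32_byte_boundary(unsigned long addr)
{
	return (addr % 0x20) > (0x20 - FCB_LEN);	/* i.e. > 0x18 */
}
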
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 928b2b83cef5..efd44afeae83 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -1145,7 +1146,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
- struct ks8842_platform_data *pdata = pdev->dev.platform_data;
+ struct ks8842_platform_data *pdata = mfd_get_data(pdev);
u16 id;
unsigned i;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 5b37d3c191e4..78e34e9e4f00 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,8 +39,11 @@ struct macvlan_port {
struct list_head vlans;
struct rcu_head rcu;
bool passthru;
+ int count;
};
+static void macvlan_port_destroy(struct net_device *dev);
+
#define macvlan_port_get_rcu(dev) \
((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
@@ -457,8 +460,13 @@ static int macvlan_init(struct net_device *dev)
static void macvlan_uninit(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvlan_port *port = vlan->port;
free_percpu(vlan->pcpu_stats);
+
+ port->count -= 1;
+ if (!port->count)
+ macvlan_port_destroy(port->dev);
}
static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -691,12 +699,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
- if (!list_empty(&port->vlans))
+ if (port->count)
return -EINVAL;
port->passthru = true;
memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
}
+ port->count += 1;
err = register_netdevice(dev);
if (err < 0)
goto destroy_port;
@@ -707,7 +716,8 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return 0;
destroy_port:
- if (list_empty(&port->vlans))
+ port->count -= 1;
+ if (!port->count)
macvlan_port_destroy(lowerdev);
return err;
@@ -725,13 +735,9 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvlan_port *port = vlan->port;
list_del(&vlan->list);
unregister_netdevice_queue(dev, head);
-
- if (list_empty(&port->vlans))
- macvlan_port_destroy(port->dev);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);
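
The macvlan hunks replace the list_empty(&port->vlans) test with an explicit port->count, so the port's lifetime is tied to how many macvlan devices still reference it rather than to when each device is unlinked; the last macvlan_uninit() now tears the port down. A rough sketch of the resulting count-based lifetime, with hypothetical names and all locking and netdev plumbing omitted:

#include <linux/errno.h>
#include <linux/types.h>

struct port {
	int count;		/* number of live macvlan devices on this port */
};

static void port_destroy(struct port *port);

static int vlan_attach(struct port *port, bool passthru)
{
	/* Passthru mode requires being the only device on the port. */
	if (passthru && port->count)
		return -EINVAL;
	port->count += 1;	/* taken before register_netdevice() above */
	return 0;
}

static void vlan_detach(struct port *port)
{
	/* Called from the uninit path; the last device destroys the port. */
	port->count -= 1;
	if (!port->count)
		port_destroy(port);
}
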
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 40fa59e2fd5c..32678b6c6b39 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9501,7 +9501,7 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
struct niu_parent *p;
int i;
- plat_dev = platform_device_register_simple("niu", niu_parent_index,
+ plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
NULL, 0);
if (IS_ERR(plat_dev))
return NULL;
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 43583309a65d..31e9407a0739 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -129,7 +129,7 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
state->strm.next_in = NULL;
state->w_size = w_size;
- state->strm.workspace = vmalloc(zlib_deflate_workspacesize());
+ state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
if (state->strm.workspace == NULL)
goto out_free;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 2d21c60085bc..348b4f1367c9 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2460,7 +2460,7 @@ map_error:
* The 3032 supports sglists by using the 3 addr/len pairs (ALP)
* in the IOCB plus a chain of outbound address lists (OAL) that
* each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
- * will used to point to an OAL when more ALP entries are required.
+ * will be used to point to an OAL when more ALP entries are required.
* The IOCB is always the top of the chain followed by one or more
* OALs (when necessary).
*/
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 5e403511289d..493b0de3848b 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2685,9 +2685,9 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- tp->mii.supports_gmii ?
+ (tp->mii.supports_gmii ?
ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0);
+ ADVERTISED_1000baseT_Full : 0));
if (RTL_R8(PHYstatus) & TBI_Enable)
netif_info(tp, link, dev, "TBI auto-negotiating\n");
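
The r8169 hunk is a pure precedence fix: bitwise | binds tighter than ?:, so without the added parentheses the whole 10/100 advertisement mask became the ternary's condition and only the gigabit bits (or 0) were ever passed to rtl8169_set_speed(). A standalone illustration with made-up bit values:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x0f;	/* stands in for the 10/100 bits */
	unsigned int gig  = 0x30;	/* stands in for the 1000BASE-T bits */
	int supports_gmii = 1;

	/* Without parentheses this parses as (mask | supports_gmii) ? gig : 0. */
	unsigned int broken = mask | supports_gmii ? gig : 0;

	/* With parentheses it is the intended mask | (gmii ? gig : 0). */
	unsigned int fixed = mask | (supports_gmii ? gig : 0);

	printf("broken=0x%x fixed=0x%x\n", broken, fixed);	/* 0x30 vs 0x3f */
	return 0;
}
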
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 44150f2f7bfd..26afbaae23f0 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -382,7 +382,7 @@ static void rionet_remove(struct rio_dev *rdev)
struct rionet_peer *peer, *tmp;
free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
- __ilog2(sizeof(void *)) + 4 : 0);
+ __fls(sizeof(void *)) + 4 : 0);
unregister_netdev(ndev);
free_netdev(ndev);
@@ -450,7 +450,7 @@ static int rionet_setup_netdev(struct rio_mport *mport)
}
rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
- mport->sys_size ? __ilog2(sizeof(void *)) + 4 : 0);
+ mport->sys_size ? __fls(sizeof(void *)) + 4 : 0);
if (!rionet_active) {
rc = -ENOMEM;
goto out;
@@ -571,5 +571,5 @@ static void __exit rionet_exit(void)
rio_unregister_driver(&rionet_driver);
}
-module_init(rionet_init);
+late_initcall(rionet_init);
module_exit(rionet_exit);
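
In the rionet hunks, __ilog2() and __fls() agree for a power-of-two argument such as sizeof(void *), so the allocation order is unchanged; the switch presumably just moves to the helper that is available on every architecture, and late_initcall() ensures the RapidIO core has initialized first. Assuming 64-bit pointers and 4 KiB pages, the order works out as follows:

#include <linux/bitops.h>

/* Order of the rionet_active allocation when mport->sys_size is set
 * (assumes 64-bit pointers and 4 KiB pages).
 */
static unsigned int rionet_active_order(void)
{
	unsigned int order = __fls(sizeof(void *)) + 4;	/* __fls(8) = 3, so 7 */

	/* 2^7 pages * 4 KiB = 512 KiB = 8 bytes for each of 65536 destids. */
	return order;
}
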
diff --git a/drivers/net/skfp/Makefile b/drivers/net/skfp/Makefile
index cb23580fcffa..b0be0234abf6 100644
--- a/drivers/net/skfp/Makefile
+++ b/drivers/net/skfp/Makefile
@@ -17,4 +17,4 @@ skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
# projects. To keep the source common for all those drivers (and
# thus simplify fixes to it), please do not clean it up!
-EXTRA_CFLAGS += -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index ede017872367..d225077964e2 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -843,7 +843,7 @@ struct gem_txd {
/* GEM requires that RX descriptors are provided four at a time,
* aligned. Also, the RX ring may not wrap around. This means that
- * there will be at least 4 unused desciptor entries in the middle
+ * there will be at least 4 unused descriptor entries in the middle
* of the RX ring at all times.
*
* Similar to HME, GEM assumes that it can write garbage bytes before
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 7cb301da7474..0825db6d883f 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -44,10 +44,6 @@
#include <linux/tcp.h>
-/* There is no singlethread_cpu, so schedule work on the current cpu. */
-#define singlethread_cpu -1
-
-
/*
* First, "tile_net_init_module()" initializes all four "devices" which
* can be used by linux.
@@ -73,15 +69,16 @@
* return, knowing we will be called again later. Otherwise, we
* reenable the ingress interrupt, and call "napi_complete()".
*
+ * HACK: Since disabling the ingress interrupt is not reliable, we
+ * ignore the interrupt if the global "active" flag is false.
+ *
*
* NOTE: The use of "native_driver" ensures that EPP exists, and that
- * "epp_sendv" is legal, and that "LIPP" is being used.
+ * we are using "LIPP" and "LEPP".
*
* NOTE: Failing to free completions for an arbitrarily long time
* (which is defined to be illegal) does in fact cause bizarre
* problems. The "egress_timer" helps prevent this from happening.
- *
- * NOTE: The egress code can be interrupted by the interrupt handler.
*/
@@ -142,6 +139,7 @@
MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");
+
/*
* Queue of incoming packets for a specific cpu and device.
*
@@ -177,7 +175,7 @@ struct tile_net_cpu {
struct tile_netio_queue queue;
/* Statistics. */
struct tile_net_stats_t stats;
- /* ISSUE: Is this needed? */
+ /* True iff NAPI is enabled. */
bool napi_enabled;
/* True if this tile has successfully registered with the IPP. */
bool registered;
@@ -200,20 +198,20 @@ struct tile_net_cpu {
struct tile_net_priv {
/* Our network device. */
struct net_device *dev;
- /* The actual egress queue. */
- lepp_queue_t *epp_queue;
- /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */
- spinlock_t cmd_lock;
- /* Protects "epp_queue->comp_head". */
- spinlock_t comp_lock;
+ /* Pages making up the egress queue. */
+ struct page *eq_pages;
+ /* Address of the actual egress queue. */
+ lepp_queue_t *eq;
+ /* Protects "eq". */
+ spinlock_t eq_lock;
/* The hypervisor handle for this interface. */
int hv_devhdl;
/* The intr bit mask that IDs this device. */
u32 intr_id;
/* True iff "tile_net_open_aux()" has succeeded. */
- int partly_opened;
- /* True iff "tile_net_open_inner()" has succeeded. */
- int fully_opened;
+ bool partly_opened;
+ /* True iff the device is "active". */
+ bool active;
/* Effective network cpus. */
struct cpumask network_cpus_map;
/* Number of network cpus. */
@@ -228,6 +226,10 @@ struct tile_net_priv {
struct tile_net_cpu *cpu[NR_CPUS];
};
+/* Log2 of the number of small pages needed for the egress queue. */
+#define EQ_ORDER get_order(sizeof(lepp_queue_t))
+/* Size of the egress queue's pages. */
+#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER))
/*
* The actual devices (xgbe0, xgbe1, gbe0, gbe1).
@@ -284,7 +286,11 @@ static void net_printk(char *fmt, ...)
*/
static void dump_packet(unsigned char *data, unsigned long length, char *s)
{
+ int my_cpu = smp_processor_id();
+
unsigned long i;
+ char buf[128];
+
static unsigned int count;
pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
@@ -294,10 +300,12 @@ static void dump_packet(unsigned char *data, unsigned long length, char *s)
for (i = 0; i < length; i++) {
if ((i & 0xf) == 0)
- sprintf(buf, "%8.8lx:", i);
+ sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
sprintf(buf + strlen(buf), " %2.2x", data[i]);
- if ((i & 0xf) == 0xf || i == length - 1)
- pr_info("%s\n", buf);
+ if ((i & 0xf) == 0xf || i == length - 1) {
+ strcat(buf, "\n");
+ pr_info("%s", buf);
+ }
}
}
#endif
@@ -351,60 +359,109 @@ static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
/*
* Provide a linux buffer for LIPP.
+ *
+ * Note that the ACTUAL allocation for each buffer is a "struct sk_buff",
+ * plus a chunk of memory that includes not only the requested bytes, but
+ * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info".
+ *
+ * Note that "struct skb_shared_info" is 88 bytes with 64K pages and
+ * 268 bytes with 4K pages (since the frags[] array needs 18 entries).
+ *
+ * Without jumbo packets, the maximum packet size will be 1536 bytes,
+ * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told
+ * the hardware to clip at 1518 bytes instead of 1536 bytes, then we
+ * could save an entire cache line, but in practice, we don't need it.
+ *
+ * Since CPAs are 38 bits, and we can only encode the high 31 bits in
+ * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must
+ * align the actual "va" mod 128.
+ *
+ * We assume that the underlying "head" will be aligned mod 64. Note
+ * that in practice, we have seen "head" NOT aligned mod 128 even when
+ * using 2048 byte allocations, which is surprising.
+ *
+ * If "head" WAS always aligned mod 128, we could change LIPP to
+ * assume that the low SIX bits are zero, and the 7th bit is one, that
+ * is, align the actual "va" mod 128 plus 64, which would be "free".
+ *
+ * For now, the actual "head" pointer points at NET_SKB_PAD bytes of
+ * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff
+ * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for
+ * the actual packet, plus 62 bytes of empty padding, plus some
+ * padding and the "struct skb_shared_info".
+ *
+ * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88
+ * bytes, or 1816 bytes, which fits comfortably into 2048 bytes.
+ *
+ * With 64K pages, a small buffer thus needs 32+92+4+2+126+88
+ * bytes, or 344 bytes, which means we are wasting 64+ bytes, and
+ * could presumably increase the size of small buffers.
+ *
+ * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268
+ * bytes, or 1996 bytes, which fits comfortably into 2048 bytes.
+ *
+ * With 4K pages, a small buffer thus needs 32+92+4+2+126+268
+ * bytes, or 524 bytes, which is annoyingly wasteful.
+ *
+ * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192?
+ *
+ * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64?
*/
static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
bool small)
{
- /* ISSUE: What should we use here? */
+#if TILE_NET_MTU <= 1536
+ /* Without "jumbo", 2 + 1536 should be sufficient. */
+ unsigned int large_size = NET_IP_ALIGN + 1536;
+#else
+ /* ISSUE: This has not been tested. */
unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
+#endif
- /* Round up to ensure to avoid "false sharing" with last cache line. */
- unsigned int buffer_size =
+ /* Avoid "false sharing" with last cache line. */
+ /* ISSUE: This is already done by "dev_alloc_skb()". */
+ unsigned int len =
(((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
- /*
- * ISSUE: Since CPAs are 38 bits, and we can only encode the
- * high 31 bits in a "linux_buffer_t", the low 7 bits must be
- * zero, and thus, we must align the actual "va" mod 128.
- */
- const unsigned long align = 128;
+ unsigned int padding = 128 - NET_SKB_PAD;
+ unsigned int align;
struct sk_buff *skb;
void *va;
struct sk_buff **skb_ptr;
- /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */
- /* and also "reserves" that many bytes. */
- /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */
- int len = sizeof(*skb_ptr) + align + buffer_size;
-
- while (1) {
-
- /* Allocate (or fail). */
- skb = dev_alloc_skb(len);
- if (skb == NULL)
- return false;
-
- /* Make room for a back-pointer to 'skb'. */
- skb_reserve(skb, sizeof(*skb_ptr));
+ /* Request 96 extra bytes for alignment purposes. */
+ skb = dev_alloc_skb(len + padding);
+ if (skb == NULL)
+ return false;
- /* Make sure we are aligned. */
- skb_reserve(skb, -(long)skb->data & (align - 1));
+ /* Skip 32 or 96 bytes to align "data" mod 128. */
+ align = -(long)skb->data & (128 - 1);
+ BUG_ON(align > padding);
+ skb_reserve(skb, align);
- /* This address is given to IPP. */
- va = skb->data;
+ /* This address is given to IPP. */
+ va = skb->data;
- if (small)
- break;
+ /* Buffers must not span a huge page. */
+ BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);
- /* ISSUE: This has never been observed! */
- /* Large buffers must not span a huge page. */
- if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0)
- break;
- pr_err("Leaking unaligned linux buffer at %p.\n", va);
+#ifdef TILE_NET_PARANOIA
+#if CHIP_HAS_CBOX_HOME_MAP()
+ if (hash_default) {
+ HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
+ if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
+ panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
+ va, hv_pte_get_mode(pte), hv_pte_val(pte));
}
+#endif
+#endif
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(va, len);
/* Skip two bytes to satisfy LIPP assumptions. */
/* Note that this aligns IP on a 16 byte boundary. */
@@ -415,23 +472,9 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
skb_ptr = va - sizeof(*skb_ptr);
*skb_ptr = skb;
- /* Invalidate the packet buffer. */
- if (!hash_default)
- __inv_buffer(skb->data, buffer_size);
-
/* Make sure "skb_ptr" has been flushed. */
__insn_mf();
-#ifdef TILE_NET_PARANOIA
-#if CHIP_HAS_CBOX_HOME_MAP()
- if (hash_default) {
- HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
- if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
- panic("Non-coherent ingress buffer!");
- }
-#endif
-#endif
-
/* Provide the new buffer. */
tile_net_provide_linux_buffer(info, va, small);
@@ -469,48 +512,64 @@ oops:
* Grab some LEPP completions, and store them in "comps", of size
* "comps_size", and return the number of completions which were
* stored, so the caller can free them.
- *
- * If "pending" is not NULL, it will be set to true if there might
- * still be some pending completions caused by this tile, else false.
*/
-static unsigned int tile_net_lepp_grab_comps(struct net_device *dev,
+static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
struct sk_buff *comps[],
unsigned int comps_size,
- bool *pending)
+ unsigned int min_size)
{
- struct tile_net_priv *priv = netdev_priv(dev);
-
- lepp_queue_t *eq = priv->epp_queue;
-
unsigned int n = 0;
- unsigned int comp_head;
- unsigned int comp_busy;
- unsigned int comp_tail;
-
- spin_lock(&priv->comp_lock);
-
- comp_head = eq->comp_head;
- comp_busy = eq->comp_busy;
- comp_tail = eq->comp_tail;
+ unsigned int comp_head = eq->comp_head;
+ unsigned int comp_busy = eq->comp_busy;
while (comp_head != comp_busy && n < comps_size) {
comps[n++] = eq->comps[comp_head];
LEPP_QINC(comp_head);
}
- if (pending != NULL)
- *pending = (comp_head != comp_tail);
+ if (n < min_size)
+ return 0;
eq->comp_head = comp_head;
- spin_unlock(&priv->comp_lock);
-
return n;
}
/*
+ * Free some comps, and return true iff there are still some pending.
+ */
+static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ lepp_queue_t *eq = priv->eq;
+
+ struct sk_buff *olds[64];
+ unsigned int wanted = 64;
+ unsigned int i, n;
+ bool pending;
+
+ spin_lock(&priv->eq_lock);
+
+ if (all)
+ eq->comp_busy = eq->comp_tail;
+
+ n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
+
+ pending = (eq->comp_head != eq->comp_tail);
+
+ spin_unlock(&priv->eq_lock);
+
+ for (i = 0; i < n; i++)
+ kfree_skb(olds[i]);
+
+ return pending;
+}
+
+
+/*
* Make sure the egress timer is scheduled.
*
* Note that we use "schedule if not scheduled" logic instead of the more
@@ -544,21 +603,11 @@ static void tile_net_handle_egress_timer(unsigned long arg)
struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
struct net_device *dev = info->napi.dev;
- struct sk_buff *olds[32];
- unsigned int wanted = 32;
- unsigned int i, nolds = 0;
- bool pending;
-
/* The timer is no longer scheduled. */
info->egress_timer_scheduled = false;
- nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending);
-
- for (i = 0; i < nolds; i++)
- kfree_skb(olds[i]);
-
- /* Reschedule timer if needed. */
- if (pending)
+ /* Free comps, and reschedule timer if more are pending. */
+ if (tile_net_lepp_free_comps(dev, false))
tile_net_schedule_egress_timer(info);
}
@@ -636,8 +685,39 @@ static bool is_dup_ack(char *s1, char *s2, unsigned int len)
+static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
+{
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ kfree_skb(skb);
+
+ /* Consume this packet. */
+ qup->__packet_receive_read = index2;
+}
+
+
/*
- * Like "tile_net_handle_packets()", but just discard packets.
+ * Like "tile_net_poll()", but just discard packets.
*/
static void tile_net_discard_packets(struct net_device *dev)
{
@@ -650,32 +730,8 @@ static void tile_net_discard_packets(struct net_device *dev)
while (qup->__packet_receive_read !=
qsp->__packet_receive_queue.__packet_write) {
-
int index = qup->__packet_receive_read;
-
- int index2_aux = index + sizeof(netio_pkt_t);
- int index2 =
- ((index2_aux ==
- qsp->__packet_receive_queue.__last_packet_plus_one) ?
- 0 : index2_aux);
-
- netio_pkt_t *pkt = (netio_pkt_t *)
- ((unsigned long) &qsp[1] + index);
-
- /* Extract the "linux_buffer_t". */
- unsigned int buffer = pkt->__packet.word;
-
- /* Convert "linux_buffer_t" to "va". */
- void *va = __va((phys_addr_t)(buffer >> 1) << 7);
-
- /* Acquire the associated "skb". */
- struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
- struct sk_buff *skb = *skb_ptr;
-
- kfree_skb(skb);
-
- /* Consume this packet. */
- qup->__packet_receive_read = index2;
+ tile_net_discard_aux(info, index);
}
}
@@ -704,7 +760,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
- /* Extract the packet size. */
+ /* Extract the packet size. FIXME: Shouldn't the second line */
+ /* get subtracted? Mostly moot, since it should be "zero". */
unsigned long len =
(NETIO_PKT_CUSTOM_LENGTH(pkt) +
NET_IP_ALIGN - NETIO_PACKET_PADDING);
@@ -722,15 +779,6 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
/* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
unsigned char *buf = va + NET_IP_ALIGN;
-#ifdef IGNORE_DUP_ACKS
-
- static int other;
- static int final;
- static int keep;
- static int skip;
-
-#endif
-
/* Invalidate the packet buffer. */
if (!hash_default)
__inv_buffer(buf, len);
@@ -745,16 +793,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
#ifdef TILE_NET_VERIFY_INGRESS
if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
- /*
- * FIXME: This complains about UDP packets
- * with a "zero" checksum (bug 6624).
- */
-#ifdef TILE_NET_PANIC_ON_BAD
- dump_packet(buf, len, "rx");
- panic("Bad L4 checksum.");
-#else
+ /* Bug 6624: Includes UDP packets with a "zero" checksum. */
pr_warning("Bad L4 checksum on %d byte packet.\n", len);
-#endif
}
if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
@@ -769,90 +809,29 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
}
break;
case NETIO_PKT_STATUS_BAD:
-#ifdef TILE_NET_PANIC_ON_BAD
- dump_packet(buf, len, "rx");
- panic("Unexpected BAD packet.");
-#else
- pr_warning("Unexpected BAD %d byte packet.\n", len);
-#endif
+ pr_warning("Unexpected BAD %ld byte packet.\n", len);
}
#endif
filter = 0;
+ /* ISSUE: Filter TCP packets with "bad" checksums? */
+
if (!(dev->flags & IFF_UP)) {
/* Filter packets received before we're up. */
filter = 1;
+ } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) {
+ /* Filter "truncated" packets. */
+ filter = 1;
} else if (!(dev->flags & IFF_PROMISC)) {
- /*
- * FIXME: Implement HW multicast filter.
- */
- if (is_unicast_ether_addr(buf)) {
+ /* FIXME: Implement HW multicast filter. */
+ if (!is_multicast_ether_addr(buf)) {
/* Filter packets not for our address. */
const u8 *mine = dev->dev_addr;
filter = compare_ether_addr(mine, buf);
}
}
-#ifdef IGNORE_DUP_ACKS
-
- if (len != 66) {
- /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */
-
- other++;
-
- } else if (index2 ==
- qsp->__packet_receive_queue.__packet_write) {
-
- final++;
-
- } else {
-
- netio_pkt_t *pkt2 = (netio_pkt_t *)
- ((unsigned long) &qsp[1] + index2);
-
- netio_pkt_metadata_t *metadata2 =
- NETIO_PKT_METADATA(pkt2);
-
- /* Extract the packet size. */
- unsigned long len2 =
- (NETIO_PKT_CUSTOM_LENGTH(pkt2) +
- NET_IP_ALIGN - NETIO_PACKET_PADDING);
-
- if (len2 == 66 &&
- NETIO_PKT_FLOW_HASH_M(metadata, pkt) ==
- NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) {
-
- /* Extract the "linux_buffer_t". */
- unsigned int buffer2 = pkt2->__packet.word;
-
- /* Convert "linux_buffer_t" to "va". */
- void *va2 =
- __va((phys_addr_t)(buffer2 >> 1) << 7);
-
- /* Extract the packet data pointer. */
- /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
- unsigned char *buf2 = va2 + NET_IP_ALIGN;
-
- /* Invalidate the packet buffer. */
- if (!hash_default)
- __inv_buffer(buf2, len2);
-
- if (is_dup_ack(buf, buf2, len)) {
- skip++;
- filter = 1;
- } else {
- keep++;
- }
- }
- }
-
- if (net_ratelimit())
- pr_info("Other %d Final %d Keep %d Skip %d.\n",
- other, final, keep, skip);
-
-#endif
-
if (filter) {
/* ISSUE: Update "drop" statistics? */
@@ -877,10 +856,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
/* NOTE: This call also sets "skb->dev = dev". */
skb->protocol = eth_type_trans(skb, dev);
- /* ISSUE: Discard corrupt packets? */
- /* ISSUE: Discard packets with bad checksums? */
-
- /* Avoid recomputing TCP/UDP checksums. */
+ /* Avoid recomputing "good" TCP/UDP checksums. */
if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -912,9 +888,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
/*
* Handle some packets for the given device on the current CPU.
*
- * ISSUE: The "rotting packet" race condition occurs if a packet
- * arrives after the queue appears to be empty, and before the
- * hypervisor interrupt is re-enabled.
+ * If "tile_net_stop()" is called on some other tile while this
+ * function is running, we will return, hopefully before that
+ * other tile asks us to call "napi_disable()".
+ *
+ * The "rotting packet" race condition occurs if a packet arrives
+ * during the extremely narrow window between the queue appearing to
+ * be empty, and the ingress interrupt being re-enabled. This happens
+ * a LOT under heavy network load.
*/
static int tile_net_poll(struct napi_struct *napi, int budget)
{
@@ -928,7 +909,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
unsigned int work = 0;
- while (1) {
+ while (priv->active) {
int index = qup->__packet_receive_read;
if (index == qsp->__packet_receive_queue.__packet_write)
break;
@@ -941,19 +922,24 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
napi_complete(&info->napi);
- /* Re-enable hypervisor interrupts. */
+ if (!priv->active)
+ goto done;
+
+ /* Re-enable the ingress interrupt. */
enable_percpu_irq(priv->intr_id);
- /* HACK: Avoid the "rotting packet" problem. */
+ /* HACK: Avoid the "rotting packet" problem (see above). */
if (qup->__packet_receive_read !=
- qsp->__packet_receive_queue.__packet_write)
- napi_schedule(&info->napi);
-
- /* ISSUE: Handle completions? */
+ qsp->__packet_receive_queue.__packet_write) {
+ /* ISSUE: Sometimes this returns zero, presumably */
+ /* because an interrupt was handled for this tile. */
+ (void)napi_reschedule(&info->napi);
+ }
done:
- tile_net_provide_needed_buffers(info);
+ if (priv->active)
+ tile_net_provide_needed_buffers(info);
return work;
}
@@ -961,6 +947,12 @@ done:
/*
* Handle an ingress interrupt for the given device on the current cpu.
+ *
+ * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has
+ * been called! This is probably due to "pending hypervisor downcalls".
+ *
+ * ISSUE: Is there any race condition between the "napi_schedule()" here
+ * and the "napi_complete()" call above?
*/
static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
{
@@ -969,9 +961,15 @@ static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
int my_cpu = smp_processor_id();
struct tile_net_cpu *info = priv->cpu[my_cpu];
- /* Disable hypervisor interrupt. */
+ /* Disable the ingress interrupt. */
disable_percpu_irq(priv->intr_id);
+ /* Ignore unwanted interrupts. */
+ if (!priv->active)
+ return IRQ_HANDLED;
+
+ /* ISSUE: Sometimes "info->napi_enabled" is false here. */
+
napi_schedule(&info->napi);
return IRQ_HANDLED;
@@ -1005,8 +1003,7 @@ static int tile_net_open_aux(struct net_device *dev)
*/
{
int epp_home = hv_lotar_to_cpu(epp_lotar);
- struct page *page = virt_to_page(priv->epp_queue);
- homecache_change_page_home(page, 0, epp_home);
+ homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
}
/*
@@ -1015,9 +1012,9 @@ static int tile_net_open_aux(struct net_device *dev)
{
netio_ipp_address_t ea = {
.va = 0,
- .pa = __pa(priv->epp_queue),
+ .pa = __pa(priv->eq),
.pte = hv_pte(0),
- .size = PAGE_SIZE,
+ .size = EQ_SIZE,
};
ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1043,7 +1040,7 @@ static int tile_net_open_aux(struct net_device *dev)
/*
- * Register with hypervisor on each CPU.
+ * Register with hypervisor on the current CPU.
*
* Strangely, this function does important things even if it "fails",
* which is especially common if the link is not up yet. Hopefully
@@ -1092,7 +1089,8 @@ static void tile_net_register(void *dev_ptr)
priv->cpu[my_cpu] = info;
/*
- * Register ourselves with the IPP.
+ * Register ourselves with LIPP. This does a lot of stuff,
+ * including invoking the LIPP registration code.
*/
ret = hv_dev_pwrite(priv->hv_devhdl, 0,
(HV_VirtAddr)&config,
@@ -1101,8 +1099,11 @@ static void tile_net_register(void *dev_ptr)
PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
ret);
if (ret < 0) {
- printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF"
- " failure %d\n", ret);
+ if (ret != NETIO_LINK_DOWN) {
+ printk(KERN_DEBUG "hv_dev_pwrite "
+ "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
+ ret);
+ }
info->link_down = (ret == NETIO_LINK_DOWN);
return;
}
@@ -1145,15 +1146,47 @@ static void tile_net_register(void *dev_ptr)
NETIO_IPP_GET_FASTIO_OFF);
PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
- netif_napi_add(dev, &info->napi, tile_net_poll, 64);
-
/* Now we are registered. */
info->registered = true;
}
/*
- * Unregister with hypervisor on each CPU.
+ * Deregister with hypervisor on the current CPU.
+ *
+ * This simply discards all our credits, so no more packets will be
+ * delivered to this tile. There may still be packets in our queue.
+ *
+ * Also, disable the ingress interrupt.
+ */
+static void tile_net_deregister(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Disable the ingress interrupt. */
+ disable_percpu_irq(priv->intr_id);
+
+ /* Do nothing else if not registered. */
+ if (info == NULL || !info->registered)
+ return;
+
+ {
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ /* Discard all our credits. */
+ __netio_fastio_return_credits(qup->__fastio_index, -1);
+ }
+}
+
+
+/*
+ * Unregister with hypervisor on the current CPU.
+ *
+ * Also, disable the ingress interrupt.
*/
static void tile_net_unregister(void *dev_ptr)
{
@@ -1162,35 +1195,23 @@ static void tile_net_unregister(void *dev_ptr)
int my_cpu = smp_processor_id();
struct tile_net_cpu *info = priv->cpu[my_cpu];
- int ret = 0;
+ int ret;
int dummy = 0;
- /* Do nothing if never registered. */
- if (info == NULL)
- return;
+ /* Disable the ingress interrupt. */
+ disable_percpu_irq(priv->intr_id);
- /* Do nothing if already unregistered. */
- if (!info->registered)
+ /* Do nothing else if not registered. */
+ if (info == NULL || !info->registered)
return;
- /*
- * Unregister ourselves with LIPP.
- */
+ /* Unregister ourselves with LIPP/LEPP. */
ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
- PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n",
- ret);
- if (ret < 0) {
- /* FIXME: Just panic? */
- pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF"
- " failure %d\n", ret);
- }
+ if (ret < 0)
+ panic("Failed to unregister with LIPP/LEPP!\n");
- /*
- * Discard all packets still in our NetIO queue. Hopefully,
- * once the unregister call is complete, there will be no
- * packets still in flight on the IDN.
- */
+ /* Discard all packets still in our NetIO queue. */
tile_net_discard_packets(dev);
/* Reset state. */
@@ -1200,11 +1221,6 @@ static void tile_net_unregister(void *dev_ptr)
/* Cancel egress timer. */
del_timer(&info->egress_timer);
info->egress_timer_scheduled = false;
-
- netif_napi_del(&info->napi);
-
- /* Now we are unregistered. */
- info->registered = false;
}
@@ -1212,18 +1228,28 @@ static void tile_net_unregister(void *dev_ptr)
* Helper function for "tile_net_stop()".
*
* Also used to handle registration failure in "tile_net_open_inner()",
- * when "fully_opened" is known to be false, and the various extra
- * steps in "tile_net_stop()" are not necessary. ISSUE: It might be
- * simpler if we could just call "tile_net_stop()" anyway.
+ * when the various extra steps in "tile_net_stop()" are not necessary.
*/
static void tile_net_stop_aux(struct net_device *dev)
{
struct tile_net_priv *priv = netdev_priv(dev);
+ int i;
int dummy = 0;
- /* Unregister all tiles, so LIPP will stop delivering packets. */
+ /*
+ * Unregister all tiles, so LIPP will stop delivering packets.
+ * Also, delete all the "napi" objects (sequentially, to protect
+ * "dev->napi_list").
+ */
on_each_cpu(tile_net_unregister, (void *)dev, 1);
+ for_each_online_cpu(i) {
+ struct tile_net_cpu *info = priv->cpu[i];
+ if (info != NULL && info->registered) {
+ netif_napi_del(&info->napi);
+ info->registered = false;
+ }
+ }
/* Stop LIPP/LEPP. */
if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
@@ -1235,18 +1261,15 @@ static void tile_net_stop_aux(struct net_device *dev)
/*
- * Disable ingress interrupts for the given device on the current cpu.
+ * Disable NAPI for the given device on the current cpu.
*/
-static void tile_net_disable_intr(void *dev_ptr)
+static void tile_net_stop_disable(void *dev_ptr)
{
struct net_device *dev = (struct net_device *)dev_ptr;
struct tile_net_priv *priv = netdev_priv(dev);
int my_cpu = smp_processor_id();
struct tile_net_cpu *info = priv->cpu[my_cpu];
- /* Disable hypervisor interrupt. */
- disable_percpu_irq(priv->intr_id);
-
/* Disable NAPI if needed. */
if (info != NULL && info->napi_enabled) {
napi_disable(&info->napi);
@@ -1256,21 +1279,24 @@ static void tile_net_disable_intr(void *dev_ptr)
/*
- * Enable ingress interrupts for the given device on the current cpu.
+ * Enable NAPI and the ingress interrupt for the given device
+ * on the current cpu.
+ *
+ * ISSUE: Only do this for "network cpus"?
*/
-static void tile_net_enable_intr(void *dev_ptr)
+static void tile_net_open_enable(void *dev_ptr)
{
struct net_device *dev = (struct net_device *)dev_ptr;
struct tile_net_priv *priv = netdev_priv(dev);
int my_cpu = smp_processor_id();
struct tile_net_cpu *info = priv->cpu[my_cpu];
- /* Enable hypervisor interrupt. */
- enable_percpu_irq(priv->intr_id);
-
/* Enable NAPI. */
napi_enable(&info->napi);
info->napi_enabled = true;
+
+ /* Enable the ingress interrupt. */
+ enable_percpu_irq(priv->intr_id);
}
@@ -1288,8 +1314,9 @@ static int tile_net_open_inner(struct net_device *dev)
int my_cpu = smp_processor_id();
struct tile_net_cpu *info;
struct tile_netio_queue *queue;
- unsigned int irq;
+ int result = 0;
int i;
+ int dummy = 0;
/*
* First try to register just on the local CPU, and handle any
@@ -1307,42 +1334,52 @@ static int tile_net_open_inner(struct net_device *dev)
/*
* Now register everywhere else. If any registration fails,
* even for "link down" (which might not be possible), we
- * clean up using "tile_net_stop_aux()".
+ * clean up using "tile_net_stop_aux()". Also, add all the
+ * "napi" objects (sequentially, to protect "dev->napi_list").
+ * ISSUE: Only use "netif_napi_add()" for "network cpus"?
*/
smp_call_function(tile_net_register, (void *)dev, 1);
for_each_online_cpu(i) {
- if (!priv->cpu[i]->registered) {
- tile_net_stop_aux(dev);
- return -EAGAIN;
- }
+ struct tile_net_cpu *info = priv->cpu[i];
+ if (info->registered)
+ netif_napi_add(dev, &info->napi, tile_net_poll, 64);
+ else
+ result = -EAGAIN;
+ }
+ if (result != 0) {
+ tile_net_stop_aux(dev);
+ return result;
}
queue = &info->queue;
- /*
- * Set the device intr bit mask.
- * The tile_net_register above sets per tile __intr_id.
- */
- priv->intr_id = queue->__system_part->__intr_id;
- BUG_ON(!priv->intr_id);
-
- /*
- * Register the device interrupt handler.
- * The __ffs() function returns the index into the interrupt handler
- * table from the interrupt bit mask which should have one bit
- * and one bit only set.
- */
- irq = __ffs(priv->intr_id);
- tile_irq_activate(irq, TILE_IRQ_PERCPU);
- BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
- 0, dev->name, (void *)dev) != 0);
+ if (priv->intr_id == 0) {
+ unsigned int irq;
- /* ISSUE: How could "priv->fully_opened" ever be "true" here? */
-
- if (!priv->fully_opened) {
+ /*
+ * Acquire the irq allocated by the hypervisor. Every
+ * queue gets the same irq. The "__intr_id" field is
+ * "1 << irq", so we use "__ffs()" to extract "irq".
+ */
+ priv->intr_id = queue->__system_part->__intr_id;
+ BUG_ON(priv->intr_id == 0);
+ irq = __ffs(priv->intr_id);
- int dummy = 0;
+ /*
+ * Register the ingress interrupt handler for this
+ * device, permanently.
+ *
+ * We used to call "free_irq()" in "tile_net_stop()",
+ * and then re-register the handler here every time,
+ * but that caused DNP errors in "handle_IRQ_event()"
+ * because "desc->action" was NULL. See bug 9143.
+ */
+ tile_irq_activate(irq, TILE_IRQ_PERCPU);
+ BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
+ 0, dev->name, (void *)dev) != 0);
+ }
+ {
/* Allocate initial buffers. */
int max_buffers =
@@ -1359,18 +1396,21 @@ static int tile_net_open_inner(struct net_device *dev)
if (info->num_needed_small_buffers != 0 ||
info->num_needed_large_buffers != 0)
panic("Insufficient memory for buffer stack!");
+ }
- /* Start LIPP/LEPP and activate "ingress" at the shim. */
- if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
- sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
- panic("Failed to activate the LIPP Shim!\n");
+ /* We are about to be active. */
+ priv->active = true;
- priv->fully_opened = 1;
- }
+ /* Make sure "active" is visible to all tiles. */
+ mb();
- /* On each tile, enable the hypervisor to trigger interrupts. */
- /* ISSUE: Do this before starting LIPP/LEPP? */
- on_each_cpu(tile_net_enable_intr, (void *)dev, 1);
+ /* On each tile, enable NAPI and the ingress interrupt. */
+ on_each_cpu(tile_net_open_enable, (void *)dev, 1);
+
+ /* Start LIPP/LEPP and activate "ingress" at the shim. */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
+ panic("Failed to activate the LIPP Shim!\n");
/* Start our transmit queue. */
netif_start_queue(dev);
@@ -1396,9 +1436,9 @@ static void tile_net_open_retry(struct work_struct *w)
* ourselves to try again later; otherwise, tell Linux we now have
* a working link. ISSUE: What if the return value is negative?
*/
- if (tile_net_open_inner(priv->dev))
- schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
- TILE_NET_RETRY_INTERVAL);
+ if (tile_net_open_inner(priv->dev) != 0)
+ schedule_delayed_work(&priv->retry_work,
+ TILE_NET_RETRY_INTERVAL);
else
netif_carrier_on(priv->dev);
}
@@ -1412,8 +1452,8 @@ static void tile_net_open_retry(struct work_struct *w)
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
+ * handler is registered with the OS (if needed), the watchdog timer
+ * is started, and the stack is notified that the interface is ready.
*
* If the actual link is not available yet, then we tell Linux that
* we have no carrier, and we keep checking until the link comes up.
@@ -1468,6 +1508,10 @@ static int tile_net_open(struct net_device *dev)
#endif
priv->partly_opened = 1;
+
+ } else {
+ /* FIXME: Is this possible? */
+ /* printk("Already partly opened.\n"); */
}
/*
@@ -1487,57 +1531,17 @@ static int tile_net_open(struct net_device *dev)
* and then remember to try again later.
*/
netif_carrier_off(dev);
- schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
- TILE_NET_RETRY_INTERVAL);
+ schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);
return 0;
}
-/*
- * Disables a network interface.
- *
- * Returns 0, this is not allowed to fail.
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the drivers control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
- *
- * ISSUE: Can this can be called while "tile_net_poll()" is running?
- */
-static int tile_net_stop(struct net_device *dev)
+static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
{
- struct tile_net_priv *priv = netdev_priv(dev);
-
- bool pending = true;
-
- PDEBUG("tile_net_stop()\n");
-
- /* ISSUE: Only needed if not yet fully open. */
- cancel_delayed_work_sync(&priv->retry_work);
-
- /* Can't transmit any more. */
- netif_stop_queue(dev);
-
- /*
- * Disable hypervisor interrupts on each tile.
- */
- on_each_cpu(tile_net_disable_intr, (void *)dev, 1);
-
- /*
- * Unregister the interrupt handler.
- * The __ffs() function returns the index into the interrupt handler
- * table from the interrupt bit mask which should have one bit
- * and one bit only set.
- */
- if (priv->intr_id)
- free_irq(__ffs(priv->intr_id), dev);
-
- /*
- * Drain all the LIPP buffers.
- */
+ int n = 0;
+ /* Drain all the LIPP buffers. */
while (true) {
int buffer;
@@ -1560,43 +1564,105 @@ static int tile_net_stop(struct net_device *dev)
kfree_skb(skb);
}
+
+ n++;
}
- /* Stop LIPP/LEPP. */
- tile_net_stop_aux(dev);
+ return n;
+}
- priv->fully_opened = 0;
+/*
+ * Disables a network interface.
+ *
+ * Returns 0, this is not allowed to fail.
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ *
+ * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"?
+ *
+ * Before we are called by "__dev_close()", "netif_running()" will
+ * have been cleared, so no NEW calls to "tile_net_poll()" will be
+ * made by "netpoll_poll_dev()".
+ *
+ * Often, this can cause some tiles to still have packets in their
+ * queues, so we must call "tile_net_discard_packets()" later.
+ *
+ * Note that some other tile may still be INSIDE "tile_net_poll()",
+ * and in fact, many will be, if there is heavy network load.
+ *
+ * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when
+ * any tile is still "napi_schedule()"'d will induce a horrible crash
+ * when "msleep()" is called. This includes tiles which are inside
+ * "tile_net_poll()" which have not yet called "napi_complete()".
+ *
+ * So, we must first try to wait long enough for other tiles to finish
+ * with any current "tile_net_poll()" call, and, hopefully, to clear
+ * the "scheduled" flag. ISSUE: It is unclear what happens to tiles
+ * which have called "napi_schedule()" but which had not yet tried to
+ * call "tile_net_poll()", or which exhausted their budget inside
+ * "tile_net_poll()" just before this function was called.
+ */
+static int tile_net_stop(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ PDEBUG("tile_net_stop()\n");
+ /* Start discarding packets. */
+ priv->active = false;
+
+ /* Make sure "active" is visible to all tiles. */
+ mb();
/*
- * XXX: ISSUE: It appears that, in practice anyway, by the
- * time we get here, there are no pending completions.
+ * On each tile, make sure no NEW packets get delivered, and
+ * disable the ingress interrupt.
+ *
+ * Note that the ingress interrupt can fire AFTER this,
+ * presumably due to packets which were recently delivered,
+ * but it will have no effect.
*/
- while (pending) {
+ on_each_cpu(tile_net_deregister, (void *)dev, 1);
- struct sk_buff *olds[32];
- unsigned int wanted = 32;
- unsigned int i, nolds = 0;
+ /* Optimistically drain LIPP buffers. */
+ (void)tile_net_drain_lipp_buffers(priv);
- nolds = tile_net_lepp_grab_comps(dev, olds,
- wanted, &pending);
+ /* ISSUE: Only needed if not yet fully open. */
+ cancel_delayed_work_sync(&priv->retry_work);
- /* ISSUE: We have never actually seen this debug spew. */
- if (nolds != 0)
- pr_info("During tile_net_stop(), grabbed %d comps.\n",
- nolds);
+ /* Can't transmit any more. */
+ netif_stop_queue(dev);
- for (i = 0; i < nolds; i++)
- kfree_skb(olds[i]);
- }
+ /* Disable NAPI on each tile. */
+ on_each_cpu(tile_net_stop_disable, (void *)dev, 1);
+
+ /*
+ * Drain any remaining LIPP buffers. NOTE: This "printk()"
+ * has never been observed, but in theory it could happen.
+ */
+ if (tile_net_drain_lipp_buffers(priv) != 0)
+ printk("Had to drain some extra LIPP buffers!\n");
+ /* Stop LIPP/LEPP. */
+ tile_net_stop_aux(dev);
+
+ /*
+ * ISSUE: It appears that, in practice anyway, by the time we
+ * get here, there are no pending completions, but just in case,
+ * we free (all of) them anyway.
+ */
+ while (tile_net_lepp_free_comps(dev, true))
+ /* loop */;
/* Wipe the EPP queue. */
- memset(priv->epp_queue, 0, sizeof(lepp_queue_t));
+ memset(priv->eq, 0, sizeof(lepp_queue_t));
/* Evict the EPP queue. */
- finv_buffer(priv->epp_queue, PAGE_SIZE);
+ finv_buffer(priv->eq, EQ_SIZE);
return 0;
}
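The shutdown ordering described in the comment above tile_net_stop() — publish a stop flag, issue a memory barrier, then run a per-cpu callback — can be reduced to a minimal sketch. Everything named example_* below is hypothetical and only illustrates the pattern, not the driver's actual code.

    #include <linux/smp.h>
    #include <linux/netdevice.h>

    /* Hypothetical flag standing in for priv->active. */
    static bool example_active = true;

    /* Per-cpu callback: by the time this runs, example_active is already false. */
    static void example_stop_rx(void *info)
    {
    	/* disable ingress delivery and the ingress interrupt on this cpu */
    }

    static void example_quiesce(struct net_device *dev)
    {
    	/* 1. Start discarding packets. */
    	example_active = false;

    	/* 2. Make the flag visible to all cpus before the callbacks run. */
    	mb();

    	/* 3. Disable ingress on every cpu, waiting for completion. */
    	on_each_cpu(example_stop_rx, (void *)dev, 1);
    }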
@@ -1620,7 +1686,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
if (b_len != 0) {
if (!hash_default)
- finv_buffer_remote(b_data, b_len);
+ finv_buffer_remote(b_data, b_len, 0);
cpa = __pa(b_data);
frags[n].cpa_lo = cpa;
@@ -1643,7 +1709,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
if (!hash_default) {
void *va = pfn_to_kaddr(pfn) + f->page_offset;
BUG_ON(PageHighMem(f->page));
- finv_buffer_remote(va, f->size);
+ finv_buffer_remote(va, f->size, 0);
}
cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
@@ -1742,17 +1808,15 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
unsigned long irqflags;
- lepp_queue_t *eq = priv->epp_queue;
+ lepp_queue_t *eq = priv->eq;
- struct sk_buff *olds[4];
- unsigned int wanted = 4;
+ struct sk_buff *olds[8];
+ unsigned int wanted = 8;
unsigned int i, nolds = 0;
unsigned int cmd_head, cmd_tail, cmd_next;
unsigned int comp_tail;
- unsigned int free_slots;
-
/* Paranoia. */
BUG_ON(skb->protocol != htons(ETH_P_IP));
@@ -1780,34 +1844,32 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
/* Enqueue the command. */
- spin_lock_irqsave(&priv->cmd_lock, irqflags);
+ spin_lock_irqsave(&priv->eq_lock, irqflags);
/*
* Handle completions if needed to make room.
* HACK: Spin until there is sufficient room.
*/
- free_slots = lepp_num_free_comp_slots(eq);
- if (free_slots < 1) {
-spin:
- nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
- wanted - nolds, NULL);
- if (lepp_num_free_comp_slots(eq) < 1)
- goto spin;
+ if (lepp_num_free_comp_slots(eq) == 0) {
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
+ if (nolds == 0) {
+busy:
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
}
cmd_head = eq->cmd_head;
cmd_tail = eq->cmd_tail;
- /* NOTE: The "gotos" below are untested. */
-
/* Prepare to advance, detecting full queue. */
cmd_next = cmd_tail + cmd_size;
if (cmd_tail < cmd_head && cmd_next >= cmd_head)
- goto spin;
+ goto busy;
if (cmd_next > LEPP_CMD_LIMIT) {
cmd_next = 0;
if (cmd_next == cmd_head)
- goto spin;
+ goto busy;
}
/* Copy the command. */
@@ -1823,14 +1885,18 @@ spin:
eq->comp_tail = comp_tail;
/* Flush before allowing LEPP to handle the command. */
+ /* ISSUE: Is this the optimal location for the flush? */
__insn_mf();
eq->cmd_tail = cmd_tail;
- spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
-
+ /* NOTE: Using "4" here is more efficient than "0" or "2", */
+ /* and, strangely, more efficient than pre-checking the number */
+ /* of available completions, and comparing it to 4. */
if (nolds == 0)
- nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
+
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
/* Handle completions. */
for (i = 0; i < nolds; i++)
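Both tile_net_tx_tso() above and tile_net_tx() below guard the LEPP command ring with the same full-queue test: advance the tail by one command, treat the ring as full if the new tail would reach the head, and wrap to zero once past LEPP_CMD_LIMIT. A stand-alone restatement of that test (hypothetical function name, no kernel dependencies):

    /* Illustration only: detect "ring full" the way the surrounding hunks do. */
    static int ring_would_be_full(unsigned int head, unsigned int tail,
    			      unsigned int cmd_size, unsigned int limit)
    {
    	unsigned int next = tail + cmd_size;

    	if (tail < head && next >= head)
    		return 1;		/* would overrun the head */
    	if (next > limit) {
    		next = 0;		/* wrap to the start of the ring */
    		if (next == head)
    			return 1;	/* wrapped straight into the head */
    	}
    	return 0;
    }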
@@ -1870,10 +1936,10 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned int num_frags;
- lepp_queue_t *eq = priv->epp_queue;
+ lepp_queue_t *eq = priv->eq;
- struct sk_buff *olds[4];
- unsigned int wanted = 4;
+ struct sk_buff *olds[8];
+ unsigned int wanted = 8;
unsigned int i, nolds = 0;
unsigned int cmd_size = sizeof(lepp_cmd_t);
@@ -1883,8 +1949,6 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
lepp_cmd_t cmds[LEPP_MAX_FRAGS];
- unsigned int free_slots;
-
/*
* This is paranoia, since we think that if the link doesn't come
@@ -1905,7 +1969,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
if (hash_default) {
HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
- panic("Non-coherent egress buffer!");
+ panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx",
+ data, hv_pte_get_mode(pte), hv_pte_val(pte));
}
#endif
#endif
@@ -1958,37 +2023,35 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
/* Enqueue the commands. */
- spin_lock_irqsave(&priv->cmd_lock, irqflags);
+ spin_lock_irqsave(&priv->eq_lock, irqflags);
/*
* Handle completions if needed to make room.
* HACK: Spin until there is sufficient room.
*/
- free_slots = lepp_num_free_comp_slots(eq);
- if (free_slots < 1) {
-spin:
- nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
- wanted - nolds, NULL);
- if (lepp_num_free_comp_slots(eq) < 1)
- goto spin;
+ if (lepp_num_free_comp_slots(eq) == 0) {
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
+ if (nolds == 0) {
+busy:
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
}
cmd_head = eq->cmd_head;
cmd_tail = eq->cmd_tail;
- /* NOTE: The "gotos" below are untested. */
-
/* Copy the commands, or fail. */
for (i = 0; i < num_frags; i++) {
/* Prepare to advance, detecting full queue. */
cmd_next = cmd_tail + cmd_size;
if (cmd_tail < cmd_head && cmd_next >= cmd_head)
- goto spin;
+ goto busy;
if (cmd_next > LEPP_CMD_LIMIT) {
cmd_next = 0;
if (cmd_next == cmd_head)
- goto spin;
+ goto busy;
}
/* Copy the command. */
@@ -2005,14 +2068,18 @@ spin:
eq->comp_tail = comp_tail;
/* Flush before allowing LEPP to handle the command. */
+ /* ISSUE: Is this the optimal location for the flush? */
__insn_mf();
eq->cmd_tail = cmd_tail;
- spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
-
+ /* NOTE: Using "4" here is more efficient than "0" or "2", */
+ /* and, strangely, more efficient than pre-checking the number */
+ /* of available completions, and comparing it to 4. */
if (nolds == 0)
- nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
+
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
/* Handle completions. */
for (i = 0; i < nolds; i++)
@@ -2261,7 +2328,6 @@ static struct net_device *tile_net_dev_init(const char *name)
int ret;
struct net_device *dev;
struct tile_net_priv *priv;
- struct page *page;
/*
* Allocate the device structure. This allocates "priv", calls
@@ -2285,23 +2351,21 @@ static struct net_device *tile_net_dev_init(const char *name)
INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
- spin_lock_init(&priv->cmd_lock);
- spin_lock_init(&priv->comp_lock);
+ spin_lock_init(&priv->eq_lock);
- /* Allocate "epp_queue". */
- BUG_ON(get_order(sizeof(lepp_queue_t)) != 0);
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
- if (!page) {
+ /* Allocate "eq". */
+ priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER);
+ if (!priv->eq_pages) {
free_netdev(dev);
return NULL;
}
- priv->epp_queue = page_address(page);
+ priv->eq = page_address(priv->eq_pages);
/* Register the network device. */
ret = register_netdev(dev);
if (ret) {
pr_err("register_netdev %s failed %d\n", dev->name, ret);
- free_page((unsigned long)priv->epp_queue);
+ __free_pages(priv->eq_pages, EQ_ORDER);
free_netdev(dev);
return NULL;
}
@@ -2310,7 +2374,7 @@ static struct net_device *tile_net_dev_init(const char *name)
ret = tile_net_get_mac(dev);
if (ret < 0) {
unregister_netdev(dev);
- free_page((unsigned long)priv->epp_queue);
+ __free_pages(priv->eq_pages, EQ_ORDER);
free_netdev(dev);
return NULL;
}
@@ -2321,6 +2385,9 @@ static struct net_device *tile_net_dev_init(const char *name)
/*
* Module cleanup.
+ *
+ * FIXME: If compiled as a module, this module cannot be "unloaded",
+ * because the "ingress interrupt handler" is registered permanently.
*/
static void tile_net_cleanup(void)
{
@@ -2331,8 +2398,8 @@ static void tile_net_cleanup(void)
struct net_device *dev = tile_net_devs[i];
struct tile_net_priv *priv = netdev_priv(dev);
unregister_netdev(dev);
- finv_buffer(priv->epp_queue, PAGE_SIZE);
- free_page((unsigned long)priv->epp_queue);
+ finv_buffer(priv->eq, EQ_SIZE);
+ __free_pages(priv->eq_pages, EQ_ORDER);
free_netdev(dev);
}
}
@@ -2355,7 +2422,12 @@ static int tile_net_init_module(void)
}
+module_init(tile_net_init_module);
+module_exit(tile_net_cleanup);
+
+
#ifndef MODULE
+
/*
* The "network_cpus" boot argument specifies the cpus that are dedicated
* to handle ingress packets.
@@ -2391,8 +2463,5 @@ static int __init network_cpus_setup(char *str)
return 0;
}
__setup("network_cpus=", network_cpus_setup);
-#endif
-
-module_init(tile_net_init_module);
-module_exit(tile_net_cleanup);
+#endif
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 105d7f0630cc..2de9b90c5f8f 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -171,7 +171,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->ip_summed == CHECKSUM_NONE)
skb->ip_summed = rcv_priv->ip_summed;
- length = skb->len + ETH_HLEN;
+ length = skb->len;
if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
goto rx_drop;
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
index dabdcfed4efd..609710d64eb5 100644
--- a/drivers/net/wan/lmc/Makefile
+++ b/drivers/net/wan/lmc/Makefile
@@ -14,4 +14,4 @@ lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
# -DDEBUG \
# -DLMC_PACKET_LOG
-EXTRA_CFLAGS += -I. $(DBGDEF)
+ccflags-y := -I. $(DBGDEF)
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index 30acd39d76a2..2c8f71f0ed45 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -30,9 +30,9 @@
/* Following defines can be used to remove unneeded parts of the driver, e.g.,
* to limit the size of the kernel module. Definitions can be added here in
- * hostap_config.h or they can be added to make command with EXTRA_CFLAGS,
+ * hostap_config.h or they can be added to make command with ccflags-y,
* e.g.,
- * 'make pccard EXTRA_CFLAGS="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"'
+ * 'make pccard ccflags-y="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"'
*/
/* Do not include debug messages into the driver */
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index 1907eafb9b16..5728a918e508 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -5,7 +5,5 @@ zd1211rw-objs := zd_chip.o zd_mac.o \
zd_rf_al7230b.o zd_rf_uw2453.o \
zd_rf.o zd_usb.o
-ifeq ($(CONFIG_ZD1211RW_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_ZD1211RW_DEBUG) := -DDEBUG
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 59f55441e075..b8ef8ddcc292 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -258,8 +258,10 @@ op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
*/
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
- unsigned long backtrace, int is_kernel, unsigned long event)
+ unsigned long backtrace, int is_kernel, unsigned long event,
+ struct task_struct *task)
{
+ struct task_struct *tsk = task ? task : current;
cpu_buf->sample_received++;
if (pc == ESCAPE_CODE) {
@@ -267,7 +269,7 @@ log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
return 0;
}
- if (op_add_code(cpu_buf, backtrace, is_kernel, current))
+ if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
goto fail;
if (op_add_sample(cpu_buf, pc, event))
@@ -292,7 +294,8 @@ static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel)
+ unsigned long event, int is_kernel,
+ struct task_struct *task)
{
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
unsigned long backtrace = oprofile_backtrace_depth;
@@ -301,7 +304,7 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
* if log_sample() fail we can't backtrace since we lost the
* source of this event
*/
- if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
+ if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
/* failed */
return;
@@ -313,10 +316,17 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
oprofile_end_trace(cpu_buf);
}
+void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
+ unsigned long event, int is_kernel,
+ struct task_struct *task)
+{
+ __oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
+}
+
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel)
{
- __oprofile_add_ext_sample(pc, regs, event, is_kernel);
+ __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
@@ -332,7 +342,7 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
pc = ESCAPE_CODE; /* as this causes an early return. */
}
- __oprofile_add_ext_sample(pc, regs, event, is_kernel);
+ __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}
/*
@@ -403,7 +413,7 @@ int oprofile_write_commit(struct op_entry *entry)
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
- log_sample(cpu_buf, pc, 0, is_kernel, event);
+ log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
}
void oprofile_add_trace(unsigned long pc)
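The new oprofile_add_ext_hw_sample() export mirrors oprofile_add_ext_sample() but lets the caller attribute the sample to an explicit task rather than current. A hypothetical caller, assuming the matching declaration lands in <linux/oprofile.h>:

    #include <linux/oprofile.h>
    #include <linux/sched.h>
    #include <linux/ptrace.h>

    /* Hypothetical: report a hardware-generated sample on behalf of a known task. */
    static void example_report_hw_sample(unsigned long pc, struct pt_regs *regs,
    				     int is_kernel, struct task_struct *tsk)
    {
    	/* event 0: plain PC sample; tsk may be a task other than current */
    	oprofile_add_ext_hw_sample(pc, regs, 0, is_kernel, tsk);
    }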
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 010725117dbb..3ef44624f510 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -97,7 +97,7 @@ static struct notifier_block __refdata oprofile_cpu_notifier = {
.notifier_call = oprofile_cpu_notify,
};
-int __init oprofile_timer_init(struct oprofile_operations *ops)
+int oprofile_timer_init(struct oprofile_operations *ops)
{
int rc;
@@ -113,7 +113,7 @@ int __init oprofile_timer_init(struct oprofile_operations *ops)
return 0;
}
-void __exit oprofile_timer_exit(void)
+void oprofile_timer_exit(void)
{
unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 9383063d2b16..bcd5d54b7d4d 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -296,25 +296,25 @@ static struct pci_port_ops dino_port_ops = {
.outl = dino_out32
};
-static void dino_mask_irq(unsigned int irq)
+static void dino_mask_irq(struct irq_data *d)
{
- struct dino_device *dino_dev = get_irq_chip_data(irq);
- int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
+ struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
+ int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
+ DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
/* Clear the matching bit in the IMR register */
dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
__raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
}
-static void dino_unmask_irq(unsigned int irq)
+static void dino_unmask_irq(struct irq_data *d)
{
- struct dino_device *dino_dev = get_irq_chip_data(irq);
- int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
+ struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
+ int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
+ DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
/*
** clear pending IRQ bits
@@ -346,9 +346,9 @@ static void dino_unmask_irq(unsigned int irq)
}
static struct irq_chip dino_interrupt_type = {
- .name = "GSC-PCI",
- .unmask = dino_unmask_irq,
- .mask = dino_mask_irq,
+ .name = "GSC-PCI",
+ .irq_unmask = dino_unmask_irq,
+ .irq_mask = dino_mask_irq,
};
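The dino hunk above and the eisa, gsc, iosapic and superio hunks that follow all apply the same mechanical conversion: the callbacks take struct irq_data * instead of a bare irq number, chip data comes from irq_data_get_irq_chip_data(), and the struct irq_chip members gain an irq_ prefix. Condensed into one hypothetical chip:

    #include <linux/irq.h>
    #include <linux/printk.h>

    /* New-style callback: struct irq_data replaces the unsigned int irq argument. */
    static void example_mask_irq(struct irq_data *d)
    {
    	void *chip_data = irq_data_get_irq_chip_data(d);

    	pr_debug("%s(%d) chip_data=%p\n", __func__, d->irq, chip_data);
    	/* ... mask d->irq in the device's interrupt mask register ... */
    }

    static struct irq_chip example_interrupt_type = {
    	.name		= "EXAMPLE",
    	.irq_mask	= example_mask_irq,
    	/* .irq_unmask, .irq_ack, .irq_eoi follow the same pattern */
    };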
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index e860038b0b84..deeec32a5803 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -144,8 +144,9 @@ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered *
/* called by free irq */
-static void eisa_mask_irq(unsigned int irq)
+static void eisa_mask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
unsigned long flags;
EISA_DBG("disable irq %d\n", irq);
@@ -164,8 +165,9 @@ static void eisa_mask_irq(unsigned int irq)
}
/* called by request irq */
-static void eisa_unmask_irq(unsigned int irq)
+static void eisa_unmask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
unsigned long flags;
EISA_DBG("enable irq %d\n", irq);
@@ -183,9 +185,9 @@ static void eisa_unmask_irq(unsigned int irq)
}
static struct irq_chip eisa_interrupt_type = {
- .name = "EISA",
- .unmask = eisa_unmask_irq,
- .mask = eisa_mask_irq,
+ .name = "EISA",
+ .irq_unmask = eisa_unmask_irq,
+ .irq_mask = eisa_mask_irq,
};
static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index 772b1939ac21..ef31080cf591 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -105,13 +105,13 @@ int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
return NO_IRQ;
}
-static void gsc_asic_mask_irq(unsigned int irq)
+static void gsc_asic_mask_irq(struct irq_data *d)
{
- struct gsc_asic *irq_dev = get_irq_chip_data(irq);
- int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
+ struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
+ int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
u32 imr;
- DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
+ DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
irq_dev->name, imr);
/* Disable the IRQ line by clearing the bit in the IMR */
@@ -120,13 +120,13 @@ static void gsc_asic_mask_irq(unsigned int irq)
gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
}
-static void gsc_asic_unmask_irq(unsigned int irq)
+static void gsc_asic_unmask_irq(struct irq_data *d)
{
- struct gsc_asic *irq_dev = get_irq_chip_data(irq);
- int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
+ struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
+ int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
u32 imr;
- DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
+ DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
irq_dev->name, imr);
/* Enable the IRQ line by setting the bit in the IMR */
@@ -140,9 +140,9 @@ static void gsc_asic_unmask_irq(unsigned int irq)
}
static struct irq_chip gsc_asic_interrupt_type = {
- .name = "GSC-ASIC",
- .unmask = gsc_asic_unmask_irq,
- .mask = gsc_asic_mask_irq,
+ .name = "GSC-ASIC",
+ .irq_unmask = gsc_asic_unmask_irq,
+ .irq_mask = gsc_asic_mask_irq,
};
int gsc_assign_irq(struct irq_chip *type, void *data)
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 0327894bf235..95930d016235 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -615,10 +615,10 @@ iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
}
-static void iosapic_mask_irq(unsigned int irq)
+static void iosapic_mask_irq(struct irq_data *d)
{
unsigned long flags;
- struct vector_info *vi = get_irq_chip_data(irq);
+ struct vector_info *vi = irq_data_get_irq_chip_data(d);
u32 d0, d1;
spin_lock_irqsave(&iosapic_lock, flags);
@@ -628,9 +628,9 @@ static void iosapic_mask_irq(unsigned int irq)
spin_unlock_irqrestore(&iosapic_lock, flags);
}
-static void iosapic_unmask_irq(unsigned int irq)
+static void iosapic_unmask_irq(struct irq_data *d)
{
- struct vector_info *vi = get_irq_chip_data(irq);
+ struct vector_info *vi = irq_data_get_irq_chip_data(d);
u32 d0, d1;
/* data is initialized by fixup_irq */
@@ -666,34 +666,34 @@ printk("\n");
* enables their IRQ. It can lead to "interesting" race conditions
* in the driver initialization sequence.
*/
- DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq,
+ DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", d->irq,
vi->eoi_addr, vi->eoi_data);
iosapic_eoi(vi->eoi_addr, vi->eoi_data);
}
-static void iosapic_eoi_irq(unsigned int irq)
+static void iosapic_eoi_irq(struct irq_data *d)
{
- struct vector_info *vi = get_irq_chip_data(irq);
+ struct vector_info *vi = irq_data_get_irq_chip_data(d);
iosapic_eoi(vi->eoi_addr, vi->eoi_data);
- cpu_eoi_irq(irq);
+ cpu_eoi_irq(d);
}
#ifdef CONFIG_SMP
-static int iosapic_set_affinity_irq(unsigned int irq,
- const struct cpumask *dest)
+static int iosapic_set_affinity_irq(struct irq_data *d,
+ const struct cpumask *dest, bool force)
{
- struct vector_info *vi = get_irq_chip_data(irq);
+ struct vector_info *vi = irq_data_get_irq_chip_data(d);
u32 d0, d1, dummy_d0;
unsigned long flags;
int dest_cpu;
- dest_cpu = cpu_check_affinity(irq, dest);
+ dest_cpu = cpu_check_affinity(d, dest);
if (dest_cpu < 0)
return -1;
- cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu));
- vi->txn_addr = txn_affinity_addr(irq, dest_cpu);
+ cpumask_copy(d->affinity, cpumask_of(dest_cpu));
+ vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
spin_lock_irqsave(&iosapic_lock, flags);
/* d1 contains the destination CPU, so only want to set that
@@ -708,13 +708,13 @@ static int iosapic_set_affinity_irq(unsigned int irq,
#endif
static struct irq_chip iosapic_interrupt_type = {
- .name = "IO-SAPIC-level",
- .unmask = iosapic_unmask_irq,
- .mask = iosapic_mask_irq,
- .ack = cpu_ack_irq,
- .eoi = iosapic_eoi_irq,
+ .name = "IO-SAPIC-level",
+ .irq_unmask = iosapic_unmask_irq,
+ .irq_mask = iosapic_mask_irq,
+ .irq_ack = cpu_ack_irq,
+ .irq_eoi = iosapic_eoi_irq,
#ifdef CONFIG_SMP
- .set_affinity = iosapic_set_affinity_irq,
+ .irq_set_affinity = iosapic_set_affinity_irq,
#endif
};
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 28241532c0fd..a4d8ff66a639 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -286,8 +286,9 @@ superio_init(struct pci_dev *pcidev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
-static void superio_mask_irq(unsigned int irq)
+static void superio_mask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
@@ -303,8 +304,9 @@ static void superio_mask_irq(unsigned int irq)
outb (r8,IC_PIC1+1);
}
-static void superio_unmask_irq(unsigned int irq)
+static void superio_unmask_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
@@ -320,9 +322,9 @@ static void superio_unmask_irq(unsigned int irq)
}
static struct irq_chip superio_interrupt_type = {
- .name = SUPERIO,
- .unmask = superio_unmask_irq,
- .mask = superio_mask_irq,
+ .name = SUPERIO,
+ .irq_unmask = superio_unmask_irq,
+ .irq_mask = superio_mask_irq,
};
#ifdef DEBUG_SUPERIO_INIT
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index a9523fdc6911..c8ff646c0b05 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -87,3 +87,5 @@ config PCI_IOAPIC
depends on ACPI
depends on HOTPLUG
default y
+
+select NLS if (DMI || ACPI)
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 77cf813ba264..98d61c8e984b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -54,8 +54,9 @@ obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
#
# ACPI Related PCI FW Functions
+# ACPI _DSM provided firmware instance and string name
#
-obj-$(CONFIG_ACPI) += pci-acpi.o
+obj-$(CONFIG_ACPI) += pci-acpi.o pci-label.o
# SMBIOS provided firmware instance and labels
obj-$(CONFIG_DMI) += pci-label.o
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index cb23aa2ebf96..e610cfe4f07b 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -212,6 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (pdev) {
+ pdev->current_state = PCI_D0;
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
pci_dev_put(pdev);
}
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 90c0a729cd3a..77cb2a14c896 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -5,6 +5,13 @@
* by Narendra K <Narendra_K@dell.com>,
* Jordan Hargrave <Jordan_Hargrave@dell.com>
*
+ * PCI Firmware Specification Revision 3.1 section 4.6.7 (DSM for Naming a
+ * PCI or PCI Express Device Under Operating Systems) defines an instance
+ * number and string name. This code retrieves them and exports them to sysfs.
+ * If the system firmware does not provide the ACPI _DSM (Device Specific
+ * Method), then the SMBIOS type 41 instance number and string is exported to
+ * sysfs.
+ *
* SMBIOS defines type 41 for onboard pci devices. This code retrieves
* the instance number and string from the type 41 record and exports
* it to sysfs.
@@ -19,8 +26,29 @@
#include <linux/pci_ids.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/nls.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <acpi/acpi_bus.h>
#include "pci.h"
+#define DEVICE_LABEL_DSM 0x07
+
+#ifndef CONFIG_DMI
+
+static inline int
+pci_create_smbiosname_file(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline void
+pci_remove_smbiosname_file(struct pci_dev *pdev)
+{
+}
+
+#else
+
enum smbios_attr_enum {
SMBIOS_ATTR_NONE = 0,
SMBIOS_ATTR_LABEL_SHOW,
@@ -120,9 +148,7 @@ static struct attribute_group smbios_attr_group = {
static int
pci_create_smbiosname_file(struct pci_dev *pdev)
{
- if (!sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group))
- return 0;
- return -ENODEV;
+ return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group);
}
static void
@@ -131,13 +157,227 @@ pci_remove_smbiosname_file(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group);
}
+#endif
+
+#ifndef CONFIG_ACPI
+
+static inline int
+pci_create_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline int
+pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline bool
+device_has_dsm(struct device *dev)
+{
+ return false;
+}
+
+#else
+
+static const char device_label_dsm_uuid[] = {
+ 0xD0, 0x37, 0xC9, 0xE5, 0x53, 0x35, 0x7A, 0x4D,
+ 0x91, 0x17, 0xEA, 0x4D, 0x19, 0xC3, 0x43, 0x4D
+};
+
+enum acpi_attr_enum {
+ ACPI_ATTR_NONE = 0,
+ ACPI_ATTR_LABEL_SHOW,
+ ACPI_ATTR_INDEX_SHOW,
+};
+
+static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
+{
+ int len;
+ len = utf16s_to_utf8s((const wchar_t *)obj->
+ package.elements[1].string.pointer,
+ obj->package.elements[1].string.length,
+ UTF16_LITTLE_ENDIAN,
+ buf, PAGE_SIZE);
+ buf[len] = '\n';
+}
+
+static int
+dsm_get_label(acpi_handle handle, int func,
+ struct acpi_buffer *output,
+ char *buf, enum acpi_attr_enum attribute)
+{
+ struct acpi_object_list input;
+ union acpi_object params[4];
+ union acpi_object *obj;
+ int len = 0;
+
+ int err;
+
+ input.count = 4;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(device_label_dsm_uuid);
+ params[0].buffer.pointer = (char *)device_label_dsm_uuid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = 0x02;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = func;
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
+
+ err = acpi_evaluate_object(handle, "_DSM", &input, output);
+ if (err)
+ return -1;
+
+ obj = (union acpi_object *)output->pointer;
+
+ switch (obj->type) {
+ case ACPI_TYPE_PACKAGE:
+ if (obj->package.count != 2)
+ break;
+ len = obj->package.elements[0].integer.value;
+ if (buf) {
+ if (attribute == ACPI_ATTR_INDEX_SHOW)
+ scnprintf(buf, PAGE_SIZE, "%llu\n",
+ obj->package.elements[0].integer.value);
+ else if (attribute == ACPI_ATTR_LABEL_SHOW)
+ dsm_label_utf16s_to_utf8s(obj, buf);
+ kfree(output->pointer);
+ return strlen(buf);
+ }
+ kfree(output->pointer);
+ return len;
+ break;
+ default:
+ kfree(output->pointer);
+ }
+ return -1;
+}
+
+static bool
+device_has_dsm(struct device *dev)
+{
+ acpi_handle handle;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+
+ handle = DEVICE_ACPI_HANDLE(dev);
+
+ if (!handle)
+ return FALSE;
+
+ if (dsm_get_label(handle, DEVICE_LABEL_DSM, &output, NULL,
+ ACPI_ATTR_NONE) > 0)
+ return TRUE;
+
+ return FALSE;
+}
+
+static mode_t
+acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct device *dev;
+
+ dev = container_of(kobj, struct device, kobj);
+
+ if (device_has_dsm(dev))
+ return S_IRUGO;
+
+ return 0;
+}
+
+static ssize_t
+acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_handle handle;
+ int length;
+
+ handle = DEVICE_ACPI_HANDLE(dev);
+
+ if (!handle)
+ return -1;
+
+ length = dsm_get_label(handle, DEVICE_LABEL_DSM,
+ &output, buf, ACPI_ATTR_LABEL_SHOW);
+
+ if (length < 1)
+ return -1;
+
+ return length;
+}
+
+static ssize_t
+acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_handle handle;
+ int length;
+
+ handle = DEVICE_ACPI_HANDLE(dev);
+
+ if (!handle)
+ return -1;
+
+ length = dsm_get_label(handle, DEVICE_LABEL_DSM,
+ &output, buf, ACPI_ATTR_INDEX_SHOW);
+
+ if (length < 0)
+ return -1;
+
+ return length;
+
+}
+
+static struct device_attribute acpi_attr_label = {
+ .attr = {.name = "label", .mode = 0444},
+ .show = acpilabel_show,
+};
+
+static struct device_attribute acpi_attr_index = {
+ .attr = {.name = "acpi_index", .mode = 0444},
+ .show = acpiindex_show,
+};
+
+static struct attribute *acpi_attributes[] = {
+ &acpi_attr_label.attr,
+ &acpi_attr_index.attr,
+ NULL,
+};
+
+static struct attribute_group acpi_attr_group = {
+ .attrs = acpi_attributes,
+ .is_visible = acpi_index_string_exist,
+};
+
+static int
+pci_create_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group);
+}
+
+static int
+pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+{
+ sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group);
+ return 0;
+}
+#endif
+
void pci_create_firmware_label_files(struct pci_dev *pdev)
{
- if (!pci_create_smbiosname_file(pdev))
- ;
+ if (device_has_dsm(&pdev->dev))
+ pci_create_acpi_index_label_files(pdev);
+ else
+ pci_create_smbiosname_file(pdev);
}
void pci_remove_firmware_label_files(struct pci_dev *pdev)
{
- pci_remove_smbiosname_file(pdev);
+ if (device_has_dsm(&pdev->dev))
+ pci_remove_acpi_index_label_files(pdev);
+ else
+ pci_remove_smbiosname_file(pdev);
}
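Whichever path wins, the result is a sysfs attribute named "label" under the PCI device (plus "acpi_index" when the _DSM path is used). A small hypothetical userspace reader, with a made-up device address:

    #include <stdio.h>

    int main(void)
    {
    	/* Hypothetical device address; substitute a real one from lspci. */
    	const char *path = "/sys/bus/pci/devices/0000:02:00.0/label";
    	char label[256];
    	FILE *f = fopen(path, "r");

    	if (!f)
    		return 1;
    	if (fgets(label, sizeof(label), f))
    		printf("firmware label: %s", label);
    	fclose(f);
    	return 0;
    }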
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index ea25e5bfcf23..a8a277a2e0d0 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -369,7 +369,7 @@ pci_read_config(struct file *filp, struct kobject *kobj,
u8 *data = (u8*) buf;
/* Several chips lock up trying to read undefined config space */
- if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) {
+ if (security_capable(&init_user_ns, filp->f_cred, CAP_SYS_ADMIN) == 0) {
size = dev->cfg_size;
} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
size = 128;
@@ -1088,7 +1088,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
attr->write = write_vpd_attr;
retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
if (retval) {
- kfree(dev->vpd->attr);
+ kfree(attr);
return retval;
}
dev->vpd->attr = attr;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f69d6e0fda75..a6ec200fe5ee 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -11,7 +11,7 @@
extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env);
extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
-#ifndef CONFIG_DMI
+#if !defined(CONFIG_DMI) && !defined(CONFIG_ACPI)
static inline void pci_create_firmware_label_files(struct pci_dev *pdev)
{ return; }
static inline void pci_remove_firmware_label_files(struct pci_dev *pdev)
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index b3cf6223f63a..f62079ff06dd 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -27,6 +27,10 @@
#include <linux/stddef.h>
#include "aerdrv.h"
+/* Override the existing corrected and uncorrected error masks */
+static int aer_mask_override;
+module_param(aer_mask_override, bool, 0);
+
struct aer_error_inj {
u8 bus;
u8 dev;
@@ -322,7 +326,7 @@ static int aer_inject(struct aer_error_inj *einj)
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
- u32 sever, cor_mask, uncor_mask;
+ u32 sever, cor_mask, uncor_mask, cor_mask_orig, uncor_mask_orig;
int ret = 0;
dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
@@ -361,6 +365,18 @@ static int aer_inject(struct aer_error_inj *einj)
goto out_put;
}
+ if (aer_mask_override) {
+ cor_mask_orig = cor_mask;
+ cor_mask &= ~einj->cor_status;
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
+ cor_mask);
+
+ uncor_mask_orig = uncor_mask;
+ uncor_mask &= ~einj->uncor_status;
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
+ uncor_mask);
+ }
+
spin_lock_irqsave(&inject_lock, flags);
err = __find_aer_error_by_dev(dev);
@@ -378,14 +394,16 @@ static int aer_inject(struct aer_error_inj *einj)
err->header_log2 = einj->header_log2;
err->header_log3 = einj->header_log3;
- if (einj->cor_status && !(einj->cor_status & ~cor_mask)) {
+ if (!aer_mask_override && einj->cor_status &&
+ !(einj->cor_status & ~cor_mask)) {
ret = -EINVAL;
printk(KERN_WARNING "The correctable error(s) is masked "
"by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
- if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) {
+ if (!aer_mask_override && einj->uncor_status &&
+ !(einj->uncor_status & ~uncor_mask)) {
ret = -EINVAL;
printk(KERN_WARNING "The uncorrectable error(s) is masked "
"by device\n");
@@ -425,6 +443,13 @@ static int aer_inject(struct aer_error_inj *einj)
}
spin_unlock_irqrestore(&inject_lock, flags);
+ if (aer_mask_override) {
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
+ cor_mask_orig);
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
+ uncor_mask_orig);
+ }
+
ret = pci_bus_set_aer_ops(dev->bus);
if (ret)
goto out_put;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c84900da3c59..44cbbbaa499d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -764,6 +764,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
if (pci_find_bus(pci_domain_nr(bus), max+1))
goto out;
child = pci_add_new_bus(bus, dev, ++max);
+ if (!child)
+ goto out;
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
| ((unsigned int)(child->secondary) << 8)
@@ -777,7 +779,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
buses &= ~0xff000000;
buses |= CARDBUS_LATENCY_TIMER << 24;
}
-
+
/*
* We need to blast all three values with a single write.
*/
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 53a786fd0d40..bd80f6378463 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -533,6 +533,17 @@ static void __devinit quirk_piix4_acpi(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
+#define ICH_PMBASE 0x40
+#define ICH_ACPI_CNTL 0x44
+#define ICH4_ACPI_EN 0x10
+#define ICH6_ACPI_EN 0x80
+#define ICH4_GPIOBASE 0x58
+#define ICH4_GPIO_CNTL 0x5c
+#define ICH4_GPIO_EN 0x10
+#define ICH6_GPIOBASE 0x48
+#define ICH6_GPIO_CNTL 0x4c
+#define ICH6_GPIO_EN 0x10
+
/*
* ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
* 0x40 (128 bytes of ACPI, GPIO & TCO registers)
@@ -541,12 +552,33 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, qui
static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
u32 region;
+ u8 enable;
- pci_read_config_dword(dev, 0x40, &region);
- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO");
+ /*
+ * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict
+ * with low legacy (and fixed) ports. We don't know the decoding
+ * priority and can't tell whether the legacy device or the one created
+ * here is really at that address. This happens on boards with broken
+ * BIOSes.
+ */
+
+ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
+ if (enable & ICH4_ACPI_EN) {
+ pci_read_config_dword(dev, ICH_PMBASE, &region);
+ region &= PCI_BASE_ADDRESS_IO_MASK;
+ if (region >= PCIBIOS_MIN_IO)
+ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES,
+ "ICH4 ACPI/GPIO/TCO");
+ }
- pci_read_config_dword(dev, 0x58, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO");
+ pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
+ if (enable & ICH4_GPIO_EN) {
+ pci_read_config_dword(dev, ICH4_GPIOBASE, &region);
+ region &= PCI_BASE_ADDRESS_IO_MASK;
+ if (region >= PCIBIOS_MIN_IO)
+ quirk_io_region(dev, region, 64,
+ PCI_BRIDGE_RESOURCES + 1, "ICH4 GPIO");
+ }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
@@ -562,12 +594,25 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, qui
static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
u32 region;
+ u8 enable;
- pci_read_config_dword(dev, 0x40, &region);
- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO");
+ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
+ if (enable & ICH6_ACPI_EN) {
+ pci_read_config_dword(dev, ICH_PMBASE, &region);
+ region &= PCI_BASE_ADDRESS_IO_MASK;
+ if (region >= PCIBIOS_MIN_IO)
+ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES,
+ "ICH6 ACPI/GPIO/TCO");
+ }
- pci_read_config_dword(dev, 0x48, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
+ pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
+ if (enable & ICH6_GPIO_EN) {
+ pci_read_config_dword(dev, ICH6_GPIOBASE, &region);
+ region &= PCI_BASE_ADDRESS_IO_MASK;
+ if (region >= PCIBIOS_MIN_IO)
+ quirk_io_region(dev, region, 64,
+ PCI_BRIDGE_RESOURCES + 1, "ICH6 GPIO");
+ }
}
static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
@@ -2618,58 +2663,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
#endif /* CONFIG_PCI_MSI */
-#ifdef CONFIG_PCI_IOV
-
-/*
- * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the
- * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the
- * old Flash Memory Space.
- */
-static void __devinit quirk_i82576_sriov(struct pci_dev *dev)
-{
- int pos, flags;
- u32 bar, start, size;
-
- if (PAGE_SIZE > 0x10000)
- return;
-
- flags = pci_resource_flags(dev, 0);
- if ((flags & PCI_BASE_ADDRESS_SPACE) !=
- PCI_BASE_ADDRESS_SPACE_MEMORY ||
- (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) !=
- PCI_BASE_ADDRESS_MEM_TYPE_32)
- return;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
- if (!pos)
- return;
-
- pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar);
- if (bar & PCI_BASE_ADDRESS_MEM_MASK)
- return;
-
- start = pci_resource_start(dev, 1);
- size = pci_resource_len(dev, 1);
- if (!start || size != 0x400000 || start & (size - 1))
- return;
-
- pci_resource_flags(dev, 1) = 0;
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0);
- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start);
- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2);
-
- dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n");
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
-
-#endif /* CONFIG_PCI_IOV */
-
/* Allow manual resource allocation for PCI hotplug bridges
* via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
* some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 66cb8f4cc5f4..89d0a6a88df7 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -33,11 +33,32 @@ struct resource_list_x {
struct pci_dev *dev;
resource_size_t start;
resource_size_t end;
+ resource_size_t add_size;
unsigned long flags;
};
-static void add_to_failed_list(struct resource_list_x *head,
- struct pci_dev *dev, struct resource *res)
+#define free_list(type, head) do { \
+ struct type *list, *tmp; \
+ for (list = (head)->next; list;) { \
+ tmp = list; \
+ list = list->next; \
+ kfree(tmp); \
+ } \
+ (head)->next = NULL; \
+} while (0)
+
+/**
+ * add_to_list() - add a new resource tracker to the list
+ * @head: Head of the list
+ * @dev: device to which the resource belongs
+ * @res: The resource to be tracked
+ * @add_size: additional size to be optionally added
+ * to the resource
+ */
+static void add_to_list(struct resource_list_x *head,
+ struct pci_dev *dev, struct resource *res,
+ resource_size_t add_size)
{
struct resource_list_x *list = head;
struct resource_list_x *ln = list->next;
@@ -45,7 +66,7 @@ static void add_to_failed_list(struct resource_list_x *head,
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
- pr_warning("add_to_failed_list: kmalloc() failed!\n");
+ pr_warning("add_to_list: kmalloc() failed!\n");
return;
}
@@ -55,20 +76,14 @@ static void add_to_failed_list(struct resource_list_x *head,
tmp->start = res->start;
tmp->end = res->end;
tmp->flags = res->flags;
+ tmp->add_size = add_size;
list->next = tmp;
}
-static void free_failed_list(struct resource_list_x *head)
+static void add_to_failed_list(struct resource_list_x *head,
+ struct pci_dev *dev, struct resource *res)
{
- struct resource_list_x *list, *tmp;
-
- for (list = head->next; list;) {
- tmp = list;
- list = list->next;
- kfree(tmp);
- }
-
- head->next = NULL;
+ add_to_list(head, dev, res, 0);
}
static void __dev_sort_resources(struct pci_dev *dev,
@@ -91,18 +106,88 @@ static void __dev_sort_resources(struct pci_dev *dev,
pdev_sort_resources(dev, head);
}
-static void __assign_resources_sorted(struct resource_list *head,
- struct resource_list_x *fail_head)
+static inline void reset_resource(struct resource *res)
+{
+ res->start = 0;
+ res->end = 0;
+ res->flags = 0;
+}
+
+/**
+ * adjust_resources_sorted() - satisfy any additional resource requests
+ *
+ * @add_head : head of the list tracking requests requiring additional
+ * resources
+ * @head : head of the list tracking requests with allocated
+ * resources
+ *
+ * Walk through each element of the add_head and try to procure
+ * additional resources for the element, provided the element
+ * is in the head list.
+ */
+static void adjust_resources_sorted(struct resource_list_x *add_head,
+ struct resource_list *head)
{
struct resource *res;
- struct resource_list *list, *tmp;
+ struct resource_list_x *list, *tmp, *prev;
+ struct resource_list *hlist;
+ resource_size_t add_size;
int idx;
- for (list = head->next; list;) {
+ prev = add_head;
+ for (list = add_head->next; list;) {
res = list->res;
+ /* skip resource that has been reset */
+ if (!res->flags)
+ goto out;
+
+ /* skip this resource if not found in head list */
+ for (hlist = head->next; hlist && hlist->res != res;
+ hlist = hlist->next);
+ if (!hlist) { /* just skip */
+ prev = list;
+ list = list->next;
+ continue;
+ }
+
idx = res - &list->dev->resource[0];
+ add_size = list->add_size;
+ if (!resource_size(res) && add_size) {
+ res->end = res->start + add_size - 1;
+ if (pci_assign_resource(list->dev, idx))
+ reset_resource(res);
+ } else if (add_size) {
+ adjust_resource(res, res->start,
+ resource_size(res) + add_size);
+ }
+out:
+ tmp = list;
+ prev->next = list = list->next;
+ kfree(tmp);
+ }
+}
+
+/**
+ * assign_requested_resources_sorted() - satisfy resource requests
+ *
+ * @head : head of the list tracking requests for resources
+ * @failed_list : head of the list tracking requests that could
+ * not be allocated
+ *
+ * Satisfy resource requests of each element in the list. Add
+ * requests that could not be satisfied to the failed_list.
+ */
+static void assign_requested_resources_sorted(struct resource_list *head,
+ struct resource_list_x *fail_head)
+{
+ struct resource *res;
+ struct resource_list *list;
+ int idx;
- if (pci_assign_resource(list->dev, idx)) {
+ for (list = head->next; list; list = list->next) {
+ res = list->res;
+ idx = res - &list->dev->resource[0];
+ if (resource_size(res) && pci_assign_resource(list->dev, idx)) {
if (fail_head && !pci_is_root_bus(list->dev->bus)) {
/*
* if the failed res is for ROM BAR, and it will
@@ -112,16 +197,25 @@ static void __assign_resources_sorted(struct resource_list *head,
(!(res->flags & IORESOURCE_ROM_ENABLE))))
add_to_failed_list(fail_head, list->dev, res);
}
- res->start = 0;
- res->end = 0;
- res->flags = 0;
+ reset_resource(res);
}
- tmp = list;
- list = list->next;
- kfree(tmp);
}
}
+static void __assign_resources_sorted(struct resource_list *head,
+ struct resource_list_x *add_head,
+ struct resource_list_x *fail_head)
+{
+ /* Satisfy the must-have resource requests */
+ assign_requested_resources_sorted(head, fail_head);
+
+ /* Try to satisfy any additional nice-to-have resource requests */
+ if (add_head)
+ adjust_resources_sorted(add_head, head);
+ free_list(resource_list, head);
+}
+
static void pdev_assign_resources_sorted(struct pci_dev *dev,
struct resource_list_x *fail_head)
{
@@ -129,11 +223,12 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
head.next = NULL;
__dev_sort_resources(dev, &head);
- __assign_resources_sorted(&head, fail_head);
+ __assign_resources_sorted(&head, NULL, fail_head);
}
static void pbus_assign_resources_sorted(const struct pci_bus *bus,
+ struct resource_list_x *add_head,
struct resource_list_x *fail_head)
{
struct pci_dev *dev;
@@ -143,7 +238,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
list_for_each_entry(dev, &bus->devices, bus_list)
__dev_sort_resources(dev, &head);
- __assign_resources_sorted(&head, fail_head);
+ __assign_resources_sorted(&head, add_head, fail_head);
}
void pci_setup_cardbus(struct pci_bus *bus)
@@ -404,15 +499,62 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
return NULL;
}
-/* Sizing the IO windows of the PCI-PCI bridge is trivial,
- since these windows have 4K granularity and the IO ranges
- of non-bridge PCI devices are limited to 256 bytes.
- We must be careful with the ISA aliasing though. */
-static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
+static resource_size_t calculate_iosize(resource_size_t size,
+ resource_size_t min_size,
+ resource_size_t size1,
+ resource_size_t old_size,
+ resource_size_t align)
+{
+ if (size < min_size)
+ size = min_size;
+ if (old_size == 1)
+ old_size = 0;
+ /* To be fixed in 2.5: we should have sort of HAVE_ISA
+ flag in the struct pci_bus. */
+#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
+ size = (size & 0xff) + ((size & ~0xffUL) << 2);
+#endif
+ size = ALIGN(size + size1, align);
+ if (size < old_size)
+ size = old_size;
+ return size;
+}
+
+static resource_size_t calculate_memsize(resource_size_t size,
+ resource_size_t min_size,
+ resource_size_t size1,
+ resource_size_t old_size,
+ resource_size_t align)
+{
+ if (size < min_size)
+ size = min_size;
+ if (old_size == 1)
+ old_size = 0;
+ if (size < old_size)
+ size = old_size;
+ size = ALIGN(size + size1, align);
+ return size;
+}
+
+/**
+ * pbus_size_io() - size the io window of a given bus
+ *
+ * @bus : the bus
+ * @min_size : the minimum io window that must be allocated
+ * @add_size : additional optional io window
+ * @add_head : track the additional io window on this list
+ *
+ * Sizing the IO windows of the PCI-PCI bridge is trivial,
+ * since these windows have 4K granularity and the IO ranges
+ * of non-bridge PCI devices are limited to 256 bytes.
+ * We must be careful with the ISA aliasing though.
+ */
+static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
+ resource_size_t add_size, struct resource_list_x *add_head)
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
- unsigned long size = 0, size1 = 0, old_size;
+ unsigned long size = 0, size0 = 0, size1 = 0;
if (!b_res)
return;
@@ -435,20 +577,12 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
size1 += r_size;
}
}
- if (size < min_size)
- size = min_size;
- old_size = resource_size(b_res);
- if (old_size == 1)
- old_size = 0;
-/* To be fixed in 2.5: we should have sort of HAVE_ISA
- flag in the struct pci_bus. */
-#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
- size = (size & 0xff) + ((size & ~0xffUL) << 2);
-#endif
- size = ALIGN(size + size1, 4096);
- if (size < old_size)
- size = old_size;
- if (!size) {
+ size0 = calculate_iosize(size, min_size, size1,
+ resource_size(b_res), 4096);
+ size1 = !add_size ? size0 :
+ calculate_iosize(size, min_size+add_size, size1,
+ resource_size(b_res), 4096);
+ if (!size0 && !size1) {
if (b_res->start || b_res->end)
dev_info(&bus->self->dev, "disabling bridge window "
"%pR to [bus %02x-%02x] (unused)\n", b_res,
@@ -458,17 +592,30 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
}
/* Alignment of the IO window is always 4K */
b_res->start = 4096;
- b_res->end = b_res->start + size - 1;
+ b_res->end = b_res->start + size0 - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
+ if (size1 > size0 && add_head)
+ add_to_list(add_head, bus->self, b_res, size1-size0);
}
-/* Calculate the size of the bus and minimal alignment which
- guarantees that all child resources fit in this size. */
+/**
+ * pbus_size_mem() - size the memory window of a given bus
+ *
+ * @bus : the bus
+ * @min_size : the minimum memory window that must be allocated
+ * @add_size : additional optional memory window
+ * @add_head : track the additional memory window on this list
+ *
+ * Calculate the size of the bus and minimal alignment which
+ * guarantees that all child resources fit in this size.
+ */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
- unsigned long type, resource_size_t min_size)
+ unsigned long type, resource_size_t min_size,
+ resource_size_t add_size,
+ struct resource_list_x *add_head)
{
struct pci_dev *dev;
- resource_size_t min_align, align, size, old_size;
+ resource_size_t min_align, align, size, size0, size1;
resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus, type);
@@ -516,14 +663,6 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
mem64_mask &= r->flags & IORESOURCE_MEM_64;
}
}
- if (size < min_size)
- size = min_size;
- old_size = resource_size(b_res);
- if (old_size == 1)
- old_size = 0;
- if (size < old_size)
- size = old_size;
-
align = 0;
min_align = 0;
for (order = 0; order <= max_order; order++) {
@@ -537,8 +676,11 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
min_align = align1 >> 1;
align += aligns[order];
}
- size = ALIGN(size, min_align);
- if (!size) {
+ size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), align);
+ size1 = !add_size ? size :
+ calculate_memsize(size, min_size+add_size, 0,
+ resource_size(b_res), align);
+ if (!size0 && !size1) {
if (b_res->start || b_res->end)
dev_info(&bus->self->dev, "disabling bridge window "
"%pR to [bus %02x-%02x] (unused)\n", b_res,
@@ -547,9 +689,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
return 1;
}
b_res->start = min_align;
- b_res->end = size + min_align - 1;
- b_res->flags |= IORESOURCE_STARTALIGN;
- b_res->flags |= mem64_mask;
+ b_res->end = size0 + min_align - 1;
+ b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
+ if (size1 > size0 && add_head)
+ add_to_list(add_head, bus->self, b_res, size1-size0);
return 1;
}
@@ -602,11 +745,12 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
}
}
-void __ref pci_bus_size_bridges(struct pci_bus *bus)
+void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+ struct resource_list_x *add_head)
{
struct pci_dev *dev;
unsigned long mask, prefmask;
- resource_size_t min_mem_size = 0, min_io_size = 0;
+ resource_size_t additional_mem_size = 0, additional_io_size = 0;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -620,7 +764,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
case PCI_CLASS_BRIDGE_PCI:
default:
- pci_bus_size_bridges(b);
+ __pci_bus_size_bridges(b, add_head);
break;
}
}
@@ -637,11 +781,14 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
case PCI_CLASS_BRIDGE_PCI:
pci_bridge_check_ranges(bus);
if (bus->self->is_hotplug_bridge) {
- min_io_size = pci_hotplug_io_size;
- min_mem_size = pci_hotplug_mem_size;
+ additional_io_size = pci_hotplug_io_size;
+ additional_mem_size = pci_hotplug_mem_size;
}
+ /*
+ * Fall through
+ */
default:
- pbus_size_io(bus, min_io_size);
+ pbus_size_io(bus, 0, additional_io_size, add_head);
/* If the bridge supports prefetchable range, size it
separately. If it doesn't, or its prefetchable window
has already been allocated by arch code, try
@@ -649,30 +796,36 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
resources. */
mask = IORESOURCE_MEM;
prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
+ if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head))
mask = prefmask; /* Success, size non-prefetch only. */
else
- min_mem_size += min_mem_size;
- pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
+ additional_mem_size += additional_mem_size;
+ pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head);
break;
}
}
+
+void __ref pci_bus_size_bridges(struct pci_bus *bus)
+{
+ __pci_bus_size_bridges(bus, NULL);
+}
EXPORT_SYMBOL(pci_bus_size_bridges);
static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct resource_list_x *add_head,
struct resource_list_x *fail_head)
{
struct pci_bus *b;
struct pci_dev *dev;
- pbus_assign_resources_sorted(bus, fail_head);
+ pbus_assign_resources_sorted(bus, add_head, fail_head);
list_for_each_entry(dev, &bus->devices, bus_list) {
b = dev->subordinate;
if (!b)
continue;
- __pci_bus_assign_resources(b, fail_head);
+ __pci_bus_assign_resources(b, add_head, fail_head);
switch (dev->class >> 8) {
case PCI_CLASS_BRIDGE_PCI:
@@ -694,7 +847,7 @@ static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
- __pci_bus_assign_resources(bus, NULL);
+ __pci_bus_assign_resources(bus, NULL, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
@@ -709,7 +862,7 @@ static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
if (!b)
return;
- __pci_bus_assign_resources(b, fail_head);
+ __pci_bus_assign_resources(b, NULL, fail_head);
switch (bridge->class >> 8) {
case PCI_CLASS_BRIDGE_PCI:
@@ -842,17 +995,21 @@ void __init
pci_assign_unassigned_resources(void)
{
struct pci_bus *bus;
-
+ struct resource_list_x add_list; /* list of resources that
+ want additional resources */
+ add_list.next = NULL;
/* Depth first, calculate sizes and alignments of all
subordinate buses. */
list_for_each_entry(bus, &pci_root_buses, node) {
- pci_bus_size_bridges(bus);
+ __pci_bus_size_bridges(bus, &add_list);
}
+
/* Depth last, allocate resources and update the hardware. */
list_for_each_entry(bus, &pci_root_buses, node) {
- pci_bus_assign_resources(bus);
+ __pci_bus_assign_resources(bus, &add_list, NULL);
pci_enable_bridges(bus);
}
+ BUG_ON(add_list.next);
/* dump the resource on buses */
list_for_each_entry(bus, &pci_root_buses, node) {
@@ -882,7 +1039,7 @@ again:
if (tried_times >= 2) {
/* still fail, don't need to try more */
- free_failed_list(&head);
+ free_list(resource_list_x, &head);
goto enable_all;
}
@@ -913,7 +1070,7 @@ again:
list = list->next;
}
- free_failed_list(&head);
+ free_list(resource_list_x, &head);
goto again;
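
The pbus_size_mem() changes above drop the open-coded clamping against min_size and the window's previous size and compute two candidate sizes instead: size0 without the optional add_size and size1 with it. When size1 exceeds size0, the difference is queued on add_head via add_to_list() so it can be handed out later if space allows. calculate_memsize() itself is defined outside these hunks; the sketch below is a plausible reconstruction from the removed lines and the call sites, not the verbatim helper.

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/ioport.h>	/* resource_size_t */

/*
 * Reconstructed sketch only: clamp the computed size against the requested
 * minimum and the window's previous size, then align.  Parameter names are
 * guesses based on the call sites above; the third argument is 0 at both.
 */
static resource_size_t calculate_memsize(resource_size_t size,
		resource_size_t min_size, resource_size_t size1,
		resource_size_t old_size, resource_size_t align)
{
	if (size < min_size)
		size = min_size;
	if (old_size == 1)	/* resource_size() of an unset window */
		old_size = 0;
	if (size < old_size)
		size = old_size;
	return ALIGN(size + size1, align);
}
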
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index a59af5b24f0a..222dfb737b11 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -138,6 +138,24 @@ config TC1100_WMI
This is a driver for the WMI extensions (wireless and bluetooth power
control) of the HP Compaq TC1100 tablet.
+config HP_ACCEL
+ tristate "HP laptop accelerometer"
+ depends on INPUT && ACPI
+ select SENSORS_LIS3LV02D
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ This driver provides support for the "Mobile Data Protection System 3D"
+ or "3D DriveGuard" feature of HP laptops. On such systems the driver
+ should load automatically (via ACPI alias).
+
+ Support for an LED indicating disk protection is provided as
+ hp::hddprotect. For more information on the feature, refer to
+ Documentation/hwmon/lis3lv02d.
+
+ To compile this driver as a module, choose M here: the module will
+ be called hp_accel.
+
config HP_WMI
tristate "HP WMI extras"
depends on ACPI_WMI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 4ec4ff8f9182..299aefb3e74c 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ACERHDF) += acerhdf.o
+obj-$(CONFIG_HP_ACCEL) += hp_accel.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 38b34a73866a..c9784705f6ac 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -39,7 +39,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <linux/dmi.h>
#include <acpi/acpi_drivers.h>
@@ -1032,6 +1031,7 @@ static int __devinit acer_backlight_init(struct device *dev)
struct backlight_device *bd;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max_brightness;
bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops,
&props);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f3aa6a7fdab6..5a6f7d7575d6 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -667,6 +667,7 @@ static int asus_backlight_init(struct asus_laptop *asus)
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = 15;
+ props.type = BACKLIGHT_PLATFORM;
bd = backlight_device_register(ASUS_LAPTOP_FILE,
&asus->platform_device->dev, asus,
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index fe495939c307..f503607c0645 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1507,6 +1507,7 @@ static int __init asus_acpi_init(void)
}
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = 15;
asus_backlight_device = backlight_device_register("asus", NULL, NULL,
&asus_backlight_data,
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 911135425224..94f93b621d7b 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -564,6 +564,7 @@ static int cmpc_ipml_add(struct acpi_device *acpi)
return -ENOMEM;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = 7;
ipml->bd = backlight_device_register("cmpc_bl", &acpi->dev,
acpi->handle, &cmpc_bl_ops,
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 034572b980c9..eb95878fa583 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -970,6 +970,7 @@ static int __init compal_init(void)
if (!acpi_video_backlight_support()) {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = BACKLIGHT_LEVEL_MAX;
compalbl_device = backlight_device_register(DRIVER_NAME,
NULL, NULL,
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ad24ef36f9f7..de301aa8e5c3 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -671,6 +671,7 @@ static int __init dell_init(void)
if (max_intensity) {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max_intensity;
dell_backlight_device = backlight_device_register("dell_backlight",
&platform_device->dev,
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 49d9ad708f89..6605beac0d0e 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1147,6 +1147,7 @@ static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
struct backlight_device *bd;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = 15;
bd = backlight_device_register(EEEPC_LAPTOP_FILE,
&eeepc->platform_device->dev, eeepc,
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 95e3b0948e9c..493054c2dbe1 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1128,6 +1128,7 @@ static int __init fujitsu_init(void)
memset(&props, 0, sizeof(struct backlight_properties));
max_brightness = fujitsu->max_brightness;
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max_brightness - 1;
fujitsu->bl_device = backlight_device_register("fujitsu-laptop",
NULL, NULL,
diff --git a/drivers/hwmon/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 3d21fa2b97cd..1b52d00e2f90 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -35,11 +35,11 @@
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/leds.h>
+#include <linux/atomic.h>
#include <acpi/acpi_drivers.h>
-#include <asm/atomic.h>
-#include "lis3lv02d.h"
+#include "../../misc/lis3lv02d/lis3lv02d.h"
-#define DRIVER_NAME "lis3lv02d"
+#define DRIVER_NAME "hp_accel"
#define ACPI_MDPS_CLASS "accelerometer"
/* Delayed LEDs infrastructure ------------------------------------ */
@@ -402,4 +402,3 @@ MODULE_LICENSE("GPL");
module_init(lis3lv02d_init_module);
module_exit(lis3lv02d_exit_module);
-
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 7e9bb6df9d39..142d38579314 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -804,6 +804,7 @@ static int __init msi_init(void)
} else {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = MSI_LCD_LEVEL_MAX - 1;
msibl_device = backlight_device_register("msi-laptop-bl", NULL,
NULL, &msibl_ops,
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 35278ad7e628..d5419c9ec07a 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -254,6 +254,7 @@ static int __init msi_wmi_init(void)
if (!acpi_video_backlight_support()) {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
backlight = backlight_device_register(DRV_NAME, NULL, NULL,
&msi_backlight_ops,
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index cc1e0ba104d7..05be30ee158b 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -602,6 +602,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
}
/* initialize backlight */
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
&pcc_backlight_ops, &props);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 5e83370b0812..13d8d63bcca9 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1305,8 +1305,9 @@ static int sony_nc_add(struct acpi_device *device)
"controlled by ACPI video driver\n");
} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
&handle))) {
- struct backlight_properties props;
+ struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = SONY_MAX_BRIGHTNESS - 1;
sony_backlight_device = backlight_device_register("sony", NULL,
NULL,
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index eb9922385ef8..947bdcaa0ce9 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6307,6 +6307,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
return 1;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = bright_maxlvl;
props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME,
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 209cced786c6..63f42a22e102 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1018,6 +1018,7 @@ static int __init toshiba_acpi_init(void)
create_toshiba_proc_entries();
}
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
toshiba_backlight_device = backlight_device_register("toshiba",
&toshiba_acpi.p_dev->dev,
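
The recurring one-line additions in the hunks above initialise props.type before calling backlight_device_register(); BACKLIGHT_PLATFORM marks the device as a platform/vendor-specific control rather than raw register or firmware control. A minimal sketch of the pattern, assuming hypothetical ops, parent device and name:

#include <linux/backlight.h>
#include <linux/string.h>

static struct backlight_ops example_bl_ops;	/* hypothetical; would supply update/get brightness */

static struct backlight_device *example_register_backlight(struct device *parent,
							    int max_level)
{
	struct backlight_properties props;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_PLATFORM;	/* the field added throughout this patch */
	props.max_brightness = max_level;

	return backlight_device_register("example-bl", parent, NULL,
					 &example_bl_ops, &props);
}
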
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 19bc73695475..fa4e0a5db3f8 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -142,7 +142,9 @@ void __pnp_remove_device(struct pnp_dev *dev);
int pnp_check_port(struct pnp_dev *dev, struct resource *res);
int pnp_check_mem(struct pnp_dev *dev, struct resource *res);
int pnp_check_irq(struct pnp_dev *dev, struct resource *res);
+#ifdef CONFIG_ISA_DMA_API
int pnp_check_dma(struct pnp_dev *dev, struct resource *res);
+#endif
char *pnp_resource_type_name(struct resource *res);
void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc);
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 0a15664eef1c..ed9ce507149a 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -171,6 +171,7 @@ __add:
return 0;
}
+#ifdef CONFIG_ISA_DMA_API
static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
{
struct resource *res, local_res;
@@ -210,6 +211,7 @@ __add:
pnp_add_dma_resource(dev, res->start, res->flags);
return 0;
}
+#endif /* CONFIG_ISA_DMA_API */
void pnp_init_resources(struct pnp_dev *dev)
{
@@ -234,7 +236,8 @@ static void pnp_clean_resource_table(struct pnp_dev *dev)
static int pnp_assign_resources(struct pnp_dev *dev, int set)
{
struct pnp_option *option;
- int nport = 0, nmem = 0, nirq = 0, ndma = 0;
+ int nport = 0, nmem = 0, nirq = 0;
+ int ndma __maybe_unused = 0;
int ret = 0;
pnp_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set);
@@ -256,9 +259,11 @@ static int pnp_assign_resources(struct pnp_dev *dev, int set)
case IORESOURCE_IRQ:
ret = pnp_assign_irq(dev, &option->u.irq, nirq++);
break;
+#ifdef CONFIG_ISA_DMA_API
case IORESOURCE_DMA:
ret = pnp_assign_dma(dev, &option->u.dma, ndma++);
break;
+#endif
default:
ret = -EINVAL;
break;
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index a925e6b63d72..b0ecacbe53b1 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -409,9 +409,9 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
return 1;
}
+#ifdef CONFIG_ISA_DMA_API
int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
{
-#ifndef CONFIG_IA64
int i;
struct pnp_dev *tdev;
struct resource *tres;
@@ -466,11 +466,8 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
}
return 1;
-#else
- /* IA64 does not have legacy DMA */
- return 0;
-#endif
}
+#endif /* CONFIG_ISA_DMA_API */
unsigned long pnp_resource_type(struct resource *res)
{
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 02414db6a94c..763f894ed188 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -39,7 +39,7 @@ struct jz_battery {
int irq;
int charge_irq;
- struct mfd_cell *cell;
+ const struct mfd_cell *cell;
int status;
long voltage;
@@ -258,7 +258,7 @@ static int __devinit jz_battery_probe(struct platform_device *pdev)
return -ENOMEM;
}
- jz_battery->cell = pdev->dev.platform_data;
+ jz_battery->cell = mfd_get_cell(pdev);
jz_battery->irq = platform_get_irq(pdev, 0);
if (jz_battery->irq < 0) {
diff --git a/drivers/pps/clients/Makefile b/drivers/pps/clients/Makefile
index 42517da07049..4feb7e9e71ee 100644
--- a/drivers/pps/clients/Makefile
+++ b/drivers/pps/clients/Makefile
@@ -6,6 +6,4 @@ obj-$(CONFIG_PPS_CLIENT_KTIMER) += pps-ktimer.o
obj-$(CONFIG_PPS_CLIENT_LDISC) += pps-ldisc.o
obj-$(CONFIG_PPS_CLIENT_PARPORT) += pps_parport.o
-ifeq ($(CONFIG_PPS_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index b93af3ebb5ba..dcd39fba6ddd 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -216,11 +216,6 @@ static void parport_attach(struct parport *port)
hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
device.timer.function = hrtimer_event;
-#ifdef CONFIG_PREEMPT_RT
- /* hrtimer interrupt will run in the interrupt context with this */
- device.timer.irqsafe = 1;
-#endif
-
hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
return;
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index b6139fe187bf..89b8eca825b5 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -5,6 +5,4 @@ obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
obj-$(CONFIG_RAPIDIO) += switches/
-ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+subdir-ccflags-$(CONFIG_RAPIDIO_DEBUG) := -DDEBUG
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index a50391b6ba2a..3a59d5f018d3 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -517,7 +517,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
return rdev;
cleanup:
- if (rswitch->route_table)
+ if (rio_is_switch(rdev))
kfree(rswitch->route_table);
kfree(rdev);
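
The cleanup path above now keys off rio_is_switch(rdev) rather than peeking at rswitch->route_table, so the error path no longer dereferences rswitch for devices that are not switches. The helper is defined in the RapidIO headers; judging from the RIO_PEF_SWITCH tests elsewhere in this patch it is presumably equivalent to the following sketch:

/* Presumed shape only; the real definition lives in the rio headers. */
static inline int rio_is_switch(struct rio_dev *rdev)
{
	return rdev->pef & RIO_PEF_SWITCH;
}
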
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 1269fbd2deca..4dbe360989be 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -14,6 +14,7 @@
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/stat.h>
+#include <linux/capability.h>
#include "rio.h"
@@ -33,6 +34,8 @@ rio_config_attr(device_rev, "0x%08x\n");
rio_config_attr(asm_did, "0x%04x\n");
rio_config_attr(asm_vid, "0x%04x\n");
rio_config_attr(asm_rev, "0x%04x\n");
+rio_config_attr(destid, "0x%04x\n");
+rio_config_attr(hopcount, "0x%02x\n");
static ssize_t routes_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -52,6 +55,35 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
return (str - buf);
}
+static ssize_t lprev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rio_dev *rdev = to_rio_dev(dev);
+
+ return sprintf(buf, "%s\n",
+ (rdev->prev) ? rio_name(rdev->prev) : "root");
+}
+
+static ssize_t lnext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rio_dev *rdev = to_rio_dev(dev);
+ char *str = buf;
+ int i;
+
+ if (rdev->pef & RIO_PEF_SWITCH) {
+ for (i = 0; i < RIO_GET_TOTAL_PORTS(rdev->swpinfo); i++) {
+ if (rdev->rswitch->nextdev[i])
+ str += sprintf(str, "%s\n",
+ rio_name(rdev->rswitch->nextdev[i]));
+ else
+ str += sprintf(str, "null\n");
+ }
+ }
+
+ return str - buf;
+}
+
struct device_attribute rio_dev_attrs[] = {
__ATTR_RO(did),
__ATTR_RO(vid),
@@ -59,10 +91,14 @@ struct device_attribute rio_dev_attrs[] = {
__ATTR_RO(asm_did),
__ATTR_RO(asm_vid),
__ATTR_RO(asm_rev),
+ __ATTR_RO(lprev),
+ __ATTR_RO(destid),
__ATTR_NULL,
};
static DEVICE_ATTR(routes, S_IRUGO, routes_show, NULL);
+static DEVICE_ATTR(lnext, S_IRUGO, lnext_show, NULL);
+static DEVICE_ATTR(hopcount, S_IRUGO, hopcount_show, NULL);
static ssize_t
rio_read_config(struct file *filp, struct kobject *kobj,
@@ -218,7 +254,9 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
err = device_create_bin_file(&rdev->dev, &rio_config_attr);
if (!err && (rdev->pef & RIO_PEF_SWITCH)) {
- err = device_create_file(&rdev->dev, &dev_attr_routes);
+ err |= device_create_file(&rdev->dev, &dev_attr_routes);
+ err |= device_create_file(&rdev->dev, &dev_attr_lnext);
+ err |= device_create_file(&rdev->dev, &dev_attr_hopcount);
if (!err && rdev->rswitch->sw_sysfs)
err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE);
}
@@ -241,6 +279,8 @@ void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
device_remove_bin_file(&rdev->dev, &rio_config_attr);
if (rdev->pef & RIO_PEF_SWITCH) {
device_remove_file(&rdev->dev, &dev_attr_routes);
+ device_remove_file(&rdev->dev, &dev_attr_lnext);
+ device_remove_file(&rdev->dev, &dev_attr_hopcount);
if (rdev->rswitch->sw_sysfs)
rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
}
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index cc2a3b74d0f0..c29719cacbca 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -32,6 +32,7 @@
#include "rio.h"
static LIST_HEAD(rio_mports);
+static unsigned char next_portid;
/**
* rio_local_get_device_id - Get the base/extended device id for a port
@@ -68,9 +69,13 @@ int rio_request_inb_mbox(struct rio_mport *mport,
void (*minb) (struct rio_mport * mport, void *dev_id, int mbox,
int slot))
{
- int rc = 0;
+ int rc = -ENOSYS;
+ struct resource *res;
- struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ if (mport->ops->open_inb_mbox == NULL)
+ goto out;
+
+ res = kmalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_mbox_res(res, mbox, mbox);
@@ -88,7 +93,7 @@ int rio_request_inb_mbox(struct rio_mport *mport,
/* Hook the inbound message callback */
mport->inb_msg[mbox].mcback = minb;
- rc = rio_open_inb_mbox(mport, dev_id, mbox, entries);
+ rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries);
} else
rc = -ENOMEM;
@@ -106,10 +111,13 @@ int rio_request_inb_mbox(struct rio_mport *mport,
*/
int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
{
- rio_close_inb_mbox(mport, mbox);
+ if (mport->ops->close_inb_mbox) {
+ mport->ops->close_inb_mbox(mport, mbox);
- /* Release the mailbox resource */
- return release_resource(mport->inb_msg[mbox].res);
+ /* Release the mailbox resource */
+ return release_resource(mport->inb_msg[mbox].res);
+ } else
+ return -ENOSYS;
}
/**
@@ -129,9 +137,13 @@ int rio_request_outb_mbox(struct rio_mport *mport,
int entries,
void (*moutb) (struct rio_mport * mport, void *dev_id, int mbox, int slot))
{
- int rc = 0;
+ int rc = -ENOSYS;
+ struct resource *res;
- struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ if (mport->ops->open_outb_mbox == NULL)
+ goto out;
+
+ res = kmalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_mbox_res(res, mbox, mbox);
@@ -149,7 +161,7 @@ int rio_request_outb_mbox(struct rio_mport *mport,
/* Hook the inbound message callback */
mport->outb_msg[mbox].mcback = moutb;
- rc = rio_open_outb_mbox(mport, dev_id, mbox, entries);
+ rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries);
} else
rc = -ENOMEM;
@@ -167,10 +179,13 @@ int rio_request_outb_mbox(struct rio_mport *mport,
*/
int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
{
- rio_close_outb_mbox(mport, mbox);
+ if (mport->ops->close_outb_mbox) {
+ mport->ops->close_outb_mbox(mport, mbox);
- /* Release the mailbox resource */
- return release_resource(mport->outb_msg[mbox].res);
+ /* Release the mailbox resource */
+ return release_resource(mport->outb_msg[mbox].res);
+ } else
+ return -ENOSYS;
}
/**
@@ -1120,36 +1135,51 @@ static int __devinit rio_init(void)
return 0;
}
-device_initcall(rio_init);
-
int __devinit rio_init_mports(void)
{
- int rc = 0;
struct rio_mport *port;
list_for_each_entry(port, &rio_mports, node) {
- if (!request_mem_region(port->iores.start,
- resource_size(&port->iores),
- port->name)) {
- printk(KERN_ERR
- "RIO: Error requesting master port region 0x%016llx-0x%016llx\n",
- (u64)port->iores.start, (u64)port->iores.end);
- rc = -ENOMEM;
- goto out;
- }
-
if (port->host_deviceid >= 0)
rio_enum_mport(port);
else
rio_disc_mport(port);
}
- out:
- return rc;
+ rio_init();
+
+ return 0;
}
+device_initcall_sync(rio_init_mports);
+
+static int hdids[RIO_MAX_MPORTS + 1];
+
+static int rio_get_hdid(int index)
+{
+ if (!hdids[0] || hdids[0] <= index || index >= RIO_MAX_MPORTS)
+ return -1;
+
+ return hdids[index + 1];
+}
+
+static int rio_hdid_setup(char *str)
+{
+ (void)get_options(str, ARRAY_SIZE(hdids), hdids);
+ return 1;
+}
+
+__setup("riohdid=", rio_hdid_setup);
+
void rio_register_mport(struct rio_mport *port)
{
+ if (next_portid >= RIO_MAX_MPORTS) {
+ pr_err("RIO: reached specified max number of mports\n");
+ return;
+ }
+
+ port->id = next_portid++;
+ port->host_deviceid = rio_get_hdid(port->id);
list_add_tail(&port->node, &rio_mports);
}
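
rio_register_mport() now hands out sequential port IDs and looks up the host device ID from the new riohdid= boot parameter. get_options() stores the number of parsed integers in hdids[0] and the values from hdids[1] onward, which is why rio_get_hdid() indexes hdids[index + 1] and returns -1 (leaving the mport in discovery mode) when no value was supplied. A worked example, for illustration only:

/*
 * Booting with "riohdid=1,2" passes the string "1,2" to rio_hdid_setup(),
 * whose get_options() call fills:
 *
 *	hdids[0] = 2;	// number of values parsed
 *	hdids[1] = 1;	// host device ID for mport 0
 *	hdids[2] = 2;	// host device ID for mport 1
 *
 * so rio_get_hdid(0) == 1, rio_get_hdid(1) == 2, and any other index
 * returns -1.
 */
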
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index 48d67a6b98c8..c4d3acc3c715 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -7,7 +7,3 @@ obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o
obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
-
-ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index dd6308499bd4..859251250b55 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>
struct pm8607_regulator_info {
@@ -394,47 +395,48 @@ static struct pm8607_regulator_info pm8607_regulator_info[] = {
PM8607_LDO(14, LDO14, 0, 4, SUPPLIES_EN12, 6),
};
-static inline struct pm8607_regulator_info *find_regulator_info(int id)
+static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
{
- struct pm8607_regulator_info *info;
+ struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm8607_regulator_info *info = NULL;
+ struct regulator_init_data *pdata;
+ struct mfd_cell *cell;
int i;
+ cell = pdev->dev.platform_data;
+ if (cell == NULL)
+ return -ENODEV;
+ pdata = cell->mfd_data;
+ if (pdata == NULL)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) {
info = &pm8607_regulator_info[i];
- if (info->desc.id == id)
- return info;
+ if (!strcmp(info->desc.name, pdata->constraints.name))
+ break;
}
- return NULL;
-}
-
-static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
-{
- struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_platform_data *pdata = chip->dev->platform_data;
- struct pm8607_regulator_info *info = NULL;
-
- info = find_regulator_info(pdev->id);
- if (info == NULL) {
- dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ if (i >= ARRAY_SIZE(pm8607_regulator_info)) {
+ dev_err(&pdev->dev, "Failed to find regulator %s\n",
+ pdata->constraints.name);
return -EINVAL;
}
info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
info->chip = chip;
+ /* check DVC ramp slope double */
+ if (!strcmp(info->desc.name, "BUCK3"))
+ if (info->chip->buck3_double)
+ info->slope_double = 1;
+
info->regulator = regulator_register(&info->desc, &pdev->dev,
- pdata->regulator[pdev->id], info);
+ pdata, info);
if (IS_ERR(info->regulator)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
return PTR_ERR(info->regulator);
}
- /* check DVC ramp slope double */
- if (info->desc.id == PM8607_ID_BUCK3)
- if (info->chip->buck3_double)
- info->slope_double = 1;
-
platform_set_drvdata(pdev, info);
return 0;
}
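
With this change the 88pm8607 regulator probe no longer picks a regulator by pdev->id: it expects the MFD core to pass the struct mfd_cell as the child's platform_data (that is what it reads back), with the cell's mfd_data pointing at the regulator_init_data, and it matches that data's constraints name against the BUCK/LDO descriptor names. A hedged sketch of the parent-side wiring this implies; the cell name and the numbers are made up, only the field names follow the probe above:

#include <linux/mfd/core.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data pm8607_buck1_init = {
	.constraints = {
		.name		= "BUCK1",	/* must match pm8607_regulator_info[].desc.name */
		.min_uV		= 1000000,
		.max_uV		= 1500000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct mfd_cell pm8607_buck1_cell = {
	.name		= "88pm860x-regulator",	/* hypothetical cell name */
	.mfd_data	= &pm8607_buck1_init,	/* read back as cell->mfd_data in probe */
};
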
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index e1d943619ab8..de75f67f4cc3 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -108,6 +108,15 @@ config REGULATOR_MAX8952
via I2C bus. Maxim 8952 has one voltage output and supports 4 DVS
modes ranging from 0.77V to 1.40V by 0.01V steps.
+config REGULATOR_MAX8997
+ tristate "Maxim 8997/8966 regulator"
+ depends on MFD_MAX8997
+ help
+ This driver controls the voltage regulators of a Maxim 8997/8966
+ PMIC via I2C bus. It is typically used on S5PC110, S5PV210, and
+ Exynos-4 based systems to control the VCC_CORE and VCC_USIM
+ voltages.
+
config REGULATOR_MAX8998
tristate "Maxim 8998 voltage regulator"
depends on MFD_MAX8998
@@ -214,6 +223,15 @@ config REGULATOR_AB3100
AB3100 analog baseband dealing with power regulators
for the system.
+config REGULATOR_TPS6105X
+ tristate "TI TPS6105X Power regulators"
+ depends on TPS6105X
+ default y if TPS6105X
+ help
+ This driver supports TPS61050/TPS61052 voltage regulator chips.
+ It is a single boost converter primarily for white LEDs and
+ audio amplifiers.
+
config REGULATOR_TPS65023
tristate "TI TPS65023 Power regulators"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 0b5e88c2b8d7..d72a42756778 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
+obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
@@ -33,7 +34,7 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
-
+obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index ed6feaf9398d..2dec589a8908 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/core.h>
/* LDO registers and some handy masking definitions for AB3100 */
#define AB3100_LDO_A 0x40
@@ -576,7 +577,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
{
- struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
+ struct ab3100_platform_data *plfdata = mfd_get_data(pdev);
int err = 0;
u8 data;
int i;
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
new file mode 100644
index 000000000000..01ef7e9903bb
--- /dev/null
+++ b/drivers/regulator/max8997.c
@@ -0,0 +1,1213 @@
+/*
+ * max8997.c - Regulator driver for the Maxim 8997/8966
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8998.c
+ */
+
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+struct max8997_data {
+ struct device *dev;
+ struct max8997_dev *iodev;
+ int num_regulators;
+ struct regulator_dev **rdev;
+ int ramp_delay; /* in mV/us */
+
+ u8 buck1_vol[8];
+ u8 buck2_vol[8];
+ u8 buck5_vol[8];
+ int buck125_gpioindex;
+
+ u8 saved_states[MAX8997_REG_MAX];
+};
+
+static inline void max8997_set_gpio(struct max8997_data *max8997)
+{
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(max8997->iodev->dev);
+ int set3 = (max8997->buck125_gpioindex) & 0x1;
+ int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
+ int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
+
+ gpio_set_value(pdata->buck125_gpios[0], set1);
+ gpio_set_value(pdata->buck125_gpios[1], set2);
+ gpio_set_value(pdata->buck125_gpios[2], set3);
+}
+
+struct voltage_map_desc {
+ int min;
+ int max;
+ int step;
+ unsigned int n_bits;
+};
+
+/* Voltage maps in mV */
+static const struct voltage_map_desc ldo_voltage_map_desc = {
+ .min = 800, .max = 3950, .step = 50, .n_bits = 6,
+}; /* LDO1 ~ 18, 21 all */
+
+static const struct voltage_map_desc buck1245_voltage_map_desc = {
+ .min = 650, .max = 2225, .step = 25, .n_bits = 6,
+}; /* Buck1, 2, 4, 5 */
+
+static const struct voltage_map_desc buck37_voltage_map_desc = {
+ .min = 750, .max = 3900, .step = 50, .n_bits = 6,
+}; /* Buck3, 7 */
+
+/* current map in mA */
+static const struct voltage_map_desc charger_current_map_desc = {
+ .min = 200, .max = 950, .step = 50, .n_bits = 4,
+};
+
+static const struct voltage_map_desc topoff_current_map_desc = {
+ .min = 50, .max = 200, .step = 10, .n_bits = 4,
+};
+
+static const struct voltage_map_desc *reg_voltage_map[] = {
+ [MAX8997_LDO1] = &ldo_voltage_map_desc,
+ [MAX8997_LDO2] = &ldo_voltage_map_desc,
+ [MAX8997_LDO3] = &ldo_voltage_map_desc,
+ [MAX8997_LDO4] = &ldo_voltage_map_desc,
+ [MAX8997_LDO5] = &ldo_voltage_map_desc,
+ [MAX8997_LDO6] = &ldo_voltage_map_desc,
+ [MAX8997_LDO7] = &ldo_voltage_map_desc,
+ [MAX8997_LDO8] = &ldo_voltage_map_desc,
+ [MAX8997_LDO9] = &ldo_voltage_map_desc,
+ [MAX8997_LDO10] = &ldo_voltage_map_desc,
+ [MAX8997_LDO11] = &ldo_voltage_map_desc,
+ [MAX8997_LDO12] = &ldo_voltage_map_desc,
+ [MAX8997_LDO13] = &ldo_voltage_map_desc,
+ [MAX8997_LDO14] = &ldo_voltage_map_desc,
+ [MAX8997_LDO15] = &ldo_voltage_map_desc,
+ [MAX8997_LDO16] = &ldo_voltage_map_desc,
+ [MAX8997_LDO17] = &ldo_voltage_map_desc,
+ [MAX8997_LDO18] = &ldo_voltage_map_desc,
+ [MAX8997_LDO21] = &ldo_voltage_map_desc,
+ [MAX8997_BUCK1] = &buck1245_voltage_map_desc,
+ [MAX8997_BUCK2] = &buck1245_voltage_map_desc,
+ [MAX8997_BUCK3] = &buck37_voltage_map_desc,
+ [MAX8997_BUCK4] = &buck1245_voltage_map_desc,
+ [MAX8997_BUCK5] = &buck1245_voltage_map_desc,
+ [MAX8997_BUCK6] = NULL,
+ [MAX8997_BUCK7] = &buck37_voltage_map_desc,
+ [MAX8997_EN32KHZ_AP] = NULL,
+ [MAX8997_EN32KHZ_CP] = NULL,
+ [MAX8997_ENVICHG] = NULL,
+ [MAX8997_ESAFEOUT1] = NULL,
+ [MAX8997_ESAFEOUT2] = NULL,
+ [MAX8997_CHARGER_CV] = NULL,
+ [MAX8997_CHARGER] = &charger_current_map_desc,
+ [MAX8997_CHARGER_TOPOFF] = &topoff_current_map_desc,
+};
+
+static inline int max8997_get_rid(struct regulator_dev *rdev)
+{
+ return rdev_get_id(rdev);
+}
+
+static int max8997_list_voltage_safeout(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ int rid = max8997_get_rid(rdev);
+
+ if (rid == MAX8997_ESAFEOUT1 || rid == MAX8997_ESAFEOUT2) {
+ switch (selector) {
+ case 0:
+ return 4850000;
+ case 1:
+ return 4900000;
+ case 2:
+ return 4950000;
+ case 3:
+ return 3300000;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int max8997_list_voltage_charger_cv(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ int rid = max8997_get_rid(rdev);
+
+ if (rid != MAX8997_CHARGER_CV)
+ goto err;
+
+ switch (selector) {
+ case 0x00:
+ return 4200000;
+ case 0x01 ... 0x0E:
+ return 4000000 + 20000 * (selector - 0x01);
+ case 0x0F:
+ return 4350000;
+ default:
+ return -EINVAL;
+ }
+err:
+ return -EINVAL;
+}
+
+static int max8997_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ const struct voltage_map_desc *desc;
+ int rid = max8997_get_rid(rdev);
+ int val;
+
+ if (rid >= ARRAY_SIZE(reg_voltage_map) ||
+ rid < 0)
+ return -EINVAL;
+
+ desc = reg_voltage_map[rid];
+ if (desc == NULL)
+ return -EINVAL;
+
+ val = desc->min + desc->step * selector;
+ if (val > desc->max)
+ return -EINVAL;
+
+ return val * 1000;
+}
+
+static int max8997_get_enable_register(struct regulator_dev *rdev,
+ int *reg, int *mask, int *pattern)
+{
+ int rid = max8997_get_rid(rdev);
+
+ switch (rid) {
+ case MAX8997_LDO1 ... MAX8997_LDO21:
+ *reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
+ *mask = 0xC0;
+ *pattern = 0xC0;
+ break;
+ case MAX8997_BUCK1:
+ *reg = MAX8997_REG_BUCK1CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK2:
+ *reg = MAX8997_REG_BUCK2CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK3:
+ *reg = MAX8997_REG_BUCK3CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK4:
+ *reg = MAX8997_REG_BUCK4CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK5:
+ *reg = MAX8997_REG_BUCK5CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK6:
+ *reg = MAX8997_REG_BUCK6CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK7:
+ *reg = MAX8997_REG_BUCK7CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_EN32KHZ_AP ... MAX8997_EN32KHZ_CP:
+ *reg = MAX8997_REG_MAINCON1;
+ *mask = 0x01 << (rid - MAX8997_EN32KHZ_AP);
+ *pattern = 0x01 << (rid - MAX8997_EN32KHZ_AP);
+ break;
+ case MAX8997_ENVICHG:
+ *reg = MAX8997_REG_MBCCTRL1;
+ *mask = 0x80;
+ *pattern = 0x80;
+ break;
+ case MAX8997_ESAFEOUT1 ... MAX8997_ESAFEOUT2:
+ *reg = MAX8997_REG_SAFEOUTCTRL;
+ *mask = 0x40 << (rid - MAX8997_ESAFEOUT1);
+ *pattern = 0x40 << (rid - MAX8997_ESAFEOUT1);
+ break;
+ case MAX8997_CHARGER:
+ *reg = MAX8997_REG_MBCCTRL2;
+ *mask = 0x40;
+ *pattern = 0x40;
+ break;
+ default:
+ /* Not controllable or does not exist */
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+static int max8997_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int ret, reg, mask, pattern;
+ u8 val;
+
+ ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+ if (ret == -EINVAL)
+ return 1; /* "not controllable" */
+ else if (ret)
+ return ret;
+
+ ret = max8997_read_reg(i2c, reg, &val);
+ if (ret)
+ return ret;
+
+ return (val & mask) == pattern;
+}
+
+static int max8997_reg_enable(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int ret, reg, mask, pattern;
+
+ ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+ if (ret)
+ return ret;
+
+ return max8997_update_reg(i2c, reg, pattern, mask);
+}
+
+static int max8997_reg_disable(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int ret, reg, mask, pattern;
+
+ ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+ if (ret)
+ return ret;
+
+ return max8997_update_reg(i2c, reg, ~pattern, mask);
+}
+
+static int max8997_get_voltage_register(struct regulator_dev *rdev,
+ int *_reg, int *_shift, int *_mask)
+{
+ int rid = max8997_get_rid(rdev);
+ int reg, shift = 0, mask = 0x3f;
+
+ switch (rid) {
+ case MAX8997_LDO1 ... MAX8997_LDO21:
+ reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
+ break;
+ case MAX8997_BUCK1:
+ reg = MAX8997_REG_BUCK1DVS1;
+ break;
+ case MAX8997_BUCK2:
+ reg = MAX8997_REG_BUCK2DVS1;
+ break;
+ case MAX8997_BUCK3:
+ reg = MAX8997_REG_BUCK3DVS;
+ break;
+ case MAX8997_BUCK4:
+ reg = MAX8997_REG_BUCK4DVS;
+ break;
+ case MAX8997_BUCK5:
+ reg = MAX8997_REG_BUCK5DVS1;
+ break;
+ case MAX8997_BUCK7:
+ reg = MAX8997_REG_BUCK7DVS;
+ break;
+ case MAX8997_ESAFEOUT1 ... MAX8997_ESAFEOUT2:
+ reg = MAX8997_REG_SAFEOUTCTRL;
+ shift = (rid == MAX8997_ESAFEOUT2) ? 2 : 0;
+ mask = 0x3;
+ break;
+ case MAX8997_CHARGER_CV:
+ reg = MAX8997_REG_MBCCTRL3;
+ shift = 0;
+ mask = 0xf;
+ break;
+ case MAX8997_CHARGER:
+ reg = MAX8997_REG_MBCCTRL4;
+ shift = 0;
+ mask = 0xf;
+ break;
+ case MAX8997_CHARGER_TOPOFF:
+ reg = MAX8997_REG_MBCCTRL5;
+ shift = 0;
+ mask = 0xf;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *_reg = reg;
+ *_shift = shift;
+ *_mask = mask;
+
+ return 0;
+}
+
+static int max8997_get_voltage(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(max8997->iodev->dev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int reg, shift, mask, ret;
+ int rid = max8997_get_rid(rdev);
+ u8 val;
+
+ ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ if ((rid == MAX8997_BUCK1 && pdata->buck1_gpiodvs) ||
+ (rid == MAX8997_BUCK2 && pdata->buck2_gpiodvs) ||
+ (rid == MAX8997_BUCK5 && pdata->buck5_gpiodvs))
+ reg += max8997->buck125_gpioindex;
+
+ ret = max8997_read_reg(i2c, reg, &val);
+ if (ret)
+ return ret;
+
+ val >>= shift;
+ val &= mask;
+
+ if (rdev->desc && rdev->desc->ops && rdev->desc->ops->list_voltage)
+ return rdev->desc->ops->list_voltage(rdev, val);
+
+ /*
+ * max8997_list_voltage returns a value for any rdev with a voltage map,
+ * which works for "CHARGER" and "CHARGER TOPOFF" that do not have
+ * list_voltage ops (they are current regulators).
+ */
+ return max8997_list_voltage(rdev, val);
+}
+
+static inline int max8997_get_voltage_proper_val(
+ const struct voltage_map_desc *desc,
+ int min_vol, int max_vol)
+{
+ int i = 0;
+
+ if (desc == NULL)
+ return -EINVAL;
+
+ if (max_vol < desc->min || min_vol > desc->max)
+ return -EINVAL;
+
+ while (desc->min + desc->step * i < min_vol &&
+ desc->min + desc->step * i < desc->max)
+ i++;
+
+ if (desc->min + desc->step * i > max_vol)
+ return -EINVAL;
+
+ if (i >= (1 << desc->n_bits))
+ return -EINVAL;
+
+ return i;
+}
+
+static int max8997_set_voltage_charger_cv(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int rid = max8997_get_rid(rdev);
+ int lb, ub;
+ int reg, shift = 0, mask, ret = 0;
+ u8 val = 0x0;
+
+ if (rid != MAX8997_CHARGER_CV)
+ return -EINVAL;
+
+ ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ if (max_uV < 4000000 || min_uV > 4350000)
+ return -EINVAL;
+
+ if (min_uV <= 4000000)
+ val = 0x1;
+ else if (min_uV <= 4200000 && max_uV >= 4200000)
+ val = 0x0;
+ else {
+ lb = (min_uV - 4000001) / 20000 + 2;
+ ub = (max_uV - 4000000) / 20000 + 1;
+
+ if (lb > ub)
+ return -EINVAL;
+
+ if (lb < 0xf)
+ val = lb;
+ else {
+ if (ub >= 0xf)
+ val = 0xf;
+ else
+ return -EINVAL;
+ }
+ }
+
+ *selector = val;
+
+ ret = max8997_update_reg(i2c, reg, val << shift, mask);
+
+ return ret;
+}
+
+/*
+ * For LDO1 ~ LDO21, BUCK1~5, BUCK7, CHARGER, CHARGER_TOPOFF
+ * BUCK1, 2, and 5 are handled here only when not controlled by GPIO-DVS
+ */
+static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ const struct voltage_map_desc *desc;
+ int rid = max8997_get_rid(rdev);
+ int reg, shift = 0, mask, ret;
+ int i;
+ u8 org;
+
+ switch (rid) {
+ case MAX8997_LDO1 ... MAX8997_LDO21:
+ break;
+ case MAX8997_BUCK1 ... MAX8997_BUCK5:
+ break;
+ case MAX8997_BUCK6:
+ return -EINVAL;
+ case MAX8997_BUCK7:
+ break;
+ case MAX8997_CHARGER:
+ break;
+ case MAX8997_CHARGER_TOPOFF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ desc = reg_voltage_map[rid];
+
+ i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+ if (i < 0)
+ return i;
+
+ ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ max8997_read_reg(i2c, reg, &org);
+ org = (org & mask) >> shift;
+
+ ret = max8997_update_reg(i2c, reg, i << shift, mask << shift);
+ *selector = i;
+
+ if (rid == MAX8997_BUCK1 || rid == MAX8997_BUCK2 ||
+ rid == MAX8997_BUCK4 || rid == MAX8997_BUCK5) {
+ /* If the voltage is increasing */
+ if (org < i)
+ udelay(desc->step * (i - org) / max8997->ramp_delay);
+ }
+
+ return ret;
+}
+
+/*
+ * Assess the damage on the voltage setting of BUCK1,2,5 by the change.
+ *
+ * When GPIO-DVS mode is used for multiple bucks, changing the voltage value
+ * of one of the bucks may affect that of another buck, which is the side
+ * effect of the change (set_voltage). This function examines the GPIO-DVS
+ * configurations and checks whether such side-effect exists.
+ */
+static int max8997_assess_side_effect(struct regulator_dev *rdev,
+ u8 new_val, int *best)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(max8997->iodev->dev);
+ int rid = max8997_get_rid(rdev);
+ u8 *buckx_val[3];
+ bool buckx_gpiodvs[3];
+ int side_effect[8];
+ int min_side_effect = INT_MAX;
+ int i;
+
+ *best = -1;
+
+ switch (rid) {
+ case MAX8997_BUCK1:
+ rid = 0;
+ break;
+ case MAX8997_BUCK2:
+ rid = 1;
+ break;
+ case MAX8997_BUCK5:
+ rid = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ buckx_val[0] = max8997->buck1_vol;
+ buckx_val[1] = max8997->buck2_vol;
+ buckx_val[2] = max8997->buck5_vol;
+ buckx_gpiodvs[0] = pdata->buck1_gpiodvs;
+ buckx_gpiodvs[1] = pdata->buck2_gpiodvs;
+ buckx_gpiodvs[2] = pdata->buck5_gpiodvs;
+
+ for (i = 0; i < 8; i++) {
+ int others;
+
+ if (new_val != (buckx_val[rid])[i]) {
+ side_effect[i] = -1;
+ continue;
+ }
+
+ side_effect[i] = 0;
+ for (others = 0; others < 3; others++) {
+ int diff;
+
+ if (others == rid)
+ continue;
+ if (buckx_gpiodvs[others] == false)
+ continue; /* Not affected */
+ diff = (buckx_val[others])[i] -
+ (buckx_val[others])[max8997->buck125_gpioindex];
+ if (diff > 0)
+ side_effect[i] += diff;
+ else if (diff < 0)
+ side_effect[i] -= diff;
+ }
+ if (side_effect[i] == 0) {
+ *best = i;
+ return 0; /* NO SIDE EFFECT! Use This! */
+ }
+ if (side_effect[i] < min_side_effect) {
+ min_side_effect = side_effect[i];
+ *best = i;
+ }
+ }
+
+ if (*best == -1)
+ return -EINVAL;
+
+ return side_effect[*best];
+}
+
+/*
+ * For Buck 1 ~ 5 and 7. If it is not controlled by GPIO, this calls
+ * max8997_set_voltage_ldobuck to do the job.
+ */
+static int max8997_set_voltage_buck(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(max8997->iodev->dev);
+ int rid = max8997_get_rid(rdev);
+ const struct voltage_map_desc *desc;
+ int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
+ bool gpio_dvs_mode = false;
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+
+ if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
+ return -EINVAL;
+
+ switch (rid) {
+ case MAX8997_BUCK1:
+ if (pdata->buck1_gpiodvs)
+ gpio_dvs_mode = true;
+ break;
+ case MAX8997_BUCK2:
+ if (pdata->buck2_gpiodvs)
+ gpio_dvs_mode = true;
+ break;
+ case MAX8997_BUCK5:
+ if (pdata->buck5_gpiodvs)
+ gpio_dvs_mode = true;
+ break;
+ }
+
+ if (!gpio_dvs_mode)
+ return max8997_set_voltage_ldobuck(rdev, min_uV, max_uV,
+ selector);
+
+ desc = reg_voltage_map[rid];
+ new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+ if (new_val < 0)
+ return new_val;
+
+ tmp_dmg = INT_MAX;
+ tmp_idx = -1;
+ tmp_val = -1;
+ do {
+ damage = max8997_assess_side_effect(rdev, new_val, &new_idx);
+ if (damage == 0)
+ goto out;
+
+ if (tmp_dmg > damage) {
+ tmp_idx = new_idx;
+ tmp_val = new_val;
+ tmp_dmg = damage;
+ }
+
+ new_val++;
+ } while (desc->min + desc->step * new_val <= desc->max);
+
+ new_idx = tmp_idx;
+ new_val = tmp_val;
+
+ if (pdata->ignore_gpiodvs_side_effect == false)
+ return -EINVAL;
+
+ dev_warn(&rdev->dev, "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:"
+ " %d -> %d\n", max8997->buck125_gpioindex, tmp_idx);
+
+out:
+ if (new_idx < 0 || new_val < 0)
+ return -EINVAL;
+
+ max8997->buck125_gpioindex = new_idx;
+ max8997_set_gpio(max8997);
+ *selector = new_val;
+
+ return 0;
+}
+
+static const int safeoutvolt[] = {
+ 3300000,
+ 4850000,
+ 4900000,
+ 4950000,
+};
+
+/* For SAFEOUT1 and SAFEOUT2 */
+static int max8997_set_voltage_safeout(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int rid = max8997_get_rid(rdev);
+ int reg, shift = 0, mask, ret;
+ int i = 0;
+ u8 val;
+
+ if (rid != MAX8997_ESAFEOUT1 && rid != MAX8997_ESAFEOUT2)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(safeoutvolt); i++) {
+ if (min_uV <= safeoutvolt[i] &&
+ max_uV >= safeoutvolt[i])
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(safeoutvolt))
+ return -EINVAL;
+
+ if (i == 0)
+ val = 0x3;
+ else
+ val = i - 1;
+
+ ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ ret = max8997_update_reg(i2c, reg, val << shift, mask << shift);
+ *selector = val;
+
+ return ret;
+}
+
+static int max8997_reg_enable_suspend(struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int ret, reg, mask, pattern;
+ int rid = max8997_get_rid(rdev);
+
+ ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+ if (ret)
+ return ret;
+
+ max8997_read_reg(i2c, reg, &max8997->saved_states[rid]);
+
+ if (rid == MAX8997_LDO1 ||
+ rid == MAX8997_LDO10 ||
+ rid == MAX8997_LDO21) {
+ dev_dbg(&rdev->dev, "Conditional Power-Off for %s\n",
+ rdev->desc->name);
+ return max8997_update_reg(i2c, reg, 0x40, mask);
+ }
+
+ dev_dbg(&rdev->dev, "Full Power-Off for %s (%xh -> %xh)\n",
+ rdev->desc->name, max8997->saved_states[rid] & mask,
+ (~pattern) & mask);
+ return max8997_update_reg(i2c, reg, ~pattern, mask);
+}
+
+static struct regulator_ops max8997_ldo_ops = {
+ .list_voltage = max8997_list_voltage,
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .get_voltage = max8997_get_voltage,
+ .set_voltage = max8997_set_voltage_ldobuck,
+ .set_suspend_enable = max8997_reg_enable_suspend,
+ .set_suspend_disable = max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_buck_ops = {
+ .list_voltage = max8997_list_voltage,
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .get_voltage = max8997_get_voltage,
+ .set_voltage = max8997_set_voltage_buck,
+ .set_suspend_enable = max8997_reg_enable_suspend,
+ .set_suspend_disable = max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_fixedvolt_ops = {
+ .list_voltage = max8997_list_voltage,
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .set_suspend_enable = max8997_reg_enable_suspend,
+ .set_suspend_disable = max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_safeout_ops = {
+ .list_voltage = max8997_list_voltage_safeout,
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .get_voltage = max8997_get_voltage,
+ .set_voltage = max8997_set_voltage_safeout,
+ .set_suspend_enable = max8997_reg_enable_suspend,
+ .set_suspend_disable = max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_fixedstate_ops = {
+ .list_voltage = max8997_list_voltage_charger_cv,
+ .get_voltage = max8997_get_voltage,
+ .set_voltage = max8997_set_voltage_charger_cv,
+};
+
+static int max8997_set_voltage_ldobuck_wrap(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ unsigned dummy;
+
+ return max8997_set_voltage_ldobuck(rdev, min_uV, max_uV, &dummy);
+}
+
+
+static struct regulator_ops max8997_charger_ops = {
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .get_current_limit = max8997_get_voltage,
+ .set_current_limit = max8997_set_voltage_ldobuck_wrap,
+};
+
+static struct regulator_ops max8997_charger_fixedstate_ops = {
+ .is_enabled = max8997_reg_is_enabled,
+ .get_current_limit = max8997_get_voltage,
+ .set_current_limit = max8997_set_voltage_ldobuck_wrap,
+};
+
+#define regulator_desc_ldo(num) { \
+ .name = "LDO"#num, \
+ .id = MAX8997_LDO##num, \
+ .ops = &max8997_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+}
+#define regulator_desc_buck(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX8997_BUCK##num, \
+ .ops = &max8997_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+}
+
+static struct regulator_desc regulators[] = {
+ regulator_desc_ldo(1),
+ regulator_desc_ldo(2),
+ regulator_desc_ldo(3),
+ regulator_desc_ldo(4),
+ regulator_desc_ldo(5),
+ regulator_desc_ldo(6),
+ regulator_desc_ldo(7),
+ regulator_desc_ldo(8),
+ regulator_desc_ldo(9),
+ regulator_desc_ldo(10),
+ regulator_desc_ldo(11),
+ regulator_desc_ldo(12),
+ regulator_desc_ldo(13),
+ regulator_desc_ldo(14),
+ regulator_desc_ldo(15),
+ regulator_desc_ldo(16),
+ regulator_desc_ldo(17),
+ regulator_desc_ldo(18),
+ regulator_desc_ldo(21),
+ regulator_desc_buck(1),
+ regulator_desc_buck(2),
+ regulator_desc_buck(3),
+ regulator_desc_buck(4),
+ regulator_desc_buck(5),
+ {
+ .name = "BUCK6",
+ .id = MAX8997_BUCK6,
+ .ops = &max8997_fixedvolt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ regulator_desc_buck(7),
+ {
+ .name = "EN32KHz AP",
+ .id = MAX8997_EN32KHZ_AP,
+ .ops = &max8997_fixedvolt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "EN32KHz CP",
+ .id = MAX8997_EN32KHZ_CP,
+ .ops = &max8997_fixedvolt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "ENVICHG",
+ .id = MAX8997_ENVICHG,
+ .ops = &max8997_fixedvolt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "ESAFEOUT1",
+ .id = MAX8997_ESAFEOUT1,
+ .ops = &max8997_safeout_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "ESAFEOUT2",
+ .id = MAX8997_ESAFEOUT2,
+ .ops = &max8997_safeout_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "CHARGER CV",
+ .id = MAX8997_CHARGER_CV,
+ .ops = &max8997_fixedstate_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "CHARGER",
+ .id = MAX8997_CHARGER,
+ .ops = &max8997_charger_ops,
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "CHARGER TOPOFF",
+ .id = MAX8997_CHARGER_TOPOFF,
+ .ops = &max8997_charger_fixedstate_ops,
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __devinit int max8997_pmic_probe(struct platform_device *pdev)
+{
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct regulator_dev **rdev;
+ struct max8997_data *max8997;
+ struct i2c_client *i2c;
+ int i, ret, size;
+ u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "No platform init data supplied.\n");
+ return -ENODEV;
+ }
+
+ max8997 = kzalloc(sizeof(struct max8997_data), GFP_KERNEL);
+ if (!max8997)
+ return -ENOMEM;
+
+ size = sizeof(struct regulator_dev *) * pdata->num_regulators;
+ max8997->rdev = kzalloc(size, GFP_KERNEL);
+ if (!max8997->rdev) {
+ kfree(max8997);
+ return -ENOMEM;
+ }
+
+ rdev = max8997->rdev;
+ max8997->dev = &pdev->dev;
+ max8997->iodev = iodev;
+ max8997->num_regulators = pdata->num_regulators;
+ platform_set_drvdata(pdev, max8997);
+ i2c = max8997->iodev->i2c;
+
+ max8997->buck125_gpioindex = pdata->buck125_default_idx;
+
+ for (i = 0; i < 8; i++) {
+ max8997->buck1_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+ pdata->buck1_voltage[i] / 1000,
+ pdata->buck1_voltage[i] / 1000 +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+
+ max8997->buck2_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+ pdata->buck2_voltage[i] / 1000,
+ pdata->buck2_voltage[i] / 1000 +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+
+ max8997->buck5_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+ pdata->buck5_voltage[i] / 1000,
+ pdata->buck5_voltage[i] / 1000 +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+
+ if (max_buck1 < max8997->buck1_vol[i])
+ max_buck1 = max8997->buck1_vol[i];
+ if (max_buck2 < max8997->buck2_vol[i])
+ max_buck2 = max8997->buck2_vol[i];
+ if (max_buck5 < max8997->buck5_vol[i])
+ max_buck5 = max8997->buck5_vol[i];
+ }
+
+ /* For safety, set the maximum voltage before programming the DVS tables */
+ for (i = 0; i < 8; i++) {
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+ max_buck1, 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+ max_buck2, 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+ max_buck5, 0x3f);
+ }
+
+ /*
+ * If none of buck 1, 2, and 5 uses GPIO-DVS, skip the GPIO setup.
+ * If at least one of them does, request and configure the gpios.
+ */
+ if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
+ pdata->buck5_gpiodvs) {
+ bool gpio1set = false, gpio2set = false;
+
+ if (!gpio_is_valid(pdata->buck125_gpios[0]) ||
+ !gpio_is_valid(pdata->buck125_gpios[1]) ||
+ !gpio_is_valid(pdata->buck125_gpios[2])) {
+ dev_err(&pdev->dev, "GPIO NOT VALID\n");
+ ret = -EINVAL;
+ goto err_alloc;
+ }
+
+ ret = gpio_request(pdata->buck125_gpios[0],
+ "MAX8997 SET1");
+ if (ret == -EBUSY)
+ dev_warn(&pdev->dev, "Duplicated gpio request"
+ " on SET1\n");
+ else if (ret)
+ goto err_alloc;
+ else
+ gpio1set = true;
+
+ ret = gpio_request(pdata->buck125_gpios[1],
+ "MAX8997 SET2");
+ if (ret == -EBUSY)
+ dev_warn(&pdev->dev, "Duplicated gpio request"
+ " on SET2\n");
+ else if (ret) {
+ if (gpio1set)
+ gpio_free(pdata->buck125_gpios[0]);
+ goto err_alloc;
+ } else
+ gpio2set = true;
+
+ ret = gpio_request(pdata->buck125_gpios[2],
+ "MAX8997 SET3");
+ if (ret == -EBUSY)
+ dev_warn(&pdev->dev, "Duplicated gpio request"
+ " on SET3\n");
+ else if (ret) {
+ if (gpio1set)
+ gpio_free(pdata->buck125_gpios[0]);
+ if (gpio2set)
+ gpio_free(pdata->buck125_gpios[1]);
+ goto err_alloc;
+ }
+
+ gpio_direction_output(pdata->buck125_gpios[0],
+ (max8997->buck125_gpioindex >> 2)
+ & 0x1); /* SET1 */
+ gpio_direction_output(pdata->buck125_gpios[1],
+ (max8997->buck125_gpioindex >> 1)
+ & 0x1); /* SET2 */
+ gpio_direction_output(pdata->buck125_gpios[2],
+ (max8997->buck125_gpioindex >> 0)
+ & 0x1); /* SET3 */
+ ret = 0;
+ }
+
+ /* Enable or disable GPIO-controlled DVS for bucks 1, 2, and 5 */
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1CTRL, (pdata->buck1_gpiodvs) ?
+ (1 << 1) : (0 << 1), 1 << 1);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2CTRL, (pdata->buck2_gpiodvs) ?
+ (1 << 1) : (0 << 1), 1 << 1);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5CTRL, (pdata->buck5_gpiodvs) ?
+ (1 << 1) : (0 << 1), 1 << 1);
+
+ /* Initialize all the DVS related BUCK registers */
+ for (i = 0; i < 8; i++) {
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+ max8997->buck1_vol[i],
+ 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+ max8997->buck2_vol[i],
+ 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+ max8997->buck5_vol[i],
+ 0x3f);
+ }
+
+ for (i = 0; i < pdata->num_regulators; i++) {
+ const struct voltage_map_desc *desc;
+ int id = pdata->regulators[i].id;
+
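+ /*
+ * Derive n_voltages from the voltage map where one exists; the
+ * safeout and charger-CV outputs use fixed selector counts instead.
+ */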
+ desc = reg_voltage_map[id];
+ if (desc)
+ regulators[id].n_voltages =
+ (desc->max - desc->min) / desc->step + 1;
+ else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2)
+ regulators[id].n_voltages = 4;
+ else if (id == MAX8997_CHARGER_CV)
+ regulators[id].n_voltages = 16;
+
+ rdev[i] = regulator_register(&regulators[id], max8997->dev,
+ pdata->regulators[i].initdata, max8997);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(max8997->dev, "regulator init failed for %d\n",
+ id);
+ rdev[i] = NULL;
+ goto err;
+ }
+ }
+
+ /* Misc Settings */
+ max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
+ max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
+
+ return 0;
+err:
+ for (i = 0; i < max8997->num_regulators; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+err_alloc:
+ kfree(max8997->rdev);
+ kfree(max8997);
+
+ return ret;
+}
+
+static int __devexit max8997_pmic_remove(struct platform_device *pdev)
+{
+ struct max8997_data *max8997 = platform_get_drvdata(pdev);
+ struct regulator_dev **rdev = max8997->rdev;
+ int i;
+
+ for (i = 0; i < max8997->num_regulators; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ kfree(max8997->rdev);
+ kfree(max8997);
+
+ return 0;
+}
+
+static const struct platform_device_id max8997_pmic_id[] = {
+ { "max8997-pmic", 0},
+ { },
+};
+
+static struct platform_driver max8997_pmic_driver = {
+ .driver = {
+ .name = "max8997-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_pmic_probe,
+ .remove = __devexit_p(max8997_pmic_remove),
+ .id_table = max8997_pmic_id,
+};
+
+static int __init max8997_pmic_init(void)
+{
+ return platform_driver_register(&max8997_pmic_driver);
+}
+subsys_initcall(max8997_pmic_init);
+
+static void __exit max8997_pmic_cleanup(void)
+{
+ platform_driver_unregister(&max8997_pmic_driver);
+}
+module_exit(max8997_pmic_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8997/8966 Regulator Driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 3e5d0c3b4e53..23249cb0a8bd 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -15,6 +15,7 @@
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -336,8 +337,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv;
struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
- struct mc13783_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev);
struct mc13783_regulator_init_data *init_data;
int i, ret;
@@ -381,8 +381,7 @@ err:
static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13783_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev);
int i;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 1b8f7398a4a8..6f15168e5ed4 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -15,6 +15,7 @@
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -520,8 +521,7 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv;
struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev);
struct mc13xxx_regulator_init_data *init_data;
int i, ret;
u32 val;
@@ -595,8 +595,7 @@ err_free:
static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev);
int i;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
new file mode 100644
index 000000000000..1661499feda4
--- /dev/null
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -0,0 +1,196 @@
+/*
+ * Driver for TPS61050/61052 boost converters, typically used for white LEDs
+ * or audio amplifiers.
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6105x.h>
+
+static const int tps6105x_voltages[] = {
+ 4500000,
+ 5000000,
+ 5250000,
+ 5000000, /* There is an additional 5V */
+};
+
+static int tps6105x_regulator_enable(struct regulator_dev *rdev)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ int ret;
+
+ /* Activate voltage mode */
+ ret = tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ TPS6105X_REG0_MODE_MASK,
+ TPS6105X_REG0_MODE_VOLTAGE << TPS6105X_REG0_MODE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6105x_regulator_disable(struct regulator_dev *rdev)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ int ret;
+
+ /* Set into shutdown mode */
+ ret = tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ TPS6105X_REG0_MODE_MASK,
+ TPS6105X_REG0_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6105x_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ u8 regval;
+ int ret;
+
+ ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval);
+ if (ret)
+ return ret;
+ regval &= TPS6105X_REG0_MODE_MASK;
+ regval >>= TPS6105X_REG0_MODE_SHIFT;
+
+ if (regval == TPS6105X_REG0_MODE_VOLTAGE)
+ return 1;
+
+ return 0;
+}
+
+static int tps6105x_regulator_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ u8 regval;
+ int ret;
+
+ ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval);
+ if (ret)
+ return ret;
+
+ regval &= TPS6105X_REG0_VOLTAGE_MASK;
+ regval >>= TPS6105X_REG0_VOLTAGE_SHIFT;
+ return (int) regval;
+}
+
+static int tps6105x_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ TPS6105X_REG0_VOLTAGE_MASK,
+ selector << TPS6105X_REG0_VOLTAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6105x_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(tps6105x_voltages))
+ return -EINVAL;
+
+ return tps6105x_voltages[selector];
+}
+
+static struct regulator_ops tps6105x_regulator_ops = {
+ .enable = tps6105x_regulator_enable,
+ .disable = tps6105x_regulator_disable,
+ .is_enabled = tps6105x_regulator_is_enabled,
+ .get_voltage_sel = tps6105x_regulator_get_voltage_sel,
+ .set_voltage_sel = tps6105x_regulator_set_voltage_sel,
+ .list_voltage = tps6105x_regulator_list_voltage,
+};
+
+static struct regulator_desc tps6105x_regulator_desc = {
+ .name = "tps6105x-boost",
+ .ops = &tps6105x_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = 0,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(tps6105x_voltages),
+};
+
+/*
+ * Registers the chip as a voltage regulator
+ */
+static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
+{
+ struct tps6105x *tps6105x = mfd_get_data(pdev);
+ struct tps6105x_platform_data *pdata = tps6105x->pdata;
+ int ret;
+
+ /* This instance is not set for regulator mode so bail out */
+ if (pdata->mode != TPS6105X_MODE_VOLTAGE) {
+ dev_info(&pdev->dev,
+ "chip not in voltage mode mode, exit probe \n");
+ return 0;
+ }
+
+ /* Register regulator with framework */
+ tps6105x->regulator = regulator_register(&tps6105x_regulator_desc,
+ &tps6105x->client->dev,
+ pdata->regulator_data, tps6105x);
+ if (IS_ERR(tps6105x->regulator)) {
+ ret = PTR_ERR(tps6105x->regulator);
+ dev_err(&tps6105x->client->dev,
+ "failed to register regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit tps6105x_regulator_remove(struct platform_device *pdev)
+{
+ struct tps6105x *tps6105x = platform_get_drvdata(pdev);
+ regulator_unregister(tps6105x->regulator);
+ return 0;
+}
+
+static struct platform_driver tps6105x_regulator_driver = {
+ .driver = {
+ .name = "tps6105x-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6105x_regulator_probe,
+ .remove = __devexit_p(tps6105x_regulator_remove),
+};
+
+static __init int tps6105x_regulator_init(void)
+{
+ return platform_driver_register(&tps6105x_regulator_driver);
+}
+subsys_initcall(tps6105x_regulator_init);
+
+static __exit void tps6105x_regulator_exit(void)
+{
+ platform_driver_unregister(&tps6105x_regulator_driver);
+}
+module_exit(tps6105x_regulator_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("TPS6105x regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps6105x-regulator");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index bd332cf1cc3f..6a292852a358 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -475,6 +475,13 @@ static struct regulator_ops twlfixed_ops = {
.get_status = twlreg_get_status,
};
+static struct regulator_ops twl6030_fixed_resource = {
+ .enable = twlreg_enable,
+ .disable = twlreg_disable,
+ .is_enabled = twlreg_is_enabled,
+ .get_status = twlreg_get_status,
+};
+
/*----------------------------------------------------------------------*/
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
@@ -538,6 +545,20 @@ static struct regulator_ops twlfixed_ops = {
}, \
}
+#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay, remap_conf) { \
+ .base = offset, \
+ .id = num, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6030_REG_##label, \
+ .ops = &twl6030_fixed_resource, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
/*
* We list regulators here if systems need some level of
* software control over them after boot.
@@ -577,7 +598,8 @@ static struct twlreg_info twl_regs[] = {
TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21)
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21),
+ TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0, 0x21),
};
static int __devinit twlreg_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4941cade319f..e1878877399c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -985,4 +985,14 @@ config RTC_DRV_LPC32XX
This driver can also be built as a module. If so, the module
will be called rtc-lpc32xx.
+config RTC_DRV_TEGRA
+ tristate "NVIDIA Tegra Internal RTC driver"
+ depends on RTC_CLASS && ARCH_TEGRA
+ help
+ If you say yes here you get support for the
+ Tegra 200 series internal RTC module.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-tegra.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 2afdaf3ff986..ca91c3c42e98 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -2,9 +2,7 @@
# Makefile for RTC class/drivers.
#
-ifeq ($(CONFIG_RTC_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_RTC_DEBUG) := -DDEBUG
obj-$(CONFIG_RTC_LIB) += rtc-lib.o
obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
@@ -93,6 +91,7 @@ obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
+obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index d834a63ec4b0..e6e71deb188f 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -25,6 +25,7 @@
#include <linux/bcd.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/pm.h>
#define DS1374_REG_TOD0 0x00 /* Time of Day */
#define DS1374_REG_TOD1 0x01
@@ -409,32 +410,38 @@ static int __devexit ds1374_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int ds1374_suspend(struct i2c_client *client, pm_message_t state)
+static int ds1374_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
+
if (client->irq >= 0 && device_may_wakeup(&client->dev))
enable_irq_wake(client->irq);
return 0;
}
-static int ds1374_resume(struct i2c_client *client)
+static int ds1374_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
+
if (client->irq >= 0 && device_may_wakeup(&client->dev))
disable_irq_wake(client->irq);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(ds1374_pm, ds1374_suspend, ds1374_resume);
+
+#define DS1374_PM (&ds1374_pm)
#else
-#define ds1374_suspend NULL
-#define ds1374_resume NULL
+#define DS1374_PM NULL
#endif
static struct i2c_driver ds1374_driver = {
.driver = {
.name = "rtc-ds1374",
.owner = THIS_MODULE,
+ .pm = DS1374_PM,
},
.probe = ds1374_probe,
- .suspend = ds1374_suspend,
- .resume = ds1374_resume,
.remove = __devexit_p(ds1374_remove),
.id_table = ds1374_id,
};
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index 26a86d235051..b038d2cfef26 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -158,7 +158,7 @@ static int __devinit ds1390_probe(struct spi_device *spi)
static int __devexit ds1390_remove(struct spi_device *spi)
{
- struct ds1390 *chip = platform_get_drvdata(spi);
+ struct ds1390 *chip = spi_get_drvdata(spi);
rtc_device_unregister(chip->rtc);
kfree(chip);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 3fffd708711f..fbabc773dded 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -468,7 +468,7 @@ ds1511_nvram_write(struct file *filp, struct kobject *kobj,
static struct bin_attribute ds1511_nvram_attr = {
.attr = {
.name = "nvram",
- .mode = S_IRUGO | S_IWUGO,
+ .mode = S_IRUGO | S_IWUSR,
},
.size = DS1511_RAM_MAX,
.read = ds1511_nvram_read,
diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c
index a774ca35b5f7..bbd26228f532 100644
--- a/drivers/rtc/rtc-ds3234.c
+++ b/drivers/rtc/rtc-ds3234.c
@@ -158,7 +158,7 @@ static int __devinit ds3234_probe(struct spi_device *spi)
static int __devexit ds3234_remove(struct spi_device *spi)
{
- struct rtc_device *rtc = platform_get_drvdata(spi);
+ struct rtc_device *rtc = spi_get_drvdata(spi);
rtc_device_unregister(rtc);
return 0;
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 468200c38ecb..da8beb8cae51 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -39,6 +39,8 @@
#define ISL1208_REG_SR_BAT (1<<1) /* battery */
#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */
#define ISL1208_REG_INT 0x08
+#define ISL1208_REG_INT_ALME (1<<6) /* alarm enable */
+#define ISL1208_REG_INT_IM (1<<7) /* interrupt/alarm mode */
#define ISL1208_REG_09 0x09 /* reserved */
#define ISL1208_REG_ATR 0x0a
#define ISL1208_REG_DTR 0x0b
@@ -202,6 +204,30 @@ isl1208_i2c_set_usr(struct i2c_client *client, u16 usr)
}
static int
+isl1208_rtc_toggle_alarm(struct i2c_client *client, int enable)
+{
+ int icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT);
+
+ if (icr < 0) {
+ dev_err(&client->dev, "%s: reading INT failed\n", __func__);
+ return icr;
+ }
+
+ if (enable)
+ icr |= ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM;
+ else
+ icr &= ~(ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM);
+
+ icr = i2c_smbus_write_byte_data(client, ISL1208_REG_INT, icr);
+ if (icr < 0) {
+ dev_err(&client->dev, "%s: writing INT failed\n", __func__);
+ return icr;
+ }
+
+ return 0;
+}
+
+static int
isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct i2c_client *const client = to_i2c_client(dev);
@@ -288,9 +314,8 @@ isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
{
struct rtc_time *const tm = &alarm->time;
u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, };
- int sr;
+ int icr, yr, sr = isl1208_i2c_get_sr(client);
- sr = isl1208_i2c_get_sr(client);
if (sr < 0) {
dev_err(&client->dev, "%s: reading SR failed\n", __func__);
return sr;
@@ -313,6 +338,73 @@ isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
bcd2bin(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1;
tm->tm_wday = bcd2bin(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03);
+ /* The alarm doesn't store the year so get it from the rtc section */
+ yr = i2c_smbus_read_byte_data(client, ISL1208_REG_YR);
+ if (yr < 0) {
+ dev_err(&client->dev, "%s: reading RTC YR failed\n", __func__);
+ return yr;
+ }
+ tm->tm_year = bcd2bin(yr) + 100;
+
+ icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT);
+ if (icr < 0) {
+ dev_err(&client->dev, "%s: reading INT failed\n", __func__);
+ return icr;
+ }
+ alarm->enabled = !!(icr & ISL1208_REG_INT_ALME);
+
+ return 0;
+}
+
+static int
+isl1208_i2c_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
+{
+ struct rtc_time *alarm_tm = &alarm->time;
+ u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, };
+ const int offs = ISL1208_REG_SCA;
+ unsigned long rtc_secs, alarm_secs;
+ struct rtc_time rtc_tm;
+ int err, enable;
+
+ err = isl1208_i2c_read_time(client, &rtc_tm);
+ if (err)
+ return err;
+ err = rtc_tm_to_time(&rtc_tm, &rtc_secs);
+ if (err)
+ return err;
+ err = rtc_tm_to_time(alarm_tm, &alarm_secs);
+ if (err)
+ return err;
+
+ /* If the alarm time is before the current time, disable the alarm */
+ if (!alarm->enabled || alarm_secs <= rtc_secs)
+ enable = 0x00;
+ else
+ enable = 0x80;
+
+ /* Program the alarm and enable it for each setting */
+ regs[ISL1208_REG_SCA - offs] = bin2bcd(alarm_tm->tm_sec) | enable;
+ regs[ISL1208_REG_MNA - offs] = bin2bcd(alarm_tm->tm_min) | enable;
+ regs[ISL1208_REG_HRA - offs] = bin2bcd(alarm_tm->tm_hour) |
+ ISL1208_REG_HR_MIL | enable;
+
+ regs[ISL1208_REG_DTA - offs] = bin2bcd(alarm_tm->tm_mday) | enable;
+ regs[ISL1208_REG_MOA - offs] = bin2bcd(alarm_tm->tm_mon + 1) | enable;
+ regs[ISL1208_REG_DWA - offs] = bin2bcd(alarm_tm->tm_wday & 7) | enable;
+
+ /* write ALARM registers */
+ err = isl1208_i2c_set_regs(client, offs, regs,
+ ISL1208_ALARM_SECTION_LEN);
+ if (err < 0) {
+ dev_err(&client->dev, "%s: writing ALARM section failed\n",
+ __func__);
+ return err;
+ }
+
+ err = isl1208_rtc_toggle_alarm(client, enable);
+ if (err)
+ return err;
+
return 0;
}
@@ -391,12 +483,63 @@ isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm);
}
+static int
+isl1208_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ return isl1208_i2c_set_alarm(to_i2c_client(dev), alarm);
+}
+
+static irqreturn_t
+isl1208_rtc_interrupt(int irq, void *data)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ struct i2c_client *client = data;
+ int handled = 0, sr, err;
+
+ /*
+ * I2C reads seem to get NAK'ed if we read straight away after an
+ * interrupt, and an mdelay/msleep didn't seem to help either, so we
+ * work around this by retrying the register read for a short time.
+ */
+ while (1) {
+ sr = isl1208_i2c_get_sr(client);
+ if (sr >= 0)
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&client->dev, "%s: reading SR failed\n",
+ __func__);
+ return sr;
+ }
+ }
+
+ if (sr & ISL1208_REG_SR_ALM) {
+ dev_dbg(&client->dev, "alarm!\n");
+
+ /* Clear the alarm */
+ sr &= ~ISL1208_REG_SR_ALM;
+ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
+ if (sr < 0)
+ dev_err(&client->dev, "%s: writing SR failed\n",
+ __func__);
+ else
+ handled = 1;
+
+ /* Disable the alarm */
+ err = isl1208_rtc_toggle_alarm(client, 0);
+ if (err)
+ return err;
+ }
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
static const struct rtc_class_ops isl1208_rtc_ops = {
.proc = isl1208_rtc_proc,
.read_time = isl1208_rtc_read_time,
.set_time = isl1208_rtc_set_time,
.read_alarm = isl1208_rtc_read_alarm,
- /*.set_alarm = isl1208_rtc_set_alarm, */
+ .set_alarm = isl1208_rtc_set_alarm,
};
/* sysfs interface */
@@ -488,11 +631,29 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_info(&client->dev,
"chip found, driver version " DRV_VERSION "\n");
+ if (client->irq > 0) {
+ rc = request_threaded_irq(client->irq, NULL,
+ isl1208_rtc_interrupt,
+ IRQF_SHARED,
+ isl1208_driver.driver.name, client);
+ if (!rc) {
+ device_init_wakeup(&client->dev, 1);
+ enable_irq_wake(client->irq);
+ } else {
+ dev_err(&client->dev,
+ "Unable to request irq %d, no alarm support\n",
+ client->irq);
+ client->irq = 0;
+ }
+ }
+
rtc = rtc_device_register(isl1208_driver.driver.name,
&client->dev, &isl1208_rtc_ops,
THIS_MODULE);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
+ if (IS_ERR(rtc)) {
+ rc = PTR_ERR(rtc);
+ goto exit_free_irq;
+ }
i2c_set_clientdata(client, rtc);
@@ -514,6 +675,9 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
exit_unregister:
rtc_device_unregister(rtc);
+exit_free_irq:
+ if (client->irq)
+ free_irq(client->irq, client);
return rc;
}
@@ -525,6 +689,8 @@ isl1208_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
rtc_device_unregister(rtc);
+ if (client->irq)
+ free_irq(client->irq, client);
return 0;
}
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
index c8c97a4169d4..e259ed76ae85 100644
--- a/drivers/rtc/rtc-m41t94.c
+++ b/drivers/rtc/rtc-m41t94.c
@@ -136,7 +136,7 @@ static int __devinit m41t94_probe(struct spi_device *spi)
static int __devexit m41t94_remove(struct spi_device *spi)
{
- struct rtc_device *rtc = platform_get_drvdata(spi);
+ struct rtc_device *rtc = spi_get_drvdata(spi);
if (rtc)
rtc_device_unregister(rtc);
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index d554368c9f57..1d28d4451dae 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -97,7 +97,7 @@ static const struct rtc_class_ops pl030_ops = {
.set_alarm = pl030_set_alarm,
};
-static int pl030_probe(struct amba_device *dev, struct amba_id *id)
+static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl030_rtc *rtc;
int ret;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index d829ea63c4fb..ff1b84bd9bb5 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -307,7 +307,7 @@ static int pl031_remove(struct amba_device *adev)
return 0;
}
-static int pl031_probe(struct amba_device *adev, struct amba_id *id)
+static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
struct pl031_local *ldata;
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
new file mode 100644
index 000000000000..2fc31aac3f4e
--- /dev/null
+++ b/drivers/rtc/rtc-tegra.c
@@ -0,0 +1,488 @@
+/*
+ * An RTC driver for the NVIDIA Tegra 200 series internal RTC.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+
+/* reads 1 (busy) once every eight 32kHz clocks while sec+msec are copied to AHB */
+#define TEGRA_RTC_REG_BUSY 0x004
+#define TEGRA_RTC_REG_SECONDS 0x008
+/* when msec is read, the seconds are buffered into shadow seconds. */
+#define TEGRA_RTC_REG_SHADOW_SECONDS 0x00c
+#define TEGRA_RTC_REG_MILLI_SECONDS 0x010
+#define TEGRA_RTC_REG_SECONDS_ALARM0 0x014
+#define TEGRA_RTC_REG_SECONDS_ALARM1 0x018
+#define TEGRA_RTC_REG_MILLI_SECONDS_ALARM0 0x01c
+#define TEGRA_RTC_REG_INTR_MASK 0x028
+/* write 1 bits to clear status bits */
+#define TEGRA_RTC_REG_INTR_STATUS 0x02c
+
+/* bits in INTR_MASK */
+#define TEGRA_RTC_INTR_MASK_MSEC_CDN_ALARM (1<<4)
+#define TEGRA_RTC_INTR_MASK_SEC_CDN_ALARM (1<<3)
+#define TEGRA_RTC_INTR_MASK_MSEC_ALARM (1<<2)
+#define TEGRA_RTC_INTR_MASK_SEC_ALARM1 (1<<1)
+#define TEGRA_RTC_INTR_MASK_SEC_ALARM0 (1<<0)
+
+/* bits in INTR_STATUS */
+#define TEGRA_RTC_INTR_STATUS_MSEC_CDN_ALARM (1<<4)
+#define TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM (1<<3)
+#define TEGRA_RTC_INTR_STATUS_MSEC_ALARM (1<<2)
+#define TEGRA_RTC_INTR_STATUS_SEC_ALARM1 (1<<1)
+#define TEGRA_RTC_INTR_STATUS_SEC_ALARM0 (1<<0)
+
+struct tegra_rtc_info {
+ struct platform_device *pdev;
+ struct rtc_device *rtc_dev;
+ void __iomem *rtc_base; /* NULL if not initialized. */
+ int tegra_rtc_irq; /* alarm and periodic irq */
+ spinlock_t tegra_rtc_lock;
+};
+
+/*
+ * The RTC hardware is busy while it updates its values over AHB, once
+ * every eight 32kHz clocks (~250us). Outside of these updates the CPU
+ * is free to write; the CPU is always free to read.
+ */
+static inline u32 tegra_rtc_check_busy(struct tegra_rtc_info *info)
+{
+ return readl(info->rtc_base + TEGRA_RTC_REG_BUSY) & 1;
+}
+
+/* Wait for hardware to be ready for writing.
+ * This function tries to maximize the amount of time before the next update.
+ * It does this by waiting for the RTC to become busy with its periodic update,
+ * then returning once the RTC first becomes not busy.
+ * This periodic update (where the seconds and milliseconds are copied to the
+ * AHB side) occurs every eight 32kHz clocks (~250uS).
+ * The behavior of this function allows us to make some assumptions without
+ * introducing a race, because 250uS is plenty of time to read/write a value.
+ */
+static int tegra_rtc_wait_while_busy(struct device *dev)
+{
+ struct tegra_rtc_info *info = dev_get_drvdata(dev);
+
+ int retries = 500; /* ~490 us is the worst case, ~250 us is best. */
+
+ /* first wait for the RTC to become busy. this is when it
+ * posts its updated seconds+msec registers to AHB side. */
+ while (tegra_rtc_check_busy(info)) {
+ if (!retries--)
+ goto retry_failed;
+ udelay(1);
+ }
+
+ /* now we have about 250 us to manipulate registers */
+ return 0;
+
+retry_failed:
+ dev_err(dev, "write failed:retry count exceeded.\n");
+ return -ETIMEDOUT;
+}
+
+static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct tegra_rtc_info *info = dev_get_drvdata(dev);
+ unsigned long sec, msec;
+ unsigned long sl_irq_flags;
+
+ /* RTC hardware copies seconds to shadow seconds when a read
+ * of milliseconds occurs. use a lock to keep other threads out. */
+ spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
+
+ msec = readl(info->rtc_base + TEGRA_RTC_REG_MILLI_SECONDS);
+ sec = readl(info->rtc_base + TEGRA_RTC_REG_SHADOW_SECONDS);
+
+ spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
+
+ rtc_time_to_tm(sec, tm);
+
+ dev_vdbg(dev, "time read as %lu. %d/%d/%d %d:%02u:%02u\n",
+ sec,
+ tm->tm_mon + 1,
+ tm->tm_mday,
+ tm->tm_year + 1900,
+ tm->tm_hour,
+ tm->tm_min,
+ tm->tm_sec
+ );
+
+ return 0;
+}
+
+static int tegra_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct tegra_rtc_info *info = dev_get_drvdata(dev);
+ unsigned long sec;
+ int ret;
+
+ /* convert tm to seconds. */
+ ret = rtc_valid_tm(tm);
+ if (ret)
+ return ret;
+
+ rtc_tm_to_time(tm, &sec);
+
+ dev_vdbg(dev, "time set to %lu. %d/%d/%d %d:%02u:%02u\n",
+ sec,
+ tm->tm_mon+1,
+ tm->tm_mday,
+ tm->tm_year+1900,
+ tm->tm_hour,
+ tm->tm_min,
+ tm->tm_sec
+ );
+
+ /* seconds only written if wait succeeded. */
+ ret = tegra_rtc_wait_while_busy(dev);
+ if (!ret)
+ writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS);
+
+ dev_vdbg(dev, "time read back as %d\n",
+ readl(info->rtc_base + TEGRA_RTC_REG_SECONDS));
+
+ return ret;
+}
+
+static int tegra_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct tegra_rtc_info *info = dev_get_drvdata(dev);
+ unsigned long sec;
+ unsigned tmp;
+
+ sec = readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
+
+ if (sec == 0) {
+ /* alarm is disabled. */
+ alarm->enabled = 0;
+ alarm->time.tm_mon = -1;
+ alarm->time.tm_mday = -1;
+ alarm->time.tm_year = -1;
+ alarm->time.tm_hour = -1;
+ alarm->time.tm_min = -1;
+ alarm->time.tm_sec = -1;
+ } else {
+ /* alarm is enabled. */
+ alarm->enabled = 1;
+ rtc_time_to_tm(sec, &alarm->time);
+ }
+
+ tmp = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
+ alarm->pending = (tmp & TEGRA_RTC_INTR_STATUS_SEC_ALARM0) != 0;
+
+ return 0;
+}
+
+static int tegra_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct tegra_rtc_info *info = dev_get_drvdata(dev);
+ unsigned status;
+ unsigned long sl_irq_flags;
+
+ tegra_rtc_wait_while_busy(dev);
+ spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
+
+ /* read the original value, and OR in the flag. */
+ status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
+ if (enabled)
+ status |= TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* set it */
+ else
+ status &= ~TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* clear it */
+
+ writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
+
+ spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
+
+ return 0;
+}
+
+static int tegra_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct tegra_rtc_info *info = dev_get_drvdata(dev);
+ unsigned long sec;
+
+ if (alarm->enabled)
+ rtc_tm_to_time(&alarm->time, &sec);
+ else
+ sec = 0;
+
+ tegra_rtc_wait_while_busy(dev);
+ writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
+ dev_vdbg(dev, "alarm read back as %d\n",
+ readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0));
+
+ /* if successfully written and alarm is enabled ... */
+ if (sec) {
+ tegra_rtc_alarm_irq_enable(dev, 1);
+
+ dev_vdbg(dev, "alarm set as %lu. %d/%d/%d %d:%02u:%02u\n",
+ sec,
+ alarm->time.tm_mon+1,
+ alarm->time.tm_mday,
+ alarm->time.tm_year+1900,
+ alarm->time.tm_hour,
+ alarm->time.tm_min,
+ alarm->time.tm_sec);
+ } else {
+ /* disable alarm if 0 or write error. */
+ dev_vdbg(dev, "alarm disabled\n");
+ tegra_rtc_alarm_irq_enable(dev, 0);
+ }
+
+ return 0;
+}
+
+static int tegra_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ if (!dev || !dev->driver)
+ return 0;
+
+ return seq_printf(seq, "name\t\t: %s\n", dev_name(dev));
+}
+
+static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tegra_rtc_info *info = dev_get_drvdata(dev);
+ unsigned long events = 0;
+ unsigned status;
+ unsigned long sl_irq_flags;
+
+ status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
+ if (status) {
+ /* clear the interrupt masks and status on any irq. */
+ tegra_rtc_wait_while_busy(dev);
+ spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
+ writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
+ writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
+ spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
+ }
+
+ /* check if Alarm */
+ if ((status & TEGRA_RTC_INTR_STATUS_SEC_ALARM0))
+ events |= RTC_IRQF | RTC_AF;
+
+ /* check if Periodic */
+ if ((status & TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM))
+ events |= RTC_IRQF | RTC_PF;
+
+ rtc_update_irq(info->rtc_dev, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static struct rtc_class_ops tegra_rtc_ops = {
+ .read_time = tegra_rtc_read_time,
+ .set_time = tegra_rtc_set_time,
+ .read_alarm = tegra_rtc_read_alarm,
+ .set_alarm = tegra_rtc_set_alarm,
+ .proc = tegra_rtc_proc,
+ .alarm_irq_enable = tegra_rtc_alarm_irq_enable,
+};
+
+static int __devinit tegra_rtc_probe(struct platform_device *pdev)
+{
+ struct tegra_rtc_info *info;
+ struct resource *res;
+ int ret;
+
+ info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Unable to allocate resources for device.\n");
+ ret = -EBUSY;
+ goto err_free_info;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev,
+ "Unable to request mem region for device.\n");
+ ret = -EBUSY;
+ goto err_free_info;
+ }
+
+ info->tegra_rtc_irq = platform_get_irq(pdev, 0);
+ if (info->tegra_rtc_irq <= 0) {
+ ret = -EBUSY;
+ goto err_release_mem_region;
+ }
+
+ info->rtc_base = ioremap_nocache(res->start, resource_size(res));
+ if (!info->rtc_base) {
+ dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
+ ret = -EBUSY;
+ goto err_release_mem_region;
+ }
+
+ /* set context info. */
+ info->pdev = pdev;
+ info->tegra_rtc_lock = __SPIN_LOCK_UNLOCKED(info->tegra_rtc_lock);
+
+ platform_set_drvdata(pdev, info);
+
+ /* clear out the hardware. */
+ writel(0, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
+ writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
+ writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ info->rtc_dev = rtc_device_register(
+ pdev->name, &pdev->dev, &tegra_rtc_ops, THIS_MODULE);
+ if (IS_ERR(info->rtc_dev)) {
+ ret = PTR_ERR(info->rtc_dev);
+ info->rtc_dev = NULL;
+ dev_err(&pdev->dev,
+ "Unable to register device (err=%d).\n",
+ ret);
+ goto err_iounmap;
+ }
+
+ ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler,
+ IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to request interrupt for device (err=%d).\n",
+ ret);
+ goto err_dev_unreg;
+ }
+
+ dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
+
+ return 0;
+
+err_dev_unreg:
+ rtc_device_unregister(info->rtc_dev);
+err_iounmap:
+ iounmap(info->rtc_base);
+err_release_mem_region:
+ release_mem_region(res->start, resource_size(res));
+err_free_info:
+ kfree(info);
+
+ return ret;
+}
+
+static int __devexit tegra_rtc_remove(struct platform_device *pdev)
+{
+ struct tegra_rtc_info *info = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EBUSY;
+
+ free_irq(info->tegra_rtc_irq, &pdev->dev);
+ rtc_device_unregister(info->rtc_dev);
+ iounmap(info->rtc_base);
+ release_mem_region(res->start, resource_size(res));
+ kfree(info);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_rtc_info *info = platform_get_drvdata(pdev);
+
+ tegra_rtc_wait_while_busy(dev);
+
+ /* only use ALARM0 as a wake source. */
+ writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
+ writel(TEGRA_RTC_INTR_STATUS_SEC_ALARM0,
+ info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
+
+ dev_vdbg(dev, "alarm sec = %d\n",
+ readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0));
+
+ dev_vdbg(dev, "Suspend (device_may_wakeup=%d) irq:%d\n",
+ device_may_wakeup(dev), info->tegra_rtc_irq);
+
+ /* leave the alarms on as a wake source. */
+ if (device_may_wakeup(dev))
+ enable_irq_wake(info->tegra_rtc_irq);
+
+ return 0;
+}
+
+static int tegra_rtc_resume(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_rtc_info *info = platform_get_drvdata(pdev);
+
+ dev_vdbg(dev, "Resume (device_may_wakeup=%d)\n",
+ device_may_wakeup(dev));
+ /* alarms were left on as a wake source, turn them off. */
+ if (device_may_wakeup(dev))
+ disable_irq_wake(info->tegra_rtc_irq);
+
+ return 0;
+}
+#endif
+
+static void tegra_rtc_shutdown(struct platform_device *pdev)
+{
+ dev_vdbg(&pdev->dev, "disabling interrupts.\n");
+ tegra_rtc_alarm_irq_enable(&pdev->dev, 0);
+}
+
+MODULE_ALIAS("platform:tegra_rtc");
+static struct platform_driver tegra_rtc_driver = {
+ .remove = __devexit_p(tegra_rtc_remove),
+ .shutdown = tegra_rtc_shutdown,
+ .driver = {
+ .name = "tegra_rtc",
+ .owner = THIS_MODULE,
+ },
+#ifdef CONFIG_PM
+ .suspend = tegra_rtc_suspend,
+ .resume = tegra_rtc_resume,
+#endif
+};
+
+static int __init tegra_rtc_init(void)
+{
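+ /*
+ * The RTC block is always present and cannot be hot-plugged, so the
+ * driver is registered with platform_driver_probe(): the probe routine
+ * runs once at init time and no .probe pointer is kept in the struct.
+ */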
+ return platform_driver_probe(&tegra_rtc_driver, tegra_rtc_probe);
+}
+module_init(tegra_rtc_init);
+
+static void __exit tegra_rtc_exit(void)
+{
+ platform_driver_unregister(&tegra_rtc_driver);
+}
+module_exit(tegra_rtc_exit);
+
+MODULE_AUTHOR("Jon Mayo <jmayo@nvidia.com>");
+MODULE_DESCRIPTION("driver for Tegra internal RTC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 51c666fb67a4..645b0fcbb370 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -122,36 +122,21 @@ static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
- zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
- sizeof(struct zfcp_fc_gpn_ft_req));
- if (!zfcp_data.gpn_ft_cache)
- goto out;
-
- zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
- sizeof(struct fsf_qtcb));
- if (!zfcp_data.qtcb_cache)
+ zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
+ sizeof(struct fsf_qtcb));
+ if (!zfcp_fsf_qtcb_cache)
goto out_qtcb_cache;
- zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
- sizeof(struct fsf_status_read_buffer));
- if (!zfcp_data.sr_buffer_cache)
- goto out_sr_cache;
+ zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
+ sizeof(struct zfcp_fc_req));
+ if (!zfcp_fc_req_cache)
+ goto out_fc_cache;
- zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
- sizeof(struct zfcp_fc_gid_pn));
- if (!zfcp_data.gid_pn_cache)
- goto out_gid_cache;
-
- zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
- sizeof(struct zfcp_fc_els_adisc));
- if (!zfcp_data.adisc_cache)
- goto out_adisc_cache;
-
- zfcp_data.scsi_transport_template =
+ zfcp_scsi_transport_template =
fc_attach_transport(&zfcp_transport_functions);
- if (!zfcp_data.scsi_transport_template)
+ if (!zfcp_scsi_transport_template)
goto out_transport;
- scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
+ scsi_transport_reserve_device(zfcp_scsi_transport_template,
sizeof(struct zfcp_scsi_dev));
@@ -175,18 +160,12 @@ static int __init zfcp_module_init(void)
out_ccw_register:
misc_deregister(&zfcp_cfdc_misc);
out_misc:
- fc_release_transport(zfcp_data.scsi_transport_template);
+ fc_release_transport(zfcp_scsi_transport_template);
out_transport:
- kmem_cache_destroy(zfcp_data.adisc_cache);
-out_adisc_cache:
- kmem_cache_destroy(zfcp_data.gid_pn_cache);
-out_gid_cache:
- kmem_cache_destroy(zfcp_data.sr_buffer_cache);
-out_sr_cache:
- kmem_cache_destroy(zfcp_data.qtcb_cache);
+ kmem_cache_destroy(zfcp_fc_req_cache);
+out_fc_cache:
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
out_qtcb_cache:
- kmem_cache_destroy(zfcp_data.gpn_ft_cache);
-out:
return retval;
}
@@ -196,12 +175,9 @@ static void __exit zfcp_module_exit(void)
{
ccw_driver_unregister(&zfcp_ccw_driver);
misc_deregister(&zfcp_cfdc_misc);
- fc_release_transport(zfcp_data.scsi_transport_template);
- kmem_cache_destroy(zfcp_data.adisc_cache);
- kmem_cache_destroy(zfcp_data.gid_pn_cache);
- kmem_cache_destroy(zfcp_data.sr_buffer_cache);
- kmem_cache_destroy(zfcp_data.qtcb_cache);
- kmem_cache_destroy(zfcp_data.gpn_ft_cache);
+ fc_release_transport(zfcp_scsi_transport_template);
+ kmem_cache_destroy(zfcp_fc_req_cache);
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
}
module_exit(zfcp_module_exit);
@@ -260,18 +236,18 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
return -ENOMEM;
adapter->pool.qtcb_pool =
- mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
+ mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
if (!adapter->pool.qtcb_pool)
return -ENOMEM;
- adapter->pool.status_read_data =
- mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
- zfcp_data.sr_buffer_cache);
- if (!adapter->pool.status_read_data)
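+ /* Each status read buffer fits in one page, so a simple page pool
+ * replaces the former slab cache for these buffers. */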
+ BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+ adapter->pool.sr_data =
+ mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+ if (!adapter->pool.sr_data)
return -ENOMEM;
adapter->pool.gid_pn =
- mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
+ mempool_create_slab_pool(1, zfcp_fc_req_cache);
if (!adapter->pool.gid_pn)
return -ENOMEM;
@@ -290,8 +266,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.qtcb_pool);
if (adapter->pool.status_read_req)
mempool_destroy(adapter->pool.status_read_req);
- if (adapter->pool.status_read_data)
- mempool_destroy(adapter->pool.status_read_data);
+ if (adapter->pool.sr_data)
+ mempool_destroy(adapter->pool.sr_data);
if (adapter->pool.gid_pn)
mempool_destroy(adapter->pool.gid_pn);
}
@@ -386,6 +362,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
+ INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -437,7 +414,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
- if (!zfcp_adapter_scsi_register(adapter))
+ if (!zfcp_scsi_adapter_register(adapter))
return adapter;
failed:
@@ -451,10 +428,11 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
+ cancel_work_sync(&adapter->ns_up_work);
zfcp_destroy_adapter_work_queue(adapter);
zfcp_fc_wka_ports_force_offline(adapter->gs);
- zfcp_adapter_scsi_unregister(adapter);
+ zfcp_scsi_adapter_unregister(adapter);
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
zfcp_erp_thread_kill(adapter);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9ae1d0a6f627..527ba48eea57 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -89,7 +89,6 @@ struct zfcp_reqlist;
#define ZFCP_STATUS_LUN_READONLY 0x00000008
/* FSF request status (this does not have a common part) */
-#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
@@ -108,7 +107,7 @@ struct zfcp_adapter_mempool {
mempool_t *scsi_req;
mempool_t *scsi_abort;
mempool_t *status_read_req;
- mempool_t *status_read_data;
+ mempool_t *sr_data;
mempool_t *gid_pn;
mempool_t *qtcb_pool;
};
@@ -190,6 +189,7 @@ struct zfcp_adapter {
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
+ struct work_struct ns_up_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
struct device_dma_parameters dma_parms;
@@ -314,15 +314,4 @@ struct zfcp_fsf_req {
void (*handler)(struct zfcp_fsf_req *);
};
-/* driver data */
-struct zfcp_data {
- struct scsi_host_template scsi_host_template;
- struct scsi_transport_template *scsi_transport_template;
- struct kmem_cache *gpn_ft_cache;
- struct kmem_cache *qtcb_cache;
- struct kmem_cache *sr_buffer_cache;
- struct kmem_cache *gid_pn_cache;
- struct kmem_cache *adisc_cache;
-};
-
#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e003e306f870..e1b4f800e226 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
- if (mempool_resize(act->adapter->pool.status_read_data,
+ if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num, GFP_KERNEL))
return ZFCP_ERP_FAILED;
@@ -1231,8 +1231,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
queue_work(adapter->work_queue, &adapter->scan_work);
+ queue_work(adapter->work_queue, &adapter->ns_up_work);
} else
unregister_service_level(&adapter->service_level);
+
kref_put(&adapter->ref, zfcp_adapter_release);
break;
}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 6e325284fbe7..03627cfd81cd 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -80,6 +80,7 @@ extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(unsigned long);
/* zfcp_fc.c */
+extern struct kmem_cache *zfcp_fc_req_cache;
extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
enum fc_host_event_code event_code, u32);
extern void zfcp_fc_post_event(struct work_struct *);
@@ -95,8 +96,10 @@ extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern void zfcp_fc_sym_name_update(struct work_struct *);
/* zfcp_fsf.c */
+extern struct kmem_cache *zfcp_fsf_qtcb_cache;
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
@@ -139,9 +142,9 @@ extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
struct qdio_buffer *);
/* zfcp_scsi.c */
-extern struct zfcp_data zfcp_data;
-extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
-extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
+extern struct scsi_transport_template *zfcp_scsi_transport_template;
+extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
+extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
extern struct fc_function_template zfcp_transport_functions;
extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 30cf91a787a3..297e6b71ce9c 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -11,11 +11,14 @@
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/utsname.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
+struct kmem_cache *zfcp_fc_req_cache;
+
static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_PORT] = 0xFFFFFF,
[ELS_ADDR_FMT_AREA] = 0xFFFF00,
@@ -260,24 +263,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
zfcp_fc_incoming_rscn(fsf_req);
}
-static void zfcp_fc_ns_gid_pn_eval(void *data)
+static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
- struct zfcp_fc_gid_pn *gid_pn = data;
- struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
- struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
- struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
- struct zfcp_port *port = gid_pn->port;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
- if (ct->status)
+ if (ct_els->status)
return;
- if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
+ if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
return;
- /* paranoia */
- if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
- return;
/* looks like a valid d_id */
- port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
+ ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}
static void zfcp_fc_complete(void *data)
@@ -285,69 +282,73 @@ static void zfcp_fc_complete(void *data)
complete(data);
}
+static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
+{
+ ct_hdr->ct_rev = FC_CT_REV;
+ ct_hdr->ct_fs_type = FC_FST_DIR;
+ ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+ ct_hdr->ct_cmd = cmd;
+ ct_hdr->ct_mr_size = mr_size / 4;
+}
+
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
- struct zfcp_fc_gid_pn *gid_pn)
+ struct zfcp_fc_req *fc_req)
{
struct zfcp_adapter *adapter = port->adapter;
DECLARE_COMPLETION_ONSTACK(completion);
+ struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
int ret;
/* setup parameters for send generic command */
- gid_pn->port = port;
- gid_pn->ct.handler = zfcp_fc_complete;
- gid_pn->ct.handler_data = &completion;
- gid_pn->ct.req = &gid_pn->sg_req;
- gid_pn->ct.resp = &gid_pn->sg_resp;
- sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
- sizeof(struct zfcp_fc_gid_pn_req));
- sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
- sizeof(struct zfcp_fc_gid_pn_resp));
-
- /* setup nameserver request */
- gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
- gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
- gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
- gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
- gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
- gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
- gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
-
- ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.handler = zfcp_fc_complete;
+ fc_req->ct_els.handler_data = &completion;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
+ sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
+
+ zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
+ FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
+ gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
adapter->pool.gid_pn_req,
ZFCP_FC_CTELS_TMO);
if (!ret) {
wait_for_completion(&completion);
- zfcp_fc_ns_gid_pn_eval(gid_pn);
+ zfcp_fc_ns_gid_pn_eval(fc_req);
}
return ret;
}
/**
- * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
+ * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
* @port: port where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
int ret;
- struct zfcp_fc_gid_pn *gid_pn;
+ struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
- gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
- if (!gid_pn)
+ fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- memset(gid_pn, 0, sizeof(*gid_pn));
+ memset(fc_req, 0, sizeof(*fc_req));
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
- ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);
+ ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
- mempool_free(gid_pn, adapter->pool.gid_pn);
+ mempool_free(fc_req, adapter->pool.gid_pn);
return ret;
}
@@ -419,11 +420,11 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
static void zfcp_fc_adisc_handler(void *data)
{
- struct zfcp_fc_els_adisc *adisc = data;
- struct zfcp_port *port = adisc->els.port;
- struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
+ struct zfcp_fc_req *fc_req = data;
+ struct zfcp_port *port = fc_req->ct_els.port;
+ struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
- if (adisc->els.status) {
+ if (fc_req->ct_els.status) {
/* request rejected or timed out */
zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_1");
@@ -445,42 +446,42 @@ static void zfcp_fc_adisc_handler(void *data)
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
- kmem_cache_free(zfcp_data.adisc_cache, adisc);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
static int zfcp_fc_adisc(struct zfcp_port *port)
{
- struct zfcp_fc_els_adisc *adisc;
+ struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
int ret;
- adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
- if (!adisc)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- adisc->els.port = port;
- adisc->els.req = &adisc->req;
- adisc->els.resp = &adisc->resp;
- sg_init_one(adisc->els.req, &adisc->adisc_req,
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
sizeof(struct fc_els_adisc));
- sg_init_one(adisc->els.resp, &adisc->adisc_resp,
+ sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
sizeof(struct fc_els_adisc));
- adisc->els.handler = zfcp_fc_adisc_handler;
- adisc->els.handler_data = adisc;
+ fc_req->ct_els.handler = zfcp_fc_adisc_handler;
+ fc_req->ct_els.handler_data = fc_req;
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
- adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
- adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
- adisc->adisc_req.adisc_cmd = ELS_ADISC;
- hton24(adisc->adisc_req.adisc_port_id,
- fc_host_port_id(adapter->scsi_host));
+ fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
+ fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
+ fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+ hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
- ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
+ ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
- kmem_cache_free(zfcp_data.adisc_cache, adisc);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
return ret;
}
@@ -528,68 +529,42 @@ void zfcp_fc_test_link(struct zfcp_port *port)
put_device(&port->dev);
}
-static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
+static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
- struct scatterlist *sg = &gpn_ft->sg_req;
-
- kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
- zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
-
- kfree(gpn_ft);
-}
+ struct zfcp_fc_req *fc_req;
-static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
-{
- struct zfcp_fc_gpn_ft *gpn_ft;
- struct zfcp_fc_gpn_ft_req *req;
-
- gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
- if (!gpn_ft)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
return NULL;
- req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
- if (!req) {
- kfree(gpn_ft);
- gpn_ft = NULL;
- goto out;
+ if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+ return NULL;
}
- sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
- if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
- zfcp_free_sg_env(gpn_ft, buf_num);
- gpn_ft = NULL;
- }
-out:
- return gpn_ft;
-}
+ sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
+ sizeof(struct zfcp_fc_gpn_ft_req));
+ return fc_req;
+}
-static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_bytes)
{
- struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
- struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
- /* prepare CT IU for GPN_FT */
- req->ct_hdr.ct_rev = FC_CT_REV;
- req->ct_hdr.ct_fs_type = FC_FST_DIR;
- req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
- req->ct_hdr.ct_options = 0;
- req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
- req->ct_hdr.ct_mr_size = max_bytes / 4;
- req->gpn_ft.fn_domain_id_scope = 0;
- req->gpn_ft.fn_area_id_scope = 0;
+ zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
- /* prepare zfcp_send_ct */
- ct->handler = zfcp_fc_complete;
- ct->handler_data = &completion;
- ct->req = &gpn_ft->sg_req;
- ct->resp = gpn_ft->sg_resp;
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
- ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (!ret)
wait_for_completion(&completion);
@@ -610,11 +585,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
list_move_tail(&port->list, lh);
}
-static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_entries)
{
- struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
- struct scatterlist *sg = gpn_ft->sg_resp;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct scatterlist *sg = &fc_req->sg_rsp;
struct fc_ct_hdr *hdr = sg_virt(sg);
struct fc_gpn_ft_resp *acc = sg_virt(sg);
struct zfcp_port *port, *tmp;
@@ -623,7 +598,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
u32 d_id;
int ret = 0, x, last = 0;
- if (ct->status)
+ if (ct_els->status)
return -EIO;
if (hdr->ct_cmd != FC_FS_ACC) {
@@ -687,7 +662,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
scan_work);
int ret, i;
- struct zfcp_fc_gpn_ft *gpn_ft;
+ struct zfcp_fc_req *fc_req;
int chain, max_entries, buf_num, max_bytes;
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
@@ -702,25 +677,145 @@ void zfcp_fc_scan_ports(struct work_struct *work)
if (zfcp_fc_wka_port_get(&adapter->gs->ds))
return;
- gpn_ft = zfcp_alloc_sg_env(buf_num);
- if (!gpn_ft)
+ fc_req = zfcp_alloc_sg_env(buf_num);
+ if (!fc_req)
goto out;
for (i = 0; i < 3; i++) {
- ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
+ ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
if (!ret) {
- ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
+ ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
break;
}
}
- zfcp_free_sg_env(gpn_ft, buf_num);
+ zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
zfcp_fc_wka_port_put(&adapter->gs->ds);
}
+static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ char devno[] = "DEVNO:";
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
+ struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
+ int ret;
+
+ zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
+
+ sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
+ sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&completion);
+ if (ct_els->status)
+ return ct_els->status;
+
+ if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
+ !(strstr(gspn_rsp->gspn.fp_name, devno)))
+ snprintf(fc_host_symbolic_name(adapter->scsi_host),
+ FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
+ gspn_rsp->gspn.fp_name, devno,
+ dev_name(&adapter->ccw_device->dev),
+ init_utsname()->nodename);
+ else
+ strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+ gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
+
+ return 0;
+}
+
+static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
+ struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
+ int ret, len;
+
+ zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
+ len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
+ FC_SYMBOLIC_NAME_SIZE);
+ rspn_req->rspn.fr_name_len = len;
+
+ sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
+ sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (!ret)
+ wait_for_completion(&completion);
+}
+
+/**
+ * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
+ * @work: ns_up_work of the adapter where to update the symbolic port name
+ *
+ * Retrieve the current symbolic port name that may have been set by
+ * the hardware using the GSPN request and update the fc_host
+ * symbolic_name sysfs attribute. When running in NPIV mode (and hence
+ * the port name is unique for this system), update the symbolic port
+ * name to add Linux specific information and update the FC nameserver
+ * using the RSPN request.
+ */
+void zfcp_fc_sym_name_update(struct work_struct *work)
+{
+ struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+ ns_up_work);
+ int ret;
+ struct zfcp_fc_req *fc_req;
+
+ if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+ fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ return;
+
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
+ return;
+
+ ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+ if (ret)
+ goto out_free;
+
+ ret = zfcp_fc_gspn(adapter, fc_req);
+ if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ goto out_ds_put;
+
+ memset(fc_req, 0, sizeof(*fc_req));
+ zfcp_fc_rspn(adapter, fc_req);
+
+out_ds_put:
+ zfcp_fc_wka_port_put(&adapter->gs->ds);
+out_free:
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
static void zfcp_fc_ct_els_job_handler(void *data)
{
struct fc_bsg_job *job = data;
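
The symbolic-name update added above issues GSPN_ID and, when the adapter runs in NPIV mode and the returned name does not yet carry a "DEVNO:" tag, rewrites the name before registering it with the name server via RSPN_ID. A small self-contained sketch of that string composition; the helper name, buffer size and sample values are illustrative only and not part of the driver:

    #include <stdio.h>
    #include <string.h>

    #define SYM_NAME_SIZE 256   /* stands in for FC_SYMBOLIC_NAME_SIZE */

    /*
     * Hypothetical helper mirroring the NPIV branch of zfcp_fc_gspn():
     * keep the name reported by the name server, but append a
     * "DEVNO: ... NAME: ..." suffix when it is not already present.
     */
    static void build_symbolic_name(char *out, const char *gspn_name,
                                    const char *bus_id, const char *nodename)
    {
        if (strstr(gspn_name, "DEVNO:"))
            /* a zfcp-style name is already registered, keep it */
            snprintf(out, SYM_NAME_SIZE, "%s", gspn_name);
        else
            snprintf(out, SYM_NAME_SIZE, "%sDEVNO: %s NAME: %s",
                     gspn_name, bus_id, nodename);
    }

    int main(void)
    {
        char sym[SYM_NAME_SIZE];

        build_symbolic_name(sym, "IBM 2107900 ", "0.0.1900", "linuxhost");
        printf("%s\n", sym);
        return 0;
    }
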
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index b464ae01086c..4561f3bf7300 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -64,33 +64,16 @@ struct zfcp_fc_gid_pn_req {
} __packed;
/**
- * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
+ * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
* @ct_hdr: FC GS common transport header
* @gid_pn: GID_PN response
*/
-struct zfcp_fc_gid_pn_resp {
+struct zfcp_fc_gid_pn_rsp {
struct fc_ct_hdr ct_hdr;
struct fc_gid_pn_resp gid_pn;
} __packed;
/**
- * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatterlist entry for request data
- * @sg_resp: scatterlist entry for response data
- * @gid_pn_req: GID_PN request data
- * @gid_pn_resp: GID_PN response data
- */
-struct zfcp_fc_gid_pn {
- struct zfcp_fsf_ct_els ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp;
- struct zfcp_fc_gid_pn_req gid_pn_req;
- struct zfcp_fc_gid_pn_resp gid_pn_resp;
- struct zfcp_port *port;
-};
-
-/**
* struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request
* @ct_hdr: FC GS common transport header
* @gpn_ft: GPN_FT request
@@ -101,41 +84,72 @@ struct zfcp_fc_gpn_ft_req {
} __packed;
/**
- * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
+ * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
* @ct_hdr: FC GS common transport header
- * @gpn_ft: Array of gpn_ft response data to fill one memory page
+ * @gspn: GSPN_ID request
*/
-struct zfcp_fc_gpn_ft_resp {
+struct zfcp_fc_gspn_req {
struct fc_ct_hdr ct_hdr;
- struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
+ struct fc_gid_pn_resp gspn;
} __packed;
/**
- * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatter list entry for gpn_ft request
- * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
+ * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID response
+ * @name: The name string of the GSPN_ID response
*/
-struct zfcp_fc_gpn_ft {
- struct zfcp_fsf_ct_els ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
-};
+struct zfcp_fc_gspn_rsp {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_gspn_resp gspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
/**
- * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
- * @els: data required for issuing els fsf command
- * @req: scatterlist entry for ELS ADISC request
- * @resp: scatterlist entry for ELS ADISC response
- * @adisc_req: ELS ADISC request data
- * @adisc_resp: ELS ADISC response data
+ * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @rspn: RSPN_ID request
+ * @name: The name string of the RSPN_ID request
*/
-struct zfcp_fc_els_adisc {
- struct zfcp_fsf_ct_els els;
- struct scatterlist req;
- struct scatterlist resp;
- struct fc_els_adisc adisc_req;
- struct fc_els_adisc adisc_resp;
+struct zfcp_fc_rspn_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_ns_rspn rspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
+ * @ct_els: data required for issuing fsf command
+ * @sg_req: scatterlist entry for request data
+ * @sg_rsp: scatterlist entry for response data
+ * @u: request specific data
+ */
+struct zfcp_fc_req {
+ struct zfcp_fsf_ct_els ct_els;
+ struct scatterlist sg_req;
+ struct scatterlist sg_rsp;
+ union {
+ struct {
+ struct fc_els_adisc req;
+ struct fc_els_adisc rsp;
+ } adisc;
+ struct {
+ struct zfcp_fc_gid_pn_req req;
+ struct zfcp_fc_gid_pn_rsp rsp;
+ } gid_pn;
+ struct {
+ struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
+ struct zfcp_fc_gpn_ft_req req;
+ } gpn_ft;
+ struct {
+ struct zfcp_fc_gspn_req req;
+ struct zfcp_fc_gspn_rsp rsp;
+ } gspn;
+ struct {
+ struct zfcp_fc_rspn_req req;
+ struct fc_ct_hdr rsp;
+ } rspn;
+ } u;
};
/**
@@ -192,14 +206,21 @@ struct zfcp_fc_wka_ports {
* zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
* @fcp: fcp_cmnd to setup
* @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
+ * @tm: task management flags to setup task management command
*/
static inline
-void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
+ u8 tm_flags)
{
char tag[2];
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
+ if (unlikely(tm_flags)) {
+ fcp->fc_tm_flags = tm_flags;
+ return;
+ }
+
if (scsi_populate_tag_msg(scsi, tag)) {
switch (tag[0]) {
case MSG_ORDERED_TAG:
@@ -226,19 +247,6 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
}
/**
- * zfcp_fc_fcp_tm - setup FCP command as task management command
- * @fcp: fcp_cmnd to setup
- * @dev: scsi_device where to send the task management command
- * @tm: task management flags to setup tm command
- */
-static inline
-void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
-{
- int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
- fcp->fc_tm_flags |= tm_flags;
-}
-
-/**
* zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
* @fcp_rsp: FCP RSP IU to evaluate
* @scsi: SCSI command where to update status and sense buffer
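
The header rework above folds the former per-command containers (zfcp_fc_gid_pn, zfcp_fc_gpn_ft, zfcp_fc_els_adisc) into one struct zfcp_fc_req whose union is sized by its largest member, so a single slab cache (zfcp_fc_req_cache) can back ADISC, GID_PN, GPN_FT, GSPN and RSPN alike. A rough, self-contained sketch of that layout idea; the member names and sizes are placeholders, not the real FC definitions:

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Simplified stand-in for struct zfcp_fc_req: generic request state
     * plus a union of per-command payloads, so one allocation (one slab
     * cache in the driver, plain malloc here) fits every request type.
     * The payload sizes below are made up for illustration.
     */
    struct fc_req {
        int status;                 /* stands in for the ct_els bookkeeping */
        union {
            char adisc[2 * 28];     /* ELS ADISC request + response    */
            char gid_pn[16 + 20];   /* CT header + GID_PN req/rsp      */
            char gspn[16 + 272];    /* CT header + symbolic name       */
        } u;
    };

    int main(void)
    {
        struct fc_req *req = calloc(1, sizeof(*req));

        if (!req)
            return 1;
        printf("one cache object of %zu bytes serves all request types\n",
               sizeof(*req));
        free(req);
        return 0;
    }
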
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 60ff9d172c79..a0e05ef65924 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -18,6 +18,8 @@
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+struct kmem_cache *zfcp_fsf_qtcb_cache;
+
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
@@ -83,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
}
if (likely(req->qtcb))
- kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
+ kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
kfree(req);
}
@@ -212,7 +214,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
zfcp_dbf_hba_fsf_uss("fssrh_1", req);
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
return;
}
@@ -265,7 +267,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
break;
}
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
atomic_inc(&adapter->stat_miss);
@@ -628,7 +630,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
if (likely(pool))
qtcb = mempool_alloc(pool, GFP_ATOMIC);
else
- qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
+ qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
if (unlikely(!qtcb))
return NULL;
@@ -723,6 +725,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
+ struct page *page;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -736,11 +739,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
goto out;
}
- sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
- if (!sr_buf) {
+ page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+ if (!page) {
retval = -ENOMEM;
goto failed_buf;
}
+ sr_buf = page_address(page);
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
@@ -755,7 +759,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
failed_req_send:
req->data = NULL;
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
zfcp_dbf_hba_fsf_uss("fssr__1", req);
zfcp_fsf_req_free(req);
@@ -1552,7 +1556,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
- if (unlikely(IS_ERR(req))) {
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
@@ -1605,7 +1609,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
- if (unlikely(IS_ERR(req))) {
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
@@ -2206,7 +2210,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
- zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
if (scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
@@ -2284,7 +2288,6 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
goto out;
}
- req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
req->data = scmnd;
req->handler = zfcp_fsf_fcp_task_mgmt_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
@@ -2296,7 +2299,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
- zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
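
With zfcp_fc_fcp_tm() removed, both regular I/O and task management go through zfcp_fc_scsi_to_fcp(), which fills only the LUN and TM flags and returns early when tm_flags is set. A compact standalone sketch of that early-return shape; the structures and the flag value are simplified placeholders rather than the real FCP definitions:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* trimmed-down stand-ins for the FCP command and SCSI command */
    struct fcp_cmnd { uint64_t fc_lun; uint8_t fc_tm_flags; uint8_t fc_cdb[16]; };
    struct scsi_cmnd { uint64_t lun; uint8_t cdb[16]; };

    /*
     * Sketch of the merged helper: with tm_flags set it fills only the
     * LUN and TM flags and returns early, otherwise it copies the CDB,
     * mirroring the shape of the unified zfcp_fc_scsi_to_fcp().
     */
    static void scsi_to_fcp(struct fcp_cmnd *fcp, const struct scsi_cmnd *scsi,
                            uint8_t tm_flags)
    {
        fcp->fc_lun = scsi->lun;
        if (tm_flags) {
            fcp->fc_tm_flags = tm_flags;
            return;
        }
        memcpy(fcp->fc_cdb, scsi->cdb, sizeof(fcp->fc_cdb));
    }

    int main(void)
    {
        struct scsi_cmnd sc = { .lun = 3, .cdb = { 0x28 /* READ(10) */ } };
        struct fcp_cmnd fc = { 0 };

        scsi_to_fcp(&fc, &sc, 0x02 /* e.g. a LUN-reset flag */);
        printf("lun=%llu tm_flags=0x%02x\n",
               (unsigned long long)fc.fc_lun, fc.fc_tm_flags);
        return 0;
    }
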
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index ddb5800823a9..2a4991d6d4d5 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -292,7 +292,37 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
return SUCCESS;
}
-int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
+struct scsi_transport_template *zfcp_scsi_transport_template;
+
+static struct scsi_host_template zfcp_scsi_host_template = {
+ .module = THIS_MODULE,
+ .name = "zfcp",
+ .queuecommand = zfcp_scsi_queuecommand,
+ .eh_abort_handler = zfcp_scsi_eh_abort_handler,
+ .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+ .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+ .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
+ .slave_alloc = zfcp_scsi_slave_alloc,
+ .slave_configure = zfcp_scsi_slave_configure,
+ .slave_destroy = zfcp_scsi_slave_destroy,
+ .change_queue_depth = zfcp_scsi_change_queue_depth,
+ .proc_name = "zfcp",
+ .can_queue = 4096,
+ .this_id = -1,
+ .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
+ .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
+ .cmd_per_lun = 1,
+ .use_clustering = 1,
+ .shost_attrs = zfcp_sysfs_shost_attrs,
+ .sdev_attrs = zfcp_sysfs_sdev_attrs,
+};
+
+/**
+ * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * @adapter: The zfcp adapter to register with the SCSI midlayer
+ */
+int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
@@ -301,7 +331,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
- adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
+ adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host) {
dev_err(&adapter->ccw_device->dev,
@@ -316,7 +346,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
- adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
+ adapter->scsi_host->transportt = zfcp_scsi_transport_template;
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
@@ -328,7 +358,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
return 0;
}
-void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
+/**
+ * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
+ * @adapter: The zfcp adapter to unregister.
+ */
+void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
@@ -346,8 +380,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
-
- return;
}
static struct fc_host_statistics*
@@ -688,33 +720,8 @@ struct fc_function_template zfcp_transport_functions = {
/* no functions registered for following dynamic attributes but
directly set by LLDD */
.show_host_port_type = 1,
+ .show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
-
-struct zfcp_data zfcp_data = {
- .scsi_host_template = {
- .name = "zfcp",
- .module = THIS_MODULE,
- .proc_name = "zfcp",
- .change_queue_depth = zfcp_scsi_change_queue_depth,
- .slave_alloc = zfcp_scsi_slave_alloc,
- .slave_configure = zfcp_scsi_slave_configure,
- .slave_destroy = zfcp_scsi_slave_destroy,
- .queuecommand = zfcp_scsi_queuecommand,
- .eh_abort_handler = zfcp_scsi_eh_abort_handler,
- .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
- .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
- .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
- .can_queue = 4096,
- .this_id = -1,
- .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
- .cmd_per_lun = 1,
- .use_clustering = 1,
- .sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
- .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
- .shost_attrs = zfcp_sysfs_shost_attrs,
- },
-};
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8616496ffc02..4a1f029c4fe9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -381,6 +381,7 @@ config ISCSI_BOOT_SYSFS
source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
+source "drivers/scsi/bnx2fc/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
config SGIWD93_SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index ef6de669424b..7ad0b8a79ae8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 9a5629f94f95..e7cd2fcbe036 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -936,8 +936,7 @@ static void NCR5380_exit(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
- cancel_delayed_work(&hostdata->coroutine);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&hostdata->coroutine);
}
/**
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index f1cca4ee5410..92df4d6b6147 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
dpcsup.o rx.o sa.o rkt.o nark.o
-EXTRA_CFLAGS := -Idrivers/scsi
+ccflags-y := -Idrivers/scsi
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index be5558ab84ea..95ee50385188 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -672,7 +672,7 @@ struct scb_data {
/************************ Target Mode Definitions *****************************/
/*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
*/
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index e4e651cca3e4..17444bc18bca 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -618,7 +618,7 @@ struct scb_data {
/************************ Target Mode Definitions *****************************/
/*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
*/
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 3f5a542a7793..e021b4812d58 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -4780,7 +4780,7 @@ ahc_init_scbdata(struct ahc_softc *ahc)
SLIST_INIT(&scb_data->sg_maps);
/* Allocate SCB resources */
- scb_data->scbarray = (struct scb *)kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
+ scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
if (scb_data->scbarray == NULL)
return (ENOMEM);
memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
index e78ce0fa44d2..c0a15c754585 100644
--- a/drivers/scsi/aic94xx/Makefile
+++ b/drivers/scsi/aic94xx/Makefile
@@ -22,9 +22,7 @@
# along with the aic94xx driver; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-ifeq ($(CONFIG_AIC94XX_DEBUG),y)
- EXTRA_CFLAGS += -DASD_DEBUG -DASD_ENTER_EXIT
-endif
+ccflags-$(CONFIG_AIC94XX_DEBUG) := -DASD_DEBUG -DASD_ENTER_EXIT
obj-$(CONFIG_SCSI_AIC94XX) += aic94xx.o
aic94xx-y += aic94xx_init.o \
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 984bd527c6c9..da7b9887ec48 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1020,7 +1020,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
int poll_count = 0;
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
- flush_scheduled_work();
+ flush_work_sync(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
@@ -1066,7 +1066,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
- flush_scheduled_work();
+ flush_work_sync(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index eaaa8813067d..868cc5590145 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -210,28 +210,20 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
}
/**
- * beiscsi_conn_get_param - get the iscsi parameter
- * @cls_conn: pointer to iscsi cls conn
+ * beiscsi_ep_get_param - get the iscsi parameter
+ * @ep: pointer to iscsi ep
* @param: parameter type identifier
* @buf: buffer pointer
*
* returns iscsi parameter
*/
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
- struct beiscsi_endpoint *beiscsi_ep;
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
int len = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
- beiscsi_ep = beiscsi_conn->ep;
- if (!beiscsi_ep) {
- SE_DEBUG(DBG_LVL_1,
- "In beiscsi_conn_get_param , no beiscsi_ep\n");
- return -ENODEV;
- }
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -244,7 +236,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
break;
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
return len;
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 8950a702b9f4..9c532797c29e 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -48,8 +48,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_fd, int is_leading);
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf);
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf);
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 638c72b7f94a..24e20ba9633c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4384,7 +4384,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.bind_conn = beiscsi_conn_bind,
.destroy_conn = iscsi_conn_teardown,
.set_param = beiscsi_set_param,
- .get_conn_param = beiscsi_conn_get_param,
+ .get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.get_host_param = beiscsi_get_host_param,
.start_conn = beiscsi_conn_start,
@@ -4395,6 +4395,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.alloc_pdu = beiscsi_alloc_pdu,
.parse_pdu_itt = beiscsi_parse_pdu,
.get_stats = beiscsi_conn_get_stats,
+ .get_ep_param = beiscsi_ep_get_param,
.ep_connect = beiscsi_ep_connect,
.ep_poll = beiscsi_ep_poll,
.ep_disconnect = beiscsi_ep_disconnect,
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
new file mode 100644
index 000000000000..69d031d98469
--- /dev/null
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -0,0 +1,1080 @@
+#ifndef __57XX_FCOE_HSI_LINUX_LE__
+#define __57XX_FCOE_HSI_LINUX_LE__
+
+/*
+ * common data for all protocols
+ */
+struct b577xx_doorbell_hdr {
+ u8 header;
+#define B577XX_DOORBELL_HDR_RX (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT 0
+#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * doorbell message sent to the chip
+ */
+struct b577xx_doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+ u16 prod;
+ u8 zero_fill1;
+ struct b577xx_doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct b577xx_doorbell_hdr header;
+ u8 zero_fill1;
+ u16 prod;
+#endif
+};
+
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+
+/*
+ * Fixed-size structure so it can be placed in a union
+ */
+struct fcoe_abts_rsp_union {
+ u32 r_ctl;
+ u32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs size
+ */
+struct fcoe_bd_ctx {
+ u32 buf_addr_hi;
+ u32 buf_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 buf_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len;
+ u16 rsrv0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rsrv1;
+ u16 flags;
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+ u16 rsrv1;
+#endif
+};
+
+
+struct fcoe_cleanup_flow_info {
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u16 task_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 task_id;
+ u16 reserved1;
+#endif
+ u32 reserved2[7];
+};
+
+
+struct fcoe_fcp_cmd_payload {
+ u32 opaque[8];
+};
+
+struct fcoe_fc_hdr {
+#if defined(__BIG_ENDIAN)
+ u8 cs_ctl;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 r_ctl;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 r_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 seq_id;
+ u8 df_ctl;
+ u16 seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 seq_cnt;
+ u8 df_ctl;
+ u8 seq_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 type;
+ u8 f_ctl[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 f_ctl[3];
+ u8 type;
+#endif
+ u32 parameters;
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 rx_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_id;
+ u16 ox_id;
+#endif
+};
+
+struct fcoe_fc_frame {
+ struct fcoe_fc_hdr fc_hdr;
+ u32 reserved0[2];
+};
+
+union fcoe_cmd_flow_info {
+ struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+ struct fcoe_fc_frame mp_fc_frame;
+};
+
+
+
+struct fcoe_fcp_rsp_flags {
+ u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+
+struct fcoe_fcp_rsp_payload {
+ struct regpair reserved0;
+ u32 fcp_resid;
+#if defined(__BIG_ENDIAN)
+ u16 retry_delay_timer;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u8 scsi_status_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 scsi_status_code;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u16 retry_delay_timer;
+#endif
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+};
+
+
+/*
+ * Fixed-size structure so it can be placed in a union
+ */
+struct fcoe_fcp_rsp_union {
+ struct fcoe_fcp_rsp_payload payload;
+ struct regpair reserved0;
+};
+
+
+struct fcoe_fcp_xfr_rdy_payload {
+ u32 burst_len;
+ u32 data_ro;
+};
+
+struct fcoe_read_flow_info {
+ struct fcoe_fc_hdr fc_data_in_hdr;
+ u32 reserved[2];
+};
+
+struct fcoe_write_flow_info {
+ struct fcoe_fc_hdr fc_data_out_hdr;
+ struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
+};
+
+union fcoe_rsp_flow_info {
+ struct fcoe_fcp_rsp_union fcp_rsp;
+ struct fcoe_abts_rsp_union abts_rsp;
+};
+
+/*
+ * 32 bytes used for general purposes
+ */
+union fcoe_general_task_ctx {
+ union fcoe_cmd_flow_info cmd_info;
+ struct fcoe_read_flow_info read_info;
+ struct fcoe_write_flow_info write_info;
+ union fcoe_rsp_flow_info rsp_info;
+ struct fcoe_cleanup_flow_info cleanup_info;
+ u32 comp_info[8];
+};
+
+
+/*
+ * FCoE KCQ CQE parameters
+ */
+union fcoe_kcqe_params {
+ u32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE
+ */
+struct fcoe_kcqe {
+ u32 fcoe_conn_id;
+ u32 completion_status;
+ u32 fcoe_conn_context_id;
+ union fcoe_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE KWQE header
+ */
+struct fcoe_kwqe_header {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+ u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 op_code;
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 1
+ */
+struct fcoe_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 num_tasks;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_tasks;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 task_list_pbl_addr_lo;
+ u32 task_list_pbl_addr_hi;
+ u32 dummy_buffer_addr_lo;
+ u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rq_num_wqes;
+ u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_num_wqes;
+ u16 rq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cq_num_wqes;
+ u16 rq_buffer_log_size;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_buffer_log_size;
+ u16 cq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+ u8 num_sessions_log;
+ u16 mtu;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mtu;
+ u8 num_sessions_log;
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 2
+ */
+struct fcoe_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 hash_tbl_pbl_addr_lo;
+ u32 hash_tbl_pbl_addr_hi;
+ u32 t2_hash_tbl_addr_lo;
+ u32 t2_hash_tbl_addr_hi;
+ u32 t2_ptr_hash_tbl_addr_lo;
+ u32 t2_ptr_hash_tbl_addr_hi;
+ u32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3
+ */
+struct fcoe_kwqe_init3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 error_bit_map_lo;
+ u32 error_bit_map_hi;
+#if defined(__BIG_ENDIAN)
+ u8 reserved21[3];
+ u8 cached_session_enable;
+#elif defined(__LITTLE_ENDIAN)
+ u8 cached_session_enable;
+ u8 reserved21[3];
+#endif
+ u32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1
+ */
+struct fcoe_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 fcoe_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 fcoe_conn_id;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 sq_addr_lo;
+ u32 sq_addr_hi;
+ u32 rq_pbl_addr_lo;
+ u32 rq_pbl_addr_hi;
+ u32 rq_first_pbe_addr_lo;
+ u32 rq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 reserved0;
+#endif
+};
+
+/*
+ * FCoE connection offload request 2
+ */
+struct fcoe_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_max_fc_pay_len;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 cq_addr_lo;
+ u32 cq_addr_hi;
+ u32 xferq_addr_lo;
+ u32 xferq_addr_hi;
+ u32 conn_db_addr_lo;
+ u32 conn_db_addr_hi;
+ u32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3
+ */
+struct fcoe_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+ struct fcoe_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 tx_max_conc_seqs_c3;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+#endif
+ u32 reserved;
+ u32 confq_first_pbe_addr_lo;
+ u32 confq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rx_max_fc_pay_len;
+ u16 tx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_total_conc_seqs;
+ u16 rx_max_fc_pay_len;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 rx_open_seqs_exch_c3;
+ u8 rx_max_conc_seqs_c3;
+ u16 rx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_total_conc_seqs;
+ u8 rx_max_conc_seqs_c3;
+ u8 rx_open_seqs_exch_c3;
+#endif
+};
+
+/*
+ * FCoE connection offload request 4
+ */
+struct fcoe_kwqe_conn_offload4 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u8 reserved2;
+ u8 e_d_tov_timer_val;
+#elif defined(__LITTLE_ENDIAN)
+ u8 e_d_tov_timer_val;
+ u8 reserved2;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u8 dst_mac_addr_hi16[2];
+#endif
+ u8 dst_mac_addr_lo32[4];
+ u32 lcq_addr_lo;
+ u32 lcq_addr_hi;
+ u32 confq_pbl_base_addr_lo;
+ u32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable request
+ */
+struct fcoe_kwqe_conn_enable_disable {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+#endif
+ u8 dst_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 dst_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u16 reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 vlan_flag;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 vlan_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved3;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 reserved3;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request
+ */
+struct fcoe_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request
+ */
+struct fcoe_kwqe_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request
+ */
+struct fcoe_kwqe_stat {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 stat_params_addr_lo;
+ u32 stat_params_addr_hi;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE
+ */
+union fcoe_kwqe {
+ struct fcoe_kwqe_init1 init1;
+ struct fcoe_kwqe_init2 init2;
+ struct fcoe_kwqe_init3 init3;
+ struct fcoe_kwqe_conn_offload1 conn_offload1;
+ struct fcoe_kwqe_conn_offload2 conn_offload2;
+ struct fcoe_kwqe_conn_offload3 conn_offload3;
+ struct fcoe_kwqe_conn_offload4 conn_offload4;
+ struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+ struct fcoe_kwqe_conn_destroy conn_destroy;
+ struct fcoe_kwqe_destroy destroy;
+ struct fcoe_kwqe_stat statistics;
+};
+
+struct fcoe_mul_sges_ctx {
+ struct regpair cur_sge_addr;
+#if defined(__BIG_ENDIAN)
+ u8 sgl_size;
+ u8 cur_sge_idx;
+ u16 cur_sge_off;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_sge_off;
+ u8 cur_sge_idx;
+ u8 sgl_size;
+#endif
+};
+
+struct fcoe_s_stat_ctx {
+ u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+struct fcoe_seq_ctx {
+#if defined(__BIG_ENDIAN)
+ u16 low_seq_cnt;
+ struct fcoe_s_stat_ctx s_stat;
+ u8 seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 seq_id;
+ struct fcoe_s_stat_ctx s_stat;
+ u16 low_seq_cnt;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 err_seq_cnt;
+ u16 high_seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 high_seq_cnt;
+ u16 err_seq_cnt;
+#endif
+ u32 low_exp_ro;
+ u32 high_exp_ro;
+};
+
+
+struct fcoe_single_sge_ctx {
+ struct regpair cur_buf_addr;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 cur_buf_rem;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_buf_rem;
+ u16 reserved0;
+#endif
+};
+
+union fcoe_sgl_ctx {
+ struct fcoe_single_sge_ctx single_sge;
+ struct fcoe_mul_sges_ctx mul_sges;
+};
+
+
+
+/*
+ * FCoE SQ element
+ */
+struct fcoe_sqe {
+ u16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+
+struct fcoe_task_ctx_entry_tx_only {
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry_txwr_rxrd {
+#if defined(__BIG_ENDIAN)
+ u16 verify_tx_seq;
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u16 verify_tx_seq;
+#endif
+};
+
+/*
+ * Common section. Both TX and RX processing might write and read from it in
+ * different flows
+ */
+struct fcoe_task_ctx_entry_tx_rx_cmn {
+ u32 data_2_trns;
+ union fcoe_general_task_ctx general;
+#if defined(__BIG_ENDIAN)
+ u16 tx_low_seq_cnt;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u8 tx_seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_seq_id;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u16 tx_low_seq_cnt;
+#endif
+ u32 common_flags;
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
+};
+
+struct fcoe_task_ctx_entry_rxwr_txrd {
+#if defined(__BIG_ENDIAN)
+ u16 rx_id;
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+ u16 rx_id;
+#endif
+};
+
+struct fcoe_task_ctx_entry_rx_only {
+ struct fcoe_seq_ctx seq_ctx;
+ struct fcoe_seq_ctx ooo_seq_ctx;
+ u32 rsrv3;
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry {
+ struct fcoe_task_ctx_entry_tx_only tx_wr_only;
+ struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
+ struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
+ struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
+ struct fcoe_task_ctx_entry_rx_only rx_wr_only;
+ u32 reserved[4];
+};
+
+
+/*
+ * FCoE XFRQ element
+ */
+struct fcoe_xfrqe {
+ u16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE CONFQ element
+ */
+struct fcoe_confqe {
+#if defined(__BIG_ENDIAN)
+ u16 rx_id;
+ u16 ox_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 ox_id;
+ u16 rx_id;
+#endif
+ u32 param;
+};
+
+
+/*
+ * FCoE connection database
+ */
+struct fcoe_conn_db {
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 rsrv0;
+#endif
+ u32 rsrv1;
+ struct regpair cq_arm;
+};
+
+
+/*
+ * FCoE CQ element
+ */
+struct fcoe_cqe {
+ u16 wqe;
+#define FCOE_CQE_CQE_INFO (0x3FFF<<0)
+#define FCOE_CQE_CQE_INFO_SHIFT 0
+#define FCOE_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE error/warning reporting entry
+ */
+struct fcoe_err_report_entry {
+ u32 err_warn_bitmap_lo;
+ u32 err_warn_bitmap_hi;
+ u32 tx_buf_off;
+ u32 rx_buf_off;
+ struct fcoe_fc_hdr fc_hdr;
+};
+
+
+/*
+ * FCoE hash table entry (32 bytes)
+ */
+struct fcoe_hash_table_entry {
+#if defined(__BIG_ENDIAN)
+ u8 d_id_0;
+ u8 s_id_2;
+ u8 s_id_1;
+ u8 s_id_0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id_0;
+ u8 s_id_1;
+ u8 s_id_2;
+ u8 d_id_0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 dst_mac_addr_hi;
+ u8 d_id_2;
+ u8 d_id_1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id_1;
+ u8 d_id_2;
+ u16 dst_mac_addr_hi;
+#endif
+ u32 dst_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 vlan_id;
+ u16 src_mac_addr_hi;
+#elif defined(__LITTLE_ENDIAN)
+ u16 src_mac_addr_hi;
+ u16 vlan_id;
+#endif
+ u32 src_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 reserved0;
+ u8 vlan_flag;
+#elif defined(__LITTLE_ENDIAN)
+ u8 vlan_flag;
+ u8 reserved0;
+ u16 reserved1;
+#endif
+ u32 reserved2;
+ u32 field_id;
+#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0)
+#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24)
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24
+#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31)
+#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31
+};
+
+/*
+ * FCoE pending work request CQE
+ */
+struct fcoe_pend_wq_cqe {
+ u16 wqe;
+#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0)
+#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0
+#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE RX statistics parameters section#0
+ */
+struct fcoe_rx_stat_params_section0 {
+ u32 fcoe_ver_cnt;
+ u32 fcoe_rx_pkt_cnt;
+ u32 fcoe_rx_byte_cnt;
+ u32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+ u32 fc_crc_cnt;
+ u32 eofa_del_cnt;
+ u32 miss_frame_cnt;
+ u32 seq_timeout_cnt;
+ u32 drop_seq_cnt;
+ u32 fcoe_rx_drop_pkt_cnt;
+ u32 fcp_rx_pkt_cnt;
+ u32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+ u32 fcoe_tx_pkt_cnt;
+ u32 fcoe_tx_byte_cnt;
+ u32 fcp_tx_pkt_cnt;
+ u32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+};
+
+
+/*
+ * FCoE t2 hash table entry (64 bytes)
+ */
+struct fcoe_t2_hash_table_entry {
+ struct fcoe_hash_table_entry data;
+ struct regpair next;
+ struct regpair reserved0[3];
+};
+
+/*
+ * FCoE unsolicited CQE
+ */
+struct fcoe_unsolicited_cqe {
+ u16 wqe;
+#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0)
+#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0
+#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2)
+#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+
+#endif /* __57XX_FCOE_HSI_LINUX_LE__ */
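
The HSI header above describes each hardware word as a value plus mask/shift #define pairs (see struct fcoe_sqe, struct fcoe_cqe and the KCQE/KWQE flag fields). A standalone sketch of how such pairs are typically packed and unpacked; the macro names are shortened here and are not the driver's own:

    #include <stdio.h>
    #include <stdint.h>

    /* mask/shift pairs modelled on the SQ element definition */
    #define SQE_TASK_ID          (0x7FFF << 0)
    #define SQE_TASK_ID_SHIFT    0
    #define SQE_TOGGLE_BIT       (0x1 << 15)
    #define SQE_TOGGLE_BIT_SHIFT 15

    int main(void)
    {
        uint16_t wqe = 0;

        /* pack: place each field under its mask at its shift */
        wqe |= (0x1234 << SQE_TASK_ID_SHIFT) & SQE_TASK_ID;
        wqe |= (1 << SQE_TOGGLE_BIT_SHIFT) & SQE_TOGGLE_BIT;

        /* unpack: mask first, then shift back down */
        printf("task_id=0x%x toggle=%u\n",
               (wqe & SQE_TASK_ID) >> SQE_TASK_ID_SHIFT,
               (wqe & SQE_TOGGLE_BIT) >> SQE_TOGGLE_BIT_SHIFT);
        return 0;
    }
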
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
new file mode 100644
index 000000000000..6a38080e35ed
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -0,0 +1,11 @@
+config SCSI_BNX2X_FCOE
+ tristate "Broadcom NetXtreme II FCoE support"
+ depends on PCI
+ select NETDEVICES
+ select NETDEV_1000
+ select LIBFC
+ select LIBFCOE
+ select CNIC
+ ---help---
+ This driver supports FCoE offload for the Broadcom NetXtreme II
+ devices.
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile
new file mode 100644
index 000000000000..a92695a25176
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
+
+bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
new file mode 100644
index 000000000000..df2fc09ba479
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -0,0 +1,511 @@
+#ifndef _BNX2FC_H_
+#define _BNX2FC_H_
+/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_encode.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fc2.h>
+#include <scsi/fc_frame.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "57xx_hsi_bnx2fc.h"
+#include "bnx2fc_debug.h"
+#include "../../net/cnic_if.h"
+#include "bnx2fc_constants.h"
+
+#define BNX2FC_NAME "bnx2fc"
+#define BNX2FC_VERSION "1.0.0"
+
+#define PFX "bnx2fc: "
+
+#define BNX2X_DOORBELL_PCI_BAR 2
+
+#define BNX2FC_MAX_BD_LEN 0xffff
+#define BNX2FC_BD_SPLIT_SZ 0x8000
+#define BNX2FC_MAX_BDS_PER_CMD 256
+
+#define BNX2FC_SQ_WQES_MAX 256
+
+#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8)
+#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2)
+#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1)
+
+#define BNX2FC_RQ_WQES_MAX 16
+#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
+
+#define BNX2FC_NUM_MAX_SESS 128
+#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
+
+#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096
+#define BNX2FC_MIN_PAYLOAD 256
+#define BNX2FC_MAX_PAYLOAD 2048
+
+#define BNX2FC_RQ_BUF_SZ 256
+#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ))
+
+#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe))
+#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe))
+#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
+#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
+#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
+#define BNX2FC_5771X_DB_PAGE_SIZE 128
+
+#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_TASK_SIZE 128
+#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
+#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
+
+#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8
+#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
+
+#define BNX2FC_MAX_SEQS 255
+
+#define BNX2FC_READ (1 << 1)
+#define BNX2FC_WRITE (1 << 0)
+
+#define BNX2FC_MIN_XID 0
+#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
+#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS)
+#define FCOE_MAX_XID \
+ (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
+#define BNX2FC_MAX_LUN 0xFFFF
+#define BNX2FC_MAX_FCP_TGT 256
+#define BNX2FC_MAX_CMD_LEN 16
+
+#define BNX2FC_TM_TIMEOUT 60 /* secs */
+#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */
+
+#define BNX2FC_WAIT_CNT 120
+#define BNX2FC_FW_TIMEOUT (3 * HZ)
+
+#define PORT_MAX 2
+
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+
+/* FC FCP Status */
+#define FC_GOOD 0
+
+#define BNX2FC_RNID_HBA 0x7
+
+/* bnx2fc driver uses only one instance of fcoe_percpu_s */
+extern struct fcoe_percpu_s bnx2fc_global;
+
+extern struct workqueue_struct *bnx2fc_wq;
+
+struct bnx2fc_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t fp_work_lock;
+};
+
+
+struct bnx2fc_hba {
+ struct list_head link;
+ struct cnic_dev *cnic;
+ struct pci_dev *pcidev;
+ struct net_device *netdev;
+ struct net_device *phys_dev;
+ unsigned long reg_with_cnic;
+ #define BNX2FC_CNIC_REGISTERED 1
+ struct packet_type fcoe_packet_type;
+ struct packet_type fip_packet_type;
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ struct workqueue_struct *timer_work_queue;
+ struct kref kref;
+ spinlock_t hba_lock;
+ struct mutex hba_mutex;
+ unsigned long adapter_state;
+ #define ADAPTER_STATE_UP 0
+ #define ADAPTER_STATE_GOING_DOWN 1
+ #define ADAPTER_STATE_LINK_DOWN 2
+ #define ADAPTER_STATE_READY 3
+ u32 flags;
+ unsigned long init_done;
+ #define BNX2FC_FW_INIT_DONE 0
+ #define BNX2FC_CTLR_INIT_DONE 1
+ #define BNX2FC_CREATE_DONE 2
+ struct fcoe_ctlr ctlr;
+ u8 vlan_enabled;
+ int vlan_id;
+ u32 next_conn_id;
+ struct fcoe_task_ctx_entry **task_ctx;
+ dma_addr_t *task_ctx_dma;
+ struct regpair *task_ctx_bd_tbl;
+ dma_addr_t task_ctx_bd_dma;
+
+ int hash_tbl_segment_count;
+ void **hash_tbl_segments;
+ void *hash_tbl_pbl;
+ dma_addr_t hash_tbl_pbl_dma;
+ struct fcoe_t2_hash_table_entry *t2_hash_tbl;
+ dma_addr_t t2_hash_tbl_dma;
+ char *t2_hash_tbl_ptr;
+ dma_addr_t t2_hash_tbl_ptr_dma;
+
+ char *dummy_buffer;
+ dma_addr_t dummy_buf_dma;
+
+ struct fcoe_statistics_params *stats_buffer;
+ dma_addr_t stats_buf_dma;
+
+ /*
+ * PCI related info.
+ */
+ u16 pci_did;
+ u16 pci_vid;
+ u16 pci_sdid;
+ u16 pci_svid;
+ u16 pci_func;
+ u16 pci_devno;
+
+ struct task_struct *l2_thread;
+
+ /* linkdown handling */
+ wait_queue_head_t shutdown_wait;
+ int wait_for_link_down;
+
+ /* destroy handling */
+ struct timer_list destroy_timer;
+ wait_queue_head_t destroy_wait;
+
+ /* Active list of offloaded sessions */
+ struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
+ int num_ofld_sess;
+
+ /* statistics */
+ struct completion stat_req_done;
+};
+
+#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
+
+struct bnx2fc_cmd_mgr {
+ struct bnx2fc_hba *hba;
+ u16 next_idx;
+ struct list_head *free_list;
+ spinlock_t *free_list_lock;
+ struct io_bdt **io_bdt_pool;
+ struct bnx2fc_cmd **cmds;
+};
+
+struct bnx2fc_rport {
+ struct fcoe_port *port;
+ struct fc_rport *rport;
+ struct fc_rport_priv *rdata;
+ void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE 0x40
+ u32 fcoe_conn_id;
+ u32 context_id;
+ u32 sid;
+
+ unsigned long flags;
+#define BNX2FC_FLAG_SESSION_READY 0x1
+#define BNX2FC_FLAG_OFFLOADED 0x2
+#define BNX2FC_FLAG_DISABLED 0x3
+#define BNX2FC_FLAG_DESTROYED 0x4
+#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
+#define BNX2FC_FLAG_DESTROY_CMPL 0x6
+#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7
+#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
+#define BNX2FC_FLAG_EXPL_LOGO 0x9
+
+ u32 max_sqes;
+ u32 max_rqes;
+ u32 max_cqes;
+
+ struct fcoe_sqe *sq;
+ dma_addr_t sq_dma;
+ u16 sq_prod_idx;
+ u8 sq_curr_toggle_bit;
+ u32 sq_mem_size;
+
+ struct fcoe_cqe *cq;
+ dma_addr_t cq_dma;
+ u32 cq_cons_idx;
+ u8 cq_curr_toggle_bit;
+ u32 cq_mem_size;
+
+ void *rq;
+ dma_addr_t rq_dma;
+ u32 rq_prod_idx;
+ u32 rq_cons_idx;
+ u32 rq_mem_size;
+
+ void *rq_pbl;
+ dma_addr_t rq_pbl_dma;
+ u32 rq_pbl_size;
+
+ struct fcoe_xfrqe *xferq;
+ dma_addr_t xferq_dma;
+ u32 xferq_mem_size;
+
+ struct fcoe_confqe *confq;
+ dma_addr_t confq_dma;
+ u32 confq_mem_size;
+
+ void *confq_pbl;
+ dma_addr_t confq_pbl_dma;
+ u32 confq_pbl_size;
+
+ struct fcoe_conn_db *conn_db;
+ dma_addr_t conn_db_dma;
+ u32 conn_db_mem_size;
+
+ struct fcoe_sqe *lcq;
+ dma_addr_t lcq_dma;
+ u32 lcq_mem_size;
+
+ void *ofld_req[4];
+ dma_addr_t ofld_req_dma[4];
+ void *enbl_req;
+ dma_addr_t enbl_req_dma;
+
+ spinlock_t tgt_lock;
+ spinlock_t cq_lock;
+ atomic_t num_active_ios;
+ u32 flush_in_prog;
+ unsigned long work_time_slice;
+ unsigned long timestamp;
+ struct list_head free_task_list;
+ struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
+ atomic_t pi;
+ atomic_t ci;
+ struct list_head active_cmd_queue;
+ struct list_head els_queue;
+ struct list_head io_retire_queue;
+ struct list_head active_tm_queue;
+
+ struct timer_list ofld_timer;
+ wait_queue_head_t ofld_wait;
+
+ struct timer_list upld_timer;
+ wait_queue_head_t upld_wait;
+};
+
+struct bnx2fc_mp_req {
+ u8 tm_flags;
+
+ u32 req_len;
+ void *req_buf;
+ dma_addr_t req_buf_dma;
+ struct fcoe_bd_ctx *mp_req_bd;
+ dma_addr_t mp_req_bd_dma;
+ struct fc_frame_header req_fc_hdr;
+
+ u32 resp_len;
+ void *resp_buf;
+ dma_addr_t resp_buf_dma;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ dma_addr_t mp_resp_bd_dma;
+ struct fc_frame_header resp_fc_hdr;
+};
+
+struct bnx2fc_els_cb_arg {
+ struct bnx2fc_cmd *aborted_io_req;
+ struct bnx2fc_cmd *io_req;
+ u16 l2_oxid;
+};
+
+/* bnx2fc command structure */
+struct bnx2fc_cmd {
+ struct list_head link;
+ u8 on_active_queue;
+ u8 on_tmf_queue;
+ u8 cmd_type;
+#define BNX2FC_SCSI_CMD 1
+#define BNX2FC_TASK_MGMT_CMD 2
+#define BNX2FC_ABTS 3
+#define BNX2FC_ELS 4
+#define BNX2FC_CLEANUP 5
+ u8 io_req_flags;
+ struct kref refcount;
+ struct fcoe_port *port;
+ struct bnx2fc_rport *tgt;
+ struct scsi_cmnd *sc_cmd;
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ struct bnx2fc_mp_req mp_req;
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct delayed_work timeout_work; /* timer for ULP timeouts */
+ struct completion tm_done;
+ int wait_for_comp;
+ u16 xid;
+ struct fcoe_task_ctx_entry *task;
+ struct io_bdt *bd_tbl;
+ struct fcp_rsp *rsp;
+ size_t data_xfer_len;
+ unsigned long req_flags;
+#define BNX2FC_FLAG_ISSUE_RRQ 0x1
+#define BNX2FC_FLAG_ISSUE_ABTS 0x2
+#define BNX2FC_FLAG_ABTS_DONE 0x3
+#define BNX2FC_FLAG_TM_COMPL 0x4
+#define BNX2FC_FLAG_TM_TIMEOUT 0x5
+#define BNX2FC_FLAG_IO_CLEANUP 0x6
+#define BNX2FC_FLAG_RETIRE_OXID 0x7
+#define BNX2FC_FLAG_EH_ABORT 0x8
+#define BNX2FC_FLAG_IO_COMPL 0x9
+#define BNX2FC_FLAG_ELS_DONE 0xa
+#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
+ u32 fcp_resid;
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+ u8 cdb_status; /* SCSI IO status */
+ u8 fcp_status; /* FCP IO status */
+ u8 fcp_rsp_code;
+ u8 scsi_comp_flags;
+};
+
+struct io_bdt {
+ struct bnx2fc_cmd *io_req;
+ struct fcoe_bd_ctx *bd_tbl;
+ dma_addr_t bd_tbl_dma;
+ u16 bd_valid;
+};
+
+struct bnx2fc_work {
+ struct list_head list;
+ struct bnx2fc_rport *tgt;
+ u16 wqe;
+};
+struct bnx2fc_unsol_els {
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+ struct work_struct unsol_els_work;
+};
+
+
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
+void bnx2fc_cmd_release(struct kref *ref);
+int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt);
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe);
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+ u16 min_xid, u16 max_xid);
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen);
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req);
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec);
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid);
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rport,
+ enum fc_rport_event event);
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd);
+
+
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt);
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout);
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id);
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid);
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+
+#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
new file mode 100644
index 000000000000..fe7769173c43
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -0,0 +1,206 @@
+#ifndef __BNX2FC_CONSTANTS_H_
+#define __BNX2FC_CONSTANTS_H_
+
+/*
+ * This file defines HSI constants for the FCoE flows
+ */
+
+/* KWQ/KCQ FCoE layer code */
+#define FCOE_KWQE_LAYER_CODE (7)
+
+/* KWQ (kernel work queue) request op codes */
+#define FCOE_KWQE_OPCODE_INIT1 (0)
+#define FCOE_KWQE_OPCODE_INIT2 (1)
+#define FCOE_KWQE_OPCODE_INIT3 (2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
+#define FCOE_KWQE_OPCODE_DESTROY (10)
+#define FCOE_KWQE_OPCODE_STAT (11)
+
+/* KCQ (kernel completion queue) response op codes */
+#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
+#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
+#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
+#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
+
+/* Unsolicited CQE type */
+#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0
+#define FCOE_ERROR_DETECTION_CQE_TYPE 1
+#define FCOE_WARNING_DETECTION_CQE_TYPE 2
+
+/* Task context constants */
+/* After the driver has initialized the task, in case timer services are required */
+#define FCOE_TASK_TX_STATE_INIT 0
+/* In case timer services are required, this is updated by Xstorm after it
+ * starts processing the task. In case no timer facilities are required, the
+ * driver initializes the state to this value */
+#define FCOE_TASK_TX_STATE_NORMAL 1
+/* Task is under abort procedure. Updated in order to stop processing of
+ * pending WQEs on this task */
+#define FCOE_TASK_TX_STATE_ABORT 2
+/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
+#define FCOE_TASK_TX_STATE_ERROR 3
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_TX_STATE_WARNING 4
+/* For completed unsolicited task */
+#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5
+/* For exchange cleanup request task */
+#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6
+/* For sequence cleanup request task */
+#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7
+/* Mark task as aborted and indicate that ABTS was not transmitted */
+#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8
+/* Mark task as aborted and indicate that ABTS was transmitted */
+#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9
+/* For completion of the ABTS task. */
+#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10
+/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
+ */
+#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11
+/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
+#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12
+
+#define FCOE_TASK_RX_STATE_NORMAL 0
+#define FCOE_TASK_RX_STATE_COMPLETED 1
+/* Obsolete: Intermediate completion (middle path with local completion) */
+#define FCOE_TASK_RX_STATE_INTER_COMP 2
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_RX_STATE_WARNING 3
+/* For E_D_T_TOV timer expiration in Ustorm */
+#define FCOE_TASK_RX_STATE_ERROR 4
+/* ABTS ACC arrived; wait for local completion to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5
+/* Local completion arrived; wait for ABTS ACC to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6
+/* Special completion indication in case the task was aborted. */
+#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
+/* Special completion indication in case the task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8
+/* Special completion indication (in the task that requested the exchange
+ * cleanup) in case the cleaned task is non-valid. */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9
+/* Special completion indication (in the task that requested the sequence
+ * cleanup) in case the cleaned task was already returned to normal. */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10
+/* Exchange cleanup arrived; wait until the xfer is handled to finally
+ * complete the task. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11
+/* Xfer handled, wait for exchange cleanup to finally complete the task. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12
+
+#define FCOE_TASK_TYPE_WRITE 0
+#define FCOE_TASK_TYPE_READ 1
+#define FCOE_TASK_TYPE_MIDPATH 2
+#define FCOE_TASK_TYPE_UNSOLICITED 3
+#define FCOE_TASK_TYPE_ABTS 4
+#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5
+#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6
+
+#define FCOE_TASK_DEV_TYPE_DISK 0
+#define FCOE_TASK_DEV_TYPE_TAPE 1
+
+#define FCOE_TASK_CLASS_TYPE_3 0
+#define FCOE_TASK_CLASS_TYPE_2 1
+
+/* Everest FCoE connection type */
+#define B577XX_FCOE_CONNECTION_TYPE 4
+
+/* Error codes for Error Reporting in fast path flows */
+/* XFER error codes */
+#define FCOE_ERROR_CODE_XFER_OOO_RO 0
+#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1
+#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2
+#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3
+#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4
+#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5
+#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6
+#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7
+#define FCOE_ERROR_CODE_XFER_FCTL 8
+
+/* FCP RSP error codes */
+#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9
+#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10
+#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14
+#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15
+#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16
+#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17
+#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18
+#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19
+
+/* FCP DATA error codes */
+#define FCOE_ERROR_CODE_DATA_OOO_RO 20
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22
+#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23
+#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24
+#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25
+#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
+#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
+#define FCOE_ERROR_CODE_DATA_FCTL 28
+
+/* Middle path error codes */
+#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29
+#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30
+#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31
+#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32
+#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36
+
+/* ABTS error codes */
+#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37
+#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
+#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41
+
+/* Common error codes */
+#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42
+#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43
+#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44
+#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45
+#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46
+#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47
+#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48
+#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49
+#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
+
+/* Unsolicited Rx error codes */
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54
+#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55
+
+#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56
+#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57
+#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58
+
+/* Timer error codes */
+#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60
+#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61
+
+
+#endif /* BNX2FC_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
new file mode 100644
index 000000000000..7f6aff68cc53
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -0,0 +1,70 @@
+#ifndef __BNX2FC_DEBUG__
+#define __BNX2FC_DEBUG__
+
+/* Log level bit mask */
+#define LOG_IO 0x01 /* scsi cmd error, cleanup */
+#define LOG_TGT 0x02 /* Session setup, cleanup, etc. */
+#define LOG_HBA 0x04 /* lport events, link, mtu, etc. */
+#define LOG_ELS 0x08 /* ELS logs */
+#define LOG_MISC 0x10 /* fcoe L2 frame related logs */
+#define LOG_ALL 0xff /* LOG all messages */
+
+extern unsigned int bnx2fc_debug_level;
+
+#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \
+ do { \
+ if (unlikely(bnx2fc_debug_level & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+ } while (0)
+
+#define BNX2FC_ELS_DBG(fmt, arg...) \
+ BNX2FC_CHK_LOGGING(LOG_ELS, \
+ printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_MISC_DBG(fmt, arg...) \
+ BNX2FC_CHK_LOGGING(LOG_MISC, \
+ printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
+ do { \
+ if (!io_req || !io_req->port || !io_req->port->lport || \
+ !io_req->port->lport->host) \
+ BNX2FC_CHK_LOGGING(LOG_IO, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_IO, \
+ shost_printk(KERN_ALERT, \
+ (io_req)->port->lport->host, \
+ PFX "xid:0x%x " fmt, \
+ (io_req)->xid, ##arg)); \
+ } while (0)
+
+#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \
+ do { \
+ if (!tgt || !tgt->port || !tgt->port->lport || \
+ !tgt->port->lport->host || !tgt->rport) \
+ BNX2FC_CHK_LOGGING(LOG_TGT, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_TGT, \
+ shost_printk(KERN_ALERT, \
+ (tgt)->port->lport->host, \
+ PFX "port:%x " fmt, \
+ (tgt)->rport->port_id, ##arg)); \
+ } while (0)
+
+
+#define BNX2FC_HBA_DBG(lport, fmt, arg...) \
+ do { \
+ if (!lport || !lport->host) \
+ BNX2FC_CHK_LOGGING(LOG_HBA, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_HBA, \
+ shost_printk(KERN_ALERT, lport->host, \
+ PFX fmt, ##arg)); \
+ } while (0)
+
+#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
new file mode 100644
index 000000000000..7a11a255157f
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -0,0 +1,515 @@
+/*
+ * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains helper routines that handle ELS requests
+ * and responses.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
+
+static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *orig_io_req;
+ struct bnx2fc_cmd *rrq_req;
+ int rc = 0;
+
+ BUG_ON(!cb_arg);
+ rrq_req = cb_arg->io_req;
+ orig_io_req = cb_arg->aborted_io_req;
+ BUG_ON(!orig_io_req);
+ BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
+ orig_io_req->xid, rrq_req->xid);
+
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
+ /*
+ * The ELS request timed out. Clean up the IO with the FW,
+ * drop the completion and remove it from the active_cmd_queue.
+ */
+ BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
+ rrq_req->xid);
+
+ if (rrq_req->on_active_queue) {
+ list_del_init(&rrq_req->link);
+ rrq_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(rrq_req);
+ BUG_ON(rc);
+ }
+ }
+ kfree(cb_arg);
+}
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
+{
+
+ struct fc_els_rrq rrq;
+ struct bnx2fc_rport *tgt = aborted_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 sid = tgt->sid;
+ u32 r_a_tov = lport->r_a_tov;
+ unsigned long start = jiffies;
+ int rc;
+
+ BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
+ aborted_io_req->xid);
+ memset(&rrq, 0, sizeof(rrq));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
+ rc = -ENOMEM;
+ goto rrq_err;
+ }
+
+ cb_arg->aborted_io_req = aborted_io_req;
+
+ rrq.rrq_cmd = ELS_RRQ;
+ hton24(rrq.rrq_s_id, sid);
+ rrq.rrq_ox_id = htons(aborted_io_req->xid);
+ rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
+
+retry_rrq:
+ rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
+ bnx2fc_rrq_compl, cb_arg,
+ r_a_tov);
+ if (rc == -ENOMEM) {
+ if (time_after(jiffies, start + (10 * HZ))) {
+ BNX2FC_ELS_DBG("rrq Failed\n");
+ rc = FAILED;
+ goto rrq_err;
+ }
+ msleep(20);
+ goto retry_rrq;
+ }
+rrq_err:
+ if (rc) {
+ BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
+ aborted_io_req->xid);
+ kfree(cb_arg);
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ }
+ return rc;
+}
+
+static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ unsigned char *buf;
+ void *resp_buf;
+ u32 resp_len, hdr_len;
+ u16 l2_oxid;
+ int frame_len;
+ int rc = 0;
+
+ l2_oxid = cb_arg->l2_oxid;
+ BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
+
+ els_req = cb_arg->io_req;
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
+ /*
+ * The ELS request timed out. Clean up the IO with the FW and
+ * drop the completion; libfc will handle the ELS timeout.
+ */
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(els_req);
+ BUG_ON(rc);
+ }
+ goto free_arg;
+ }
+
+ tgt = els_req->tgt;
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "Unable to alloc mp buf\n");
+ goto free_arg;
+ }
+ hdr_len = sizeof(*fc_hdr);
+ if (hdr_len + resp_len > PAGE_SIZE) {
+ printk(KERN_ERR PFX "l2_els_compl: resp len is "
+ "beyond page size\n");
+ goto free_buf;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+ frame_len = hdr_len + resp_len;
+
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
+
+free_buf:
+ kfree(buf);
+free_arg:
+ kfree(cb_arg);
+}
+
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_adisc *adisc;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ /* adisc is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_logo *logo;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ logo = fc_frame_payload_get(fp, sizeof(*logo));
+ /* logo is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_rls *rls;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ rls = fc_frame_payload_get(fp, sizeof(*rls));
+ /* rls is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int rc = 0;
+ int task_idx, index;
+ u32 did, sid;
+ u16 xid;
+
+ rc = fc_remote_port_chkready(rport);
+ if (rc) {
+ printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
+ (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
+ printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
+ if (!els_req) {
+ rc = -ENOMEM;
+ goto els_err;
+ }
+
+ els_req->sc_cmd = NULL;
+ els_req->port = port;
+ els_req->tgt = tgt;
+ els_req->cb_func = cb_func;
+ cb_arg->io_req = els_req;
+ els_req->cb_arg = cb_arg;
+
+ mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
+ rc = bnx2fc_init_mp_req(els_req);
+ if (rc == FAILED) {
+ printk(KERN_ALERT PFX "ELS MP request init failed\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -ENOMEM;
+ goto els_err;
+ } else {
+ /* rc SUCCESS */
+ rc = 0;
+ }
+
+ /* Set the data_xfer_len to the size of ELS payload */
+ mp_req->req_len = data_len;
+ els_req->data_xfer_len = mp_req->req_len;
+
+ /* Fill ELS Payload */
+ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
+ memcpy(mp_req->req_buf, data, data_len);
+ } else {
+ printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ goto els_err;
+
+ /* Fill FC header */
+ fc_hdr = &(mp_req->req_fc_hdr);
+
+ did = tgt->rport->port_id;
+ sid = tgt->sid;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
+ FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ /* Obtain exchange id */
+ xid = els_req->xid;
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(els_req, task);
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_els.. session not ready\n");
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EINVAL;
+ }
+
+ if (timer_msec)
+ bnx2fc_cmd_timer_set(els_req, timer_msec);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ els_req->on_active_queue = 1;
+ list_add_tail(&els_req->link, &tgt->els_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+els_err:
+ return rc;
+}
+
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ u64 *hdr;
+ u64 *temp_hdr;
+
+ BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
+ "cmd_type = %d\n", els_req->xid, els_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &els_req->req_flags)) {
+ BNX2FC_ELS_DBG("Timer context finished processing this "
+ "els - 0x%x\n", els_req->xid);
+ /* This IO doesn't receive a cleanup completion */
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ return;
+ }
+
+ /* Cancel the timeout_work, as we received the response */
+ if (cancel_delayed_work(&els_req->timeout_work))
+ kref_put(&els_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ }
+
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+ /* Parse ELS response */
+ if ((els_req->cb_func) && (els_req->cb_arg)) {
+ els_req->cb_func(els_req->cb_arg);
+ els_req->cb_arg = NULL;
+ }
+
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+}
+
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ u8 *mac;
+ struct fc_frame_header *fh;
+ u8 op;
+
+ if (IS_ERR(fp))
+ goto done;
+
+ mac = fr_cb(fp)->granted_mac;
+ if (is_zero_ether_addr(mac)) {
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_type != FC_TYPE_ELS) {
+ printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
+ "fh_type != FC_TYPE_ELS\n");
+ fc_frame_free(fp);
+ return;
+ }
+ op = fc_frame_payload_op(fp);
+ if (lport->vport) {
+ if (op == ELS_LS_RJT) {
+ printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
+ fc_vport_terminate(lport->vport);
+ fc_frame_free(fp);
+ return;
+ }
+ }
+ if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
+ fc_frame_free(fp);
+ return;
+ }
+ }
+ fip->update_mac(lport, mac);
+done:
+ fc_lport_flogi_resp(seq, fp, lport);
+}
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ static u8 zero_mac[ETH_ALEN] = { 0 };
+
+ if (!IS_ERR(fp))
+ fip->update_mac(lport, zero_mac);
+ fc_lport_logo_resp(seq, fp, lport);
+}
+
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_ctlr *fip = &hba->ctlr;
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+ switch (op) {
+ case ELS_FLOGI:
+ case ELS_FDISC:
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
+ fip, timeout);
+ case ELS_LOGO:
+ /* only hook onto fabric logouts, not port logouts */
+ if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+ break;
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
+ fip, timeout);
+ }
+ return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
new file mode 100644
index 000000000000..e476e8753079
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -0,0 +1,2535 @@
+/* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the code that interacts with libfc, libfcoe,
+ * cnic modules to create FCoE instances, send/receive non-offloaded
+ * FIP/FCoE packets, listen to link events etc.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static struct list_head adapter_list;
+static u32 adapter_count;
+static DEFINE_MUTEX(bnx2fc_dev_lock);
+DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+#define DRV_MODULE_NAME "bnx2fc"
+#define DRV_MODULE_VERSION BNX2FC_VERSION
+#define DRV_MODULE_RELDATE "Jan 25, 2011"
+
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNX2FC_MAX_QUEUE_DEPTH 256
+#define BNX2FC_MIN_QUEUE_DEPTH 32
+#define FCOE_WORD_TO_BYTE 4
+
+static struct scsi_transport_template *bnx2fc_transport_template;
+static struct scsi_transport_template *bnx2fc_vport_xport_template;
+
+struct workqueue_struct *bnx2fc_wq;
+
+/* bnx2fc needs only one instance of the fcoe_percpu_s structure.
+ * Here the io threads are per cpu but there is just one l2 thread.
+ */
+struct fcoe_percpu_s bnx2fc_global;
+DEFINE_SPINLOCK(bnx2fc_global_lock);
+
+static struct cnic_ulp_ops bnx2fc_cnic_cb;
+static struct libfc_function_template bnx2fc_libfc_fcn_templ;
+static struct scsi_host_template bnx2fc_shost_template;
+static struct fc_function_template bnx2fc_transport_function;
+static struct fc_function_template bnx2fc_vport_xport_function;
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
+static int bnx2fc_destroy(struct net_device *net_device);
+static int bnx2fc_enable(struct net_device *netdev);
+static int bnx2fc_disable(struct net_device *netdev);
+
+static void bnx2fc_recv_frame(struct sk_buff *skb);
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
+static int bnx2fc_net_config(struct fc_lport *lp);
+static int bnx2fc_lport_config(struct fc_lport *lport);
+static int bnx2fc_em_config(struct fc_lport *lport);
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+ struct device *parent, int npiv);
+static void bnx2fc_destroy_work(struct work_struct *work);
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport);
+static void bnx2fc_stop(struct bnx2fc_hba *hba);
+static int __init bnx2fc_mod_init(void);
+static void __exit bnx2fc_mod_exit(void);
+
+unsigned int bnx2fc_debug_level;
+module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
+
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu);
+/* notification function for CPU hotplug events */
+static struct notifier_block bnx2fc_cpu_notifier = {
+ .notifier_call = bnx2fc_cpu_callback,
+};
+
+static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
+{
+ struct fcoe_percpu_s *bg;
+ struct fcoe_rcv_info *fr;
+ struct sk_buff_head *list;
+ struct sk_buff *skb, *next;
+ struct sk_buff *head;
+
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ list = &bg->fcoe_rx_list;
+ head = list->next;
+ for (skb = head; skb != (struct sk_buff *)list;
+ skb = next) {
+ next = skb->next;
+ fr = fcoe_dev_from_skb(skb);
+ if (fr->fr_dev == lp) {
+ __skb_unlink(skb, list);
+ kfree_skb(skb);
+ }
+ }
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+}
+
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+ int rc;
+ spin_lock(&bnx2fc_global_lock);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
+ spin_unlock(&bnx2fc_global_lock);
+
+ return rc;
+}
+
+static void bnx2fc_abort_io(struct fc_lport *lport)
+{
+ /*
+ * This function is a no-op for bnx2fc, but we do
+ * not want to leave it as NULL either, as libfc
+ * would otherwise call the default function,
+ * fc_fcp_abort_io.
+ */
+}
+
+static void bnx2fc_cleanup(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_rport *tgt;
+ int i;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&hba->hba_mutex);
+ spin_lock_bh(&hba->hba_lock);
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if (tgt) {
+ /* Cleanup IOs belonging to requested vport */
+ if (tgt->port == port) {
+ spin_unlock_bh(&hba->hba_lock);
+ BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
+ bnx2fc_flush_active_ios(tgt);
+ spin_lock_bh(&hba->hba_lock);
+ }
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+ mutex_unlock(&hba->hba_mutex);
+}
+
+static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
+ struct fc_frame *fp)
+{
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_frame_header *fh;
+ int rc = 0;
+
+ fh = fc_frame_header_get(fp);
+ BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
+ "r_ctl = 0x%x\n", rdata->ids.port_id,
+ ntohs(fh->fh_ox_id), fh->fh_r_ctl);
+ if ((fh->fh_type == FC_TYPE_ELS) &&
+ (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_ADISC:
+ rc = bnx2fc_send_adisc(tgt, fp);
+ break;
+ case ELS_LOGO:
+ rc = bnx2fc_send_logo(tgt, fp);
+ break;
+ case ELS_RLS:
+ rc = bnx2fc_send_rls(tgt, fp);
+ break;
+ default:
+ break;
+ }
+ } else if ((fh->fh_type == FC_TYPE_BLS) &&
+ (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
+ BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
+ else {
+ BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
+ "rctl 0x%x thru non-offload path\n",
+ fh->fh_type, fh->fh_r_ctl);
+ return -ENODEV;
+ }
+ if (rc)
+ return -ENOMEM;
+ else
+ return 0;
+}
+
+/**
+ * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
+ *
+ * @lport: the associated local port
+ * @fp: the fc_frame to be transmitted
+ */
+static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct ethhdr *eh;
+ struct fcoe_crc_eof *cp;
+ struct sk_buff *skb;
+ struct fc_frame_header *fh;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct fcoe_hdr *hp;
+ struct bnx2fc_rport *tgt;
+ struct fcoe_dev_stats *stats;
+ u8 sof, eof;
+ u32 crc;
+ unsigned int hlen, tlen, elen;
+ int wlen, rc = 0;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ hba = port->priv;
+
+ fh = fc_frame_header_get(fp);
+
+ skb = fp_skb(fp);
+ if (!lport->link_up) {
+ BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+ if (!hba->ctlr.sel_fcf) {
+ BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
+ return 0;
+ }
+
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+
+ /*
+ * Snoop the frame header to check if the frame is for
+ * an offloaded session
+ */
+ /*
+ * tgt_ofld_list access is synchronized using
+ * both hba mutex and hba lock. At least the hba mutex or
+ * hba lock needs to be held for read access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
+ if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ /* This frame is for offloaded session */
+ BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
+ "port_id = 0x%x\n", ntoh24(fh->fh_d_id));
+ spin_unlock_bh(&hba->hba_lock);
+ rc = bnx2fc_xmit_l2_frame(tgt, fp);
+ if (rc != -ENODEV) {
+ kfree_skb(skb);
+ return rc;
+ }
+ } else {
+ spin_unlock_bh(&hba->hba_lock);
+ }
+
+ elen = sizeof(struct ethhdr);
+ hlen = sizeof(struct fcoe_hdr);
+ tlen = sizeof(struct fcoe_crc_eof);
+ wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ crc = fcoe_fc_crc(fp);
+
+ /* copy port crc and eof to the skb buff */
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *frag;
+ if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+ + frag->page_offset;
+ } else {
+ cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ }
+
+ memset(cp, 0, sizeof(*cp));
+ cp->fcoe_eof = eof;
+ cp->fcoe_crc32 = cpu_to_le32(~crc);
+ if (skb_is_nonlinear(skb)) {
+ kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+ cp = NULL;
+ }
+
+ /* adjust skb network/transport offsets to match mac/fcoe/port */
+ skb_push(skb, elen + hlen);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->mac_len = elen;
+ skb->protocol = htons(ETH_P_FCOE);
+ skb->dev = hba->netdev;
+
+ /* fill up mac and fcoe headers */
+ eh = eth_hdr(skb);
+ eh->h_proto = htons(ETH_P_FCOE);
+ if (hba->ctlr.map_dest)
+ fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+ else
+ /* insert GW address */
+ memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
+
+ if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
+ else
+ memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
+
+ hp = (struct fcoe_hdr *)(eh + 1);
+ memset(hp, 0, sizeof(*hp));
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+ hp->fcoe_sof = sof;
+
+ /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
+ if (lport->seq_offload && fr_max_payload(fp)) {
+ skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
+ skb_shinfo(skb)->gso_size = fr_max_payload(fp);
+ } else {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ }
+
+ /* update tx stats */
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->TxFrames++;
+ stats->TxWords += wlen;
+ put_cpu();
+
+ /* send down to lld */
+ fr_dev(fp) = lport;
+ if (port->fcoe_pending_queue.qlen)
+ fcoe_check_wait_queue(lport, skb);
+ else if (fcoe_start_io(skb))
+ fcoe_check_wait_queue(lport, skb);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ
+ *
+ * @skb: the receive socket buffer
+ * @dev: associated net device
+ * @ptype: context
+ * @olddev: last device
+ *
+ * This function receives the packet, builds an FC frame and passes it up
+ */
+static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *olddev)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_hba *hba;
+ struct fc_frame_header *fh;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_percpu_s *bg;
+ unsigned short oxid;
+
+ hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
+ lport = hba->ctlr.lp;
+
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
+ goto err;
+ }
+
+ if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
+ goto err;
+ }
+
+ /*
+ * Check for minimum frame length, and make sure required FCoE
+ * and FC headers are pulled into the linear data area.
+ */
+ if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+ !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+ goto err;
+
+ skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+ oxid = ntohs(fh->fh_ox_id);
+
+ fr = fcoe_dev_from_skb(skb);
+ fr->fr_dev = lport;
+ fr->ptype = ptype;
+
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+
+ __skb_queue_tail(&bg->fcoe_rx_list, skb);
+ if (bg->fcoe_rx_list.qlen == 1)
+ wake_up_process(bg->thread);
+
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -1;
+}
+
+static int bnx2fc_l2_rcv_thread(void *arg)
+{
+ struct fcoe_percpu_s *bg = arg;
+ struct sk_buff *skb;
+
+ set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_RUNNING);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ bnx2fc_recv_frame(skb);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ }
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+
+static void bnx2fc_recv_frame(struct sk_buff *skb)
+{
+ u32 fr_len;
+ struct fc_lport *lport;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ struct fcoe_crc_eof crc_eof;
+ struct fc_frame *fp;
+ struct fc_lport *vn_port;
+ struct fcoe_port *port;
+ u8 *mac = NULL;
+ u8 *dest_mac = NULL;
+ struct fcoe_hdr *hp;
+
+ fr = fcoe_dev_from_skb(skb);
+ lport = fr->fr_dev;
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ALERT PFX "Invalid lport struct\n");
+ kfree_skb(skb);
+ return;
+ }
+
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb);
+ mac = eth_hdr(skb)->h_source;
+ dest_mac = eth_hdr(skb)->h_dest;
+
+ /* Pull the header */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+ fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = hp->fcoe_sof;
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ fr_eof(fp) = crc_eof.fcoe_eof;
+ fr_crc(fp) = crc_eof.fcoe_crc32;
+ if (pskb_trim(skb, fr_len)) {
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+
+ vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+ if (vn_port) {
+ port = lport_priv(vn_port);
+ if (compare_ether_addr(port->data_src_addr, dest_mac)
+ != 0) {
+ BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ }
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+ fh->fh_type == FC_TYPE_FCP) {
+ /* Drop FCP data. We don't handle this in the L2 path */
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+ fh->fh_type == FC_TYPE_ELS) {
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LOGO:
+ if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ /* drop non-FIP LOGO */
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ break;
+ }
+ }
+ if (le32_to_cpu(fr_crc(fp)) !=
+ ~crc32(~0, skb->data, fr_len)) {
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING PFX "dropping frame with "
+ "CRC error\n");
+ stats->InvalidCRCCount++;
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ put_cpu();
+ fc_exch_recv(lport, fp);
+}
+
+/**
+ * bnx2fc_percpu_io_thread - thread per cpu for ios
+ *
+ * @arg: ptr to bnx2fc_percpu_info structure
+ */
+int bnx2fc_percpu_io_thread(void *arg)
+{
+ struct bnx2fc_percpu_s *p = arg;
+ struct bnx2fc_work *work, *tmp;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_RUNNING);
+ spin_lock_bh(&p->fp_work_lock);
+ while (!list_empty(&p->work_list)) {
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_bh(&p->fp_work_lock);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_lock_bh(&p->fp_work_lock);
+ }
+ spin_unlock_bh(&p->fp_work_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
+{
+ struct fc_host_statistics *bnx2fc_stats;
+ struct fc_lport *lport = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_statistics_params *fw_stats;
+ int rc = 0;
+
+ fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
+ if (!fw_stats)
+ return NULL;
+
+ bnx2fc_stats = fc_get_host_stats(shost);
+
+ init_completion(&hba->stat_req_done);
+ if (bnx2fc_send_stat_req(hba))
+ return bnx2fc_stats;
+ rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
+ if (!rc) {
+ BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
+ return bnx2fc_stats;
+ }
+ bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
+ bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
+ bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
+ bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
+ bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+
+ bnx2fc_stats->dumped_frames = 0;
+ bnx2fc_stats->lip_count = 0;
+ bnx2fc_stats->nos_count = 0;
+ bnx2fc_stats->loss_of_sync_count = 0;
+ bnx2fc_stats->loss_of_signal_count = 0;
+ bnx2fc_stats->prim_seq_protocol_err_count = 0;
+
+ return bnx2fc_stats;
+}
+
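+/**
+ * bnx2fc_shost_config - configure and register the Scsi_Host
+ *
+ * @lport: libfc local port
+ * @dev: parent device for scsi_add_host()
+ *
+ * Sets the SCSI host limits, selects the FC transport template
+ * (vport vs. physical port) and registers the host with SCSI-ml.
+ */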
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct Scsi_Host *shost = lport->host;
+ int rc = 0;
+
+ shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
+ shost->max_lun = BNX2FC_MAX_LUN;
+ shost->max_id = BNX2FC_MAX_FCP_TGT;
+ shost->max_channel = 0;
+ if (lport->vport)
+ shost->transportt = bnx2fc_vport_xport_template;
+ else
+ shost->transportt = bnx2fc_transport_template;
+
+ /* Add the new host to SCSI-ml */
+ rc = scsi_add_host(lport->host, dev);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on scsi_add_host\n");
+ return rc;
+ }
+ if (!lport->vport)
+ fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
+ sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
+ BNX2FC_NAME, BNX2FC_VERSION,
+ hba->netdev->name);
+
+ return 0;
+}
+
+static int bnx2fc_mfs_update(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ u32 mfs;
+ u32 max_mfs;
+
+ mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+ max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header);
+ BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs);
+ if (mfs > max_mfs)
+ mfs = max_mfs;
+
+ /* Adjust mfs to be a multiple of 256 bytes */
+ mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) *
+ BNX2FC_MIN_PAYLOAD);
+ mfs = mfs + sizeof(struct fc_frame_header);
+
+ BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs);
+ if (fc_set_mfs(lport, mfs))
+ return -EINVAL;
+ return 0;
+}
+static void bnx2fc_link_speed_update(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+
+ if (!dev_ethtool_get_settings(netdev, &ecmd)) {
+ lport->link_supported_speeds &=
+ ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+ if (ecmd.supported & SUPPORTED_10000baseT_Full)
+ lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
+
+ if (ecmd.speed == SPEED_1000)
+ lport->link_speed = FC_PORTSPEED_1GBIT;
+ if (ecmd.speed == SPEED_10000)
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ }
+ return;
+}
+static int bnx2fc_link_ok(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *dev = hba->phys_dev;
+ int rc = 0;
+
+ if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else {
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ rc = -1;
+ }
+ return rc;
+}
+
+/**
+ * bnx2fc_get_link_state - get network link state
+ *
+ * @hba: adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
+{
+ if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
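+/**
+ * bnx2fc_net_config - configure netdev-related properties of the lport
+ *
+ * @lport: libfc local port
+ *
+ * Requires ethtool get_pauseparam support, updates the MFS from the
+ * MTU, initializes the FCoE pending queue and its timer, and derives
+ * the WWNN/WWPN from the controller source MAC for the physical port.
+ */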
+static int bnx2fc_net_config(struct fc_lport *lport)
+{
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ u64 wwnn, wwpn;
+
+ port = lport_priv(lport);
+ hba = port->priv;
+
+ /* require support for get_pauseparam ethtool op. */
+ if (!hba->phys_dev->ethtool_ops ||
+ !hba->phys_dev->ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (bnx2fc_mfs_update(lport))
+ return -EINVAL;
+
+ skb_queue_head_init(&port->fcoe_pending_queue);
+ port->fcoe_pending_queue_active = 0;
+ setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
+
+ bnx2fc_link_speed_update(lport);
+
+ if (!lport->vport) {
+ wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
+ BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
+ fc_set_wwnn(lport, wwnn);
+
+ wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
+ BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
+ fc_set_wwpn(lport, wwpn);
+ }
+
+ return 0;
+}
+
+static void bnx2fc_destroy_timer(unsigned long data)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
+
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - "
+ "Destroy compl not received!!\n");
+ hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ wake_up_interruptible(&hba->destroy_wait);
+}
+
+/**
+ * bnx2fc_indicate_netevent - Generic netdev event handler
+ *
+ * @context: adapter structure pointer
+ * @event: event type
+ *
+ * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE and
+ * NETDEV_CHANGEMTU events.
+ */
+static void bnx2fc_indicate_netevent(void *context, unsigned long event)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ struct fc_lport *lport = hba->ctlr.lp;
+ struct fc_lport *vport;
+ u32 link_possible = 1;
+
+ if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
+ hba->netdev->name, event);
+ return;
+ }
+
+ /*
+ * ASSUMPTION:
+ * indicate_netevent cannot be called from cnic unless bnx2fc
+ * does register_device
+ */
+ BUG_ON(!lport);
+
+ BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
+ hba->netdev->name, event);
+
+ switch (event) {
+ case NETDEV_UP:
+ BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
+ hba->adapter_state);
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+ printk(KERN_ERR "indicate_netevent: "\
+ "adapter is not UP!!\n");
+ /* fall thru to update mfs if MTU has changed */
+ case NETDEV_CHANGEMTU:
+ BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n");
+ bnx2fc_mfs_update(lport);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ bnx2fc_mfs_update(vport);
+ mutex_unlock(&lport->lp_mutex);
+ break;
+
+ case NETDEV_DOWN:
+ BNX2FC_HBA_DBG(lport, "Port down\n");
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_GOING_DOWN:
+ BNX2FC_HBA_DBG(lport, "Port going down\n");
+ set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_CHANGE:
+ BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unkonwn netevent %ld", event);
+ return;
+ }
+
+ bnx2fc_link_speed_update(lport);
+
+ if (link_possible && !bnx2fc_link_ok(lport)) {
+ printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
+ fcoe_ctlr_link_up(&hba->ctlr);
+ } else {
+ printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
+ if (fcoe_ctlr_link_down(&hba->ctlr)) {
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+
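+ /*
+ * Wait until all offloaded sessions have been uploaded
+ * (num_ofld_sess drops to zero) before completing the
+ * link-down handling.
+ */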
+ init_waitqueue_head(&hba->shutdown_wait);
+ BNX2FC_HBA_DBG(lport, "indicate_netevent "
+ "num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 1;
+ BNX2FC_HBA_DBG(lport, "waiting for uploads to "
+ "compl proc = %s\n",
+ current->comm);
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 0;
+
+ if (signal_pending(current))
+ flush_signals(current);
+ }
+ }
+}
+
+static int bnx2fc_libfc_config(struct fc_lport *lport)
+{
+
+ /* Set the function pointers set by bnx2fc driver */
+ memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
+ sizeof(struct libfc_function_template));
+ fc_elsct_init(lport);
+ fc_exch_init(lport);
+ fc_rport_init(lport);
+ fc_disc_init(lport);
+ return 0;
+}
+
+static int bnx2fc_em_config(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+
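+ /*
+ * Allocate the libfc exchange manager for the
+ * FCOE_MIN_XID..FCOE_MAX_XID range and a separate bnx2fc
+ * command manager for the BNX2FC_MIN_XID..BNX2FC_MAX_XID range.
+ */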
+ if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
+ FCOE_MAX_XID, NULL)) {
+ printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
+ BNX2FC_MAX_XID);
+
+ if (!hba->cmd_mgr) {
+ printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
+ fc_exch_mgr_free(lport);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int bnx2fc_lport_config(struct fc_lport *lport)
+{
+ lport->link_up = 0;
+ lport->qfull = 0;
+ lport->max_retry_count = 3;
+ lport->max_rport_retry_count = 3;
+ lport->e_d_tov = 2 * 1000;
+ lport->r_a_tov = 10 * 1000;
+
+ /* REVISIT: enable when supporting tape devices
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+ */
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
+ lport->does_npiv = 1;
+
+ memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
+ lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
+
+ /* alloc stats structure */
+ if (fc_lport_init_stats(lport))
+ return -ENOMEM;
+
+ /* Finish fc_lport configuration */
+ fc_lport_config(lport);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_fip_recv - handle a received FIP frame.
+ *
+ * @skb: the received skb
+ * @dev: associated &net_device
+ * @ptype: the &packet_type structure which was used to register this handler.
+ * @orig_dev: original receive &net_device, in case @dev is a bond.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct bnx2fc_hba *hba;
+ hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
+ fcoe_ctlr_recv(&hba->ctlr, skb);
+ return 0;
+}
+
+/**
+ * bnx2fc_update_src_mac - Update the FCoE data-path source MAC address
+ *
+ * @lport: libfc local port
+ * @addr: unicast MAC address assigned by the FCoE controller
+ *
+ * Records @addr as the source MAC used for FCoE data frames on this port.
+ */
+static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
+{
+ struct fcoe_port *port = lport_priv(lport);
+
+ memcpy(port->data_src_addr, addr, ETH_ALEN);
+}
+
+/**
+ * bnx2fc_get_src_mac - return the ethernet source address for an lport
+ *
+ * @lport: libfc port
+ */
+static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
+{
+ struct fcoe_port *port;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ return port->data_src_addr;
+}
+
+/**
+ * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
+ *
+ * @fip: FCoE controller.
+ * @skb: FIP Packet.
+ */
+static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ skb->dev = bnx2fc_from_ctlr(fip)->netdev;
+ dev_queue_xmit(skb);
+}
+
+static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(n_port);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ struct fc_lport *vn_port;
+
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "vn ports cannot be created on"
+ "this hba\n");
+ return -EIO;
+ }
+ mutex_lock(&bnx2fc_dev_lock);
+ vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ if (IS_ERR(vn_port)) {
+ printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
+ netdev->name);
+ return -EIO;
+ }
+
+ if (disabled) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ } else {
+ vn_port->boot_time = jiffies;
+ fc_lport_init(vn_port);
+ fc_fabric_login(vn_port);
+ fc_vport_setlink(vn_port);
+ }
+ return 0;
+}
+
+static int bnx2fc_vport_destroy(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port = vport->dd_data;
+ struct fcoe_port *port = lport_priv(vn_port);
+
+ mutex_lock(&n_port->lp_mutex);
+ list_del(&vn_port->list);
+ mutex_unlock(&n_port->lp_mutex);
+ queue_work(bnx2fc_wq, &port->destroy_work);
+ return 0;
+}
+
+static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
+{
+ struct fc_lport *lport = vport->dd_data;
+
+ if (disable) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ fc_fabric_logoff(lport);
+ } else {
+ lport->boot_time = jiffies;
+ fc_fabric_login(lport);
+ fc_vport_setlink(lport);
+ }
+ return 0;
+}
+
+
+static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
+{
+ struct net_device *netdev = hba->netdev;
+ struct net_device *physdev = hba->phys_dev;
+ struct netdev_hw_addr *ha;
+ int sel_san_mac = 0;
+
+ /* Bonding devices are not supported */
+ if ((netdev->priv_flags & IFF_MASTER_ALB) ||
+ (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
+ (netdev->priv_flags & IFF_MASTER_8023AD)) {
+ return -EOPNOTSUPP;
+ }
+
+ /* setup Source MAC Address */
+ rcu_read_lock();
+ for_each_dev_addr(physdev, ha) {
+ BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
+ ha->type);
+ printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0],
+ ha->addr[1], ha->addr[2], ha->addr[3],
+ ha->addr[4], ha->addr[5]);
+
+ if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+ (is_valid_ether_addr(ha->addr))) {
+ memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
+ sel_san_mac = 1;
+ BNX2FC_MISC_DBG("Found SAN MAC\n");
+ }
+ }
+ rcu_read_unlock();
+
+ if (!sel_san_mac)
+ return -ENODEV;
+
+ hba->fip_packet_type.func = bnx2fc_fip_recv;
+ hba->fip_packet_type.type = htons(ETH_P_FIP);
+ hba->fip_packet_type.dev = netdev;
+ dev_add_pack(&hba->fip_packet_type);
+
+ hba->fcoe_packet_type.func = bnx2fc_rcv;
+ hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ hba->fcoe_packet_type.dev = netdev;
+ dev_add_pack(&hba->fcoe_packet_type);
+
+ return 0;
+}
+
+static int bnx2fc_attach_transport(void)
+{
+ bnx2fc_transport_template =
+ fc_attach_transport(&bnx2fc_transport_function);
+
+ if (bnx2fc_transport_template == NULL) {
+ printk(KERN_ERR PFX "Failed to attach FC transport\n");
+ return -ENODEV;
+ }
+
+ bnx2fc_vport_xport_template =
+ fc_attach_transport(&bnx2fc_vport_xport_function);
+ if (bnx2fc_vport_xport_template == NULL) {
+ printk(KERN_ERR PFX
+ "Failed to attach FC transport for vport\n");
+ fc_release_transport(bnx2fc_transport_template);
+ bnx2fc_transport_template = NULL;
+ return -ENODEV;
+ }
+ return 0;
+}
+static void bnx2fc_release_transport(void)
+{
+ fc_release_transport(bnx2fc_transport_template);
+ fc_release_transport(bnx2fc_vport_xport_template);
+ bnx2fc_transport_template = NULL;
+ bnx2fc_vport_xport_template = NULL;
+}
+
+static void bnx2fc_interface_release(struct kref *kref)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *netdev;
+ struct net_device *phys_dev;
+
+ hba = container_of(kref, struct bnx2fc_hba, kref);
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n");
+
+ netdev = hba->netdev;
+ phys_dev = hba->phys_dev;
+
+ /* tear-down FIP controller */
+ if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
+ fcoe_ctlr_destroy(&hba->ctlr);
+
+ /* Free the command manager */
+ if (hba->cmd_mgr) {
+ bnx2fc_cmd_mgr_free(hba->cmd_mgr);
+ hba->cmd_mgr = NULL;
+ }
+ dev_put(netdev);
+ module_put(THIS_MODULE);
+}
+
+static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
+{
+ kref_get(&hba->kref);
+}
+
+static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
+{
+ kref_put(&hba->kref, bnx2fc_interface_release);
+}
+static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
+{
+ bnx2fc_unbind_pcidev(hba);
+ kfree(hba);
+}
+
+/**
+ * bnx2fc_interface_create - create a new fcoe instance
+ *
+ * @cnic: pointer to cnic device
+ *
+ * Creates a new FCoE instance on the given device; this includes
+ * allocating the hba structure (the scsi_host and lport are created
+ * later by bnx2fc_if_create()).
+ */
+static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
+{
+ struct bnx2fc_hba *hba;
+ int rc;
+
+ hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+ if (!hba) {
+ printk(KERN_ERR PFX "Unable to allocate hba structure\n");
+ return NULL;
+ }
+ spin_lock_init(&hba->hba_lock);
+ mutex_init(&hba->hba_mutex);
+
+ hba->cnic = cnic;
+ rc = bnx2fc_bind_pcidev(hba);
+ if (rc)
+ goto bind_err;
+ hba->phys_dev = cnic->netdev;
+ /* will get overwritten after we do vlan discovery */
+ hba->netdev = hba->phys_dev;
+
+ init_waitqueue_head(&hba->shutdown_wait);
+ init_waitqueue_head(&hba->destroy_wait);
+
+ return hba;
+bind_err:
+ printk(KERN_ERR PFX "create_interface: bind error\n");
+ kfree(hba);
+ return NULL;
+}
+
+static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
+ enum fip_state fip_mode)
+{
+ int rc = 0;
+ struct net_device *netdev = hba->netdev;
+ struct fcoe_ctlr *fip = &hba->ctlr;
+
+ dev_hold(netdev);
+ kref_init(&hba->kref);
+
+ hba->flags = 0;
+
+ /* Initialize FIP */
+ memset(fip, 0, sizeof(*fip));
+ fcoe_ctlr_init(fip, fip_mode);
+ hba->ctlr.send = bnx2fc_fip_send;
+ hba->ctlr.update_mac = bnx2fc_update_src_mac;
+ hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
+ set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
+
+ rc = bnx2fc_netdev_setup(hba);
+ if (rc)
+ goto setup_err;
+
+ hba->next_conn_id = 0;
+
+ memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
+ hba->num_ofld_sess = 0;
+
+ return 0;
+
+setup_err:
+ fcoe_ctlr_destroy(&hba->ctlr);
+ dev_put(netdev);
+ bnx2fc_interface_put(hba);
+ return rc;
+}
+
+/**
+ * bnx2fc_if_create - Create FCoE instance on a given interface
+ *
+ * @hba: FCoE interface to create a local port on
+ * @parent: Device pointer to be the parent in sysfs for the SCSI host
+ * @npiv: Indicates whether the new port is a vport
+ *
+ * Creates an fc_lport instance and a Scsi_Host instance and configures them.
+ *
+ * Returns: Allocated fc_lport or an error pointer
+ */
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+ struct device *parent, int npiv)
+{
+ struct fc_lport *lport = NULL;
+ struct fcoe_port *port;
+ struct Scsi_Host *shost;
+ struct fc_vport *vport = dev_to_vport(parent);
+ int rc = 0;
+
+ /* Allocate Scsi_Host structure */
+ if (!npiv) {
+ lport = libfc_host_alloc(&bnx2fc_shost_template,
+ sizeof(struct fcoe_port));
+ } else {
+ lport = libfc_vport_create(vport,
+ sizeof(struct fcoe_port));
+ }
+
+ if (!lport) {
+ printk(KERN_ERR PFX "could not allocate scsi host structure\n");
+ return NULL;
+ }
+ shost = lport->host;
+ port = lport_priv(lport);
+ port->lport = lport;
+ port->priv = hba;
+ INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
+
+ /* Configure fcoe_port */
+ rc = bnx2fc_lport_config(lport);
+ if (rc)
+ goto lp_config_err;
+
+ if (npiv) {
+ vport = dev_to_vport(parent);
+ printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
+ vport->node_name, vport->port_name);
+ fc_set_wwnn(lport, vport->node_name);
+ fc_set_wwpn(lport, vport->port_name);
+ }
+ /* Configure netdev and networking properties of the lport */
+ rc = bnx2fc_net_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
+ goto lp_config_err;
+ }
+
+ rc = bnx2fc_shost_config(lport, parent);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ hba->netdev->name);
+ goto lp_config_err;
+ }
+
+ /* Initialize the libfc library */
+ rc = bnx2fc_libfc_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ goto shost_err;
+ }
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+
+ /* Allocate exchange manager */
+ if (!npiv) {
+ rc = bnx2fc_em_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
+ goto shost_err;
+ }
+ }
+
+ bnx2fc_interface_get(hba);
+ return lport;
+
+shost_err:
+ scsi_remove_host(shost);
+lp_config_err:
+ scsi_host_put(lport->host);
+ return NULL;
+}
+
+static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
+{
+ /* Don't listen for Ethernet packets anymore */
+ __dev_remove_pack(&hba->fcoe_packet_type);
+ __dev_remove_pack(&hba->fip_packet_type);
+ synchronize_net();
+}
+
+static void bnx2fc_if_destroy(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
+ /* Stop the transmit retry timer */
+ del_timer_sync(&port->timer);
+
+ /* Free existing transmit skbs */
+ fcoe_clean_pending_queue(lport);
+
+ bnx2fc_interface_put(hba);
+
+ /* Free queued packets for the receive thread */
+ bnx2fc_clean_rx_queue(lport);
+
+ /* Detach from scsi-ml */
+ fc_remove_host(lport->host);
+ scsi_remove_host(lport->host);
+
+ /*
+ * Note that only the physical lport will have the exchange manager.
+ * for vports, this function is NOP
+ */
+ fc_exch_mgr_free(lport);
+
+ /* Free memory used by statistical counters */
+ fc_lport_free_stats(lport);
+
+ /* Release Scsi_Host */
+ scsi_host_put(lport->host);
+}
+
+/**
+ * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
+ *
+ * @netdev: The net_device whose FCoE interface is to be destroyed
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_destroy(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba = NULL;
+ struct net_device *phys_dev;
+ int rc = 0;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ mutex_lock(&bnx2fc_dev_lock);
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto netdev_err;
+ }
+#endif
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto netdev_err;
+ }
+
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
+ goto netdev_err;
+ }
+
+ if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
+ goto netdev_err;
+ }
+
+ bnx2fc_netdev_cleanup(hba);
+
+ bnx2fc_stop(hba);
+
+ bnx2fc_if_destroy(hba->ctlr.lp);
+
+ destroy_workqueue(hba->timer_work_queue);
+
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+ bnx2fc_fw_destroy(hba);
+
+ clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+netdev_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+static void bnx2fc_destroy_work(struct work_struct *work)
+{
+ struct fcoe_port *port;
+ struct fc_lport *lport;
+
+ port = container_of(work, struct fcoe_port, destroy_work);
+ lport = port->lport;
+
+ BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+
+ bnx2fc_port_shutdown(lport);
+ rtnl_lock();
+ mutex_lock(&bnx2fc_dev_lock);
+ bnx2fc_if_destroy(lport);
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+}
+
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ bnx2fc_free_fw_resc(hba);
+ bnx2fc_free_task_ctx(hba);
+}
+
+/**
+ * bnx2fc_bind_adapter_devices - set up the task context and firmware
+ * resources for the adapter
+ *
+ * @hba: Adapter instance
+ */
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ if (bnx2fc_setup_task_ctx(hba))
+ goto mem_err;
+
+ if (bnx2fc_setup_fw_resc(hba))
+ goto mem_err;
+
+ return 0;
+mem_err:
+ bnx2fc_unbind_adapter_devices(hba);
+ return -ENOMEM;
+}
+
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
+{
+ struct cnic_dev *cnic;
+
+ if (!hba->cnic) {
+ printk(KERN_ERR PFX "cnic is NULL\n");
+ return -ENODEV;
+ }
+ cnic = hba->cnic;
+ hba->pcidev = cnic->pcidev;
+ if (hba->pcidev)
+ pci_dev_get(hba->pcidev);
+
+ return 0;
+}
+
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
+{
+ if (hba->pcidev)
+ pci_dev_put(hba->pcidev);
+ hba->pcidev = NULL;
+}
+
+
+
+/**
+ * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * This function maps the adapter structure to the pcidev structure and
+ * initiates the firmware handshake to enable/initialize the on-chip FCoE
+ * components. This bnx2fc-cnic interface API callback is invoked after
+ * the following conditions are met:
+ * a) the underlying network interface is up (marked by the NETDEV_UP
+ * event from netdev), and
+ * b) the bnx2fc adapter structure is registered.
+ */
+static void bnx2fc_ulp_start(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct fc_lport *lport = hba->ctlr.lp;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+ goto start_disc;
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
+ bnx2fc_fw_init(hba);
+
+start_disc:
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ BNX2FC_MISC_DBG("bnx2fc started.\n");
+
+ /* Kick off Fabric discovery*/
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "ulp_init: start discovery\n");
+ lport->tt.frame_send = bnx2fc_xmit;
+ bnx2fc_start_disc(hba);
+ }
+}
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport)
+{
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ fc_fabric_logoff(lport);
+ fc_lport_destroy(lport);
+}
+
+static void bnx2fc_stop(struct bnx2fc_hba *hba)
+{
+ struct fc_lport *lport;
+ struct fc_lport *vport;
+
+ BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
+ hba->init_done);
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
+ test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ lport = hba->ctlr.lp;
+ bnx2fc_port_shutdown(lport);
+ BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
+ "offloaded sessions\n",
+ hba->num_ofld_sess);
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ fcoe_ctlr_link_down(&hba->ctlr);
+ fcoe_clean_pending_queue(lport);
+
+ mutex_lock(&hba->hba_mutex);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_unlock(&hba->hba_mutex);
+ }
+}
+
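+/**
+ * bnx2fc_fw_init - bring up the FCoE firmware interface
+ *
+ * @hba: adapter structure pointer
+ *
+ * Allocates the firmware resources, sends the FCoE INIT KWQEs and
+ * polls for roughly one second for the adapter state to become UP.
+ */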
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
+{
+#define BNX2FC_INIT_POLL_TIME (1000 / HZ)
+ int rc = -1;
+ int i = HZ;
+
+ rc = bnx2fc_bind_adapter_devices(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnx2fc_send_fw_fcoe_init_msg(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
+ goto err_unbind;
+ }
+
+ /*
+ * Wait until the adapter init message is complete, and adapter
+ * state is UP.
+ */
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+ msleep(BNX2FC_INIT_POLL_TIME);
+
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
+ printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. "
+ "Ignoring...\n",
+ hba->cnic->netdev->name);
+ rc = -1;
+ goto err_unbind;
+ }
+
+
+ /* Mark HBA to indicate that the FW INIT is done */
+ set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
+ return 0;
+
+err_unbind:
+ bnx2fc_unbind_adapter_devices(hba);
+err_out:
+ return rc;
+}
+
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
+{
+ if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
+ init_timer(&hba->destroy_timer);
+ hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
+ jiffies;
+ hba->destroy_timer.function = bnx2fc_destroy_timer;
+ hba->destroy_timer.data = (unsigned long)hba;
+ add_timer(&hba->destroy_timer);
+ wait_event_interruptible(hba->destroy_wait,
+ (hba->flags &
+ BNX2FC_FLAG_DESTROY_CMPL));
+ /* This should never happen */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&hba->destroy_timer);
+ }
+ bnx2fc_unbind_adapter_devices(hba);
+ }
+}
+
+/**
+ * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * The driver checks whether the adapter is already in shutdown mode and,
+ * if not, starts the shutdown process.
+ */
+static void bnx2fc_ulp_stop(void *handle)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;
+
+ printk(KERN_ERR "ULP_STOP\n");
+
+ mutex_lock(&bnx2fc_dev_lock);
+ bnx2fc_stop(hba);
+ bnx2fc_fw_destroy(hba);
+ mutex_unlock(&bnx2fc_dev_lock);
+}
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
+{
+ struct fc_lport *lport;
+ int wait_cnt = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* Kick off FIP/FLOGI */
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "Init not done yet\n");
+ return;
+ }
+
+ lport = hba->ctlr.lp;
+ BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
+
+ if (!bnx2fc_link_ok(lport)) {
+ BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
+ fcoe_ctlr_link_up(&hba->ctlr);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ }
+
+ /* wait for the FCF to be selected before issuing FLOGI */
+ while (!hba->ctlr.sel_fcf) {
+ msleep(250);
+ /* give up after 3 secs */
+ if (++wait_cnt > 12)
+ break;
+ }
+ fc_lport_init(lport);
+ fc_fabric_login(lport);
+}
+
+
+/**
+ * bnx2fc_ulp_init - Initialize an adapter instance
+ *
+ * @dev: cnic device handle
+ *
+ * Called from cnic_register_driver() context to initialize all
+ * enumerated cnic devices. This routine allocates the adapter structure
+ * and other device-specific resources.
+ */
+static void bnx2fc_ulp_init(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+ int rc = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* bnx2fc works only when bnx2x is loaded */
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
+ " flags: %lx\n",
+ dev->netdev->name, dev->flags);
+ return;
+ }
+
+ /* Configure FCoE interface */
+ hba = bnx2fc_interface_create(dev);
+ if (!hba) {
+ printk(KERN_ERR PFX "hba initialization failed\n");
+ return;
+ }
+
+ /* Add HBA to the adapter list */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_add_tail(&hba->link, &adapter_list);
+ adapter_count++;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+ rc = dev->register_device(dev, CNIC_ULP_FCOE,
+ (void *) hba);
+ if (rc)
+ printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
+ else
+ set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+
+static int bnx2fc_disable(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+ return -EIO;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
+ } else {
+ fcoe_ctlr_link_down(&hba->ctlr);
+ fcoe_clean_pending_queue(hba->ctlr.lp);
+ }
+
+nodev:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+
+static int bnx2fc_enable(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+ return -EIO;
+ }
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
+ } else if (!bnx2fc_link_ok(hba->ctlr.lp))
+ fcoe_ctlr_link_up(&hba->ctlr);
+
+nodev:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_create - Create bnx2fc FCoE interface
+ *
+ * @netdev: The net_device to create the FCoE interface on
+ * @fip_mode: The FIP mode required for this interface
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct fc_lport *lport;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+ int vlan_id;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
+ if (fip_mode != FIP_MODE_FABRIC) {
+ printk(KERN_ERR "fip mode not FABRIC\n");
+ return -EIO;
+ }
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR "trying for rtnl_lock\n");
+ return -EIO;
+ }
+ mutex_lock(&bnx2fc_dev_lock);
+
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto mod_err;
+ }
+#endif
+
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -EINVAL;
+ goto mod_err;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ phys_dev = vlan_dev_real_dev(netdev);
+ vlan_id = vlan_dev_vlan_id(netdev);
+ } else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
+ goto netdev_err;
+ }
+
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ rc = bnx2fc_fw_init(hba);
+ if (rc)
+ goto netdev_err;
+ }
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ rc = -EEXIST;
+ goto netdev_err;
+ }
+
+ /* update netdev with vlan netdev */
+ hba->netdev = netdev;
+ hba->vlan_id = vlan_id;
+ hba->vlan_enabled = 1;
+
+ rc = bnx2fc_interface_setup(hba, fip_mode);
+ if (rc) {
+ printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
+ goto ifput_err;
+ }
+
+ hba->timer_work_queue =
+ create_singlethread_workqueue("bnx2fc_timer_wq");
+ if (!hba->timer_work_queue) {
+ printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
+ rc = -EINVAL;
+ goto ifput_err;
+ }
+
+ lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
+ if (!lport) {
+ printk(KERN_ERR PFX "Failed to create interface (%s)\n",
+ netdev->name);
+ bnx2fc_netdev_cleanup(hba);
+ rc = -EINVAL;
+ goto if_create_err;
+ }
+
+ lport->boot_time = jiffies;
+
+ /* Make this master N_port */
+ hba->ctlr.lp = lport;
+
+ set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+ printk(KERN_ERR PFX "create: START DISC\n");
+ bnx2fc_start_disc(hba);
+ /*
+ * Drop the reference from kref_init() in bnx2fc_interface_setup();
+ * on success the lport holds the reference taken in bnx2fc_if_create().
+ */
+ bnx2fc_interface_put(hba);
+ /* put netdev that was held while calling dev_get_by_name */
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return 0;
+
+if_create_err:
+ destroy_workqueue(hba->timer_work_queue);
+ifput_err:
+ bnx2fc_interface_put(hba);
+netdev_err:
+ module_put(THIS_MODULE);
+mod_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
+ *
+ * @cnic: Pointer to cnic device instance
+ *
+ **/
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+ struct list_head *list;
+ struct list_head *temp;
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_safe(list, temp, &adapter_list) {
+ hba = (struct bnx2fc_hba *)list;
+ if (hba->cnic == cnic)
+ return hba;
+ }
+ return NULL;
+}
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
+{
+ struct list_head *list;
+ struct list_head *temp;
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_safe(list, temp, &adapter_list) {
+ hba = (struct bnx2fc_hba *)list;
+ if (hba->phys_dev == phys_dev)
+ return hba;
+ }
+ printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
+ return NULL;
+}
+
+/**
+ * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
+ *
+ * @dev: cnic device handle
+ */
+static void bnx2fc_ulp_exit(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
+
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
+ dev->netdev->name, dev->flags);
+ return;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+ hba = bnx2fc_find_hba_for_cnic(dev);
+ if (!hba) {
+ printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n",
+ dev);
+ mutex_unlock(&bnx2fc_dev_lock);
+ return;
+ }
+
+ list_del_init(&hba->link);
+ adapter_count--;
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ /* destroy was not called yet; clean up the interface now */
+ bnx2fc_netdev_cleanup(hba);
+ bnx2fc_if_destroy(hba->ctlr.lp);
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+ bnx2fc_interface_destroy(hba);
+}
+
+/**
+ * bnx2fc_fcoe_reset - Resets the fcoe
+ *
+ * @shost: shost the reset is from
+ *
+ * Returns: always 0
+ */
+static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+ fc_lport_reset(lport);
+ return 0;
+}
+
+
+static bool bnx2fc_match(struct net_device *netdev)
+{
+ mutex_lock(&bnx2fc_dev_lock);
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ struct net_device *phys_dev = vlan_dev_real_dev(netdev);
+
+ if (bnx2fc_hba_lookup(phys_dev)) {
+ mutex_unlock(&bnx2fc_dev_lock);
+ return true;
+ }
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+ return false;
+}
+
+
+static struct fcoe_transport bnx2fc_transport = {
+ .name = {"bnx2fc"},
+ .attached = false,
+ .list = LIST_HEAD_INIT(bnx2fc_transport.list),
+ .match = bnx2fc_match,
+ .create = bnx2fc_create,
+ .destroy = bnx2fc_destroy,
+ .enable = bnx2fc_enable,
+ .disable = bnx2fc_disable,
+};
+
+/**
+ * bnx2fc_percpu_thread_create - Create a receive thread for an
+ * online CPU
+ *
+ * @cpu: cpu index for the online cpu
+ */
+static void bnx2fc_percpu_thread_create(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(bnx2fc_percpu, cpu);
+
+ thread = kthread_create(bnx2fc_percpu_io_thread,
+ (void *)p,
+ "bnx2fc_thread/%d", cpu);
+ /* bind the newly created thread to the cpu */
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
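+/**
+ * bnx2fc_percpu_thread_destroy - destroy the receive thread for a CPU
+ *
+ * @cpu: cpu index of the CPU whose thread is being destroyed
+ *
+ * Detaches the per-CPU io thread, completes any work still queued for
+ * that CPU and stops the thread.
+ */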
+static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+ struct bnx2fc_work *work, *tmp;
+ LIST_HEAD(work_list);
+
+ BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
+
+ /* Prevent any new work from being queued for this CPU */
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&p->fp_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+ /* Splice any work still queued for this CPU so it is completed below */
+ list_splice_init(&p->work_list, &work_list);
+
+ /* Free all work in the list */
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->fp_work_lock);
+
+ if (thread)
+ kthread_stop(thread);
+}
+
+/**
+ * bnx2fc_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb: The callback data block
+ * @action: The event triggering the callback
+ * @hcpu: The index of the CPU that the event is for
+ *
+ * This creates or destroys the per-CPU Rx thread for the CPU
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ printk(PFX "CPU %x online: Create Rx thread\n", cpu);
+ bnx2fc_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2fc_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+/**
+ * bnx2fc_mod_init - module init entry point
+ *
+ * Initialize driver wide global data structures, and register
+ * with cnic module
+ **/
+static int __init bnx2fc_mod_init(void)
+{
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ int rc = 0;
+ unsigned int cpu = 0;
+ struct bnx2fc_percpu_s *p;
+
+ printk(KERN_INFO PFX "%s", version);
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&bnx2fc_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&adapter_list);
+ mutex_init(&bnx2fc_dev_lock);
+ adapter_count = 0;
+
+ /* Attach FC transport template */
+ rc = bnx2fc_attach_transport();
+ if (rc)
+ goto detach_ft;
+
+ bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+ if (!bnx2fc_wq) {
+ rc = -ENOMEM;
+ goto release_bt;
+ }
+
+ bg = &bnx2fc_global;
+ skb_queue_head_init(&bg->fcoe_rx_list);
+ l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
+ (void *)bg,
+ "bnx2fc_l2_thread");
+ if (IS_ERR(l2_thread)) {
+ rc = PTR_ERR(l2_thread);
+ goto free_wq;
+ }
+ wake_up_process(l2_thread);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ bg->thread = l2_thread;
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->fp_work_lock);
+ }
+
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_create(cpu);
+ }
+
+ /* Initialize per CPU interrupt thread */
+ register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
+
+ return 0;
+
+free_wq:
+ destroy_workqueue(bnx2fc_wq);
+release_bt:
+ bnx2fc_release_transport();
+detach_ft:
+ fcoe_transport_detach(&bnx2fc_transport);
+out:
+ return rc;
+}
+
+static void __exit bnx2fc_mod_exit(void)
+{
+ LIST_HEAD(to_be_deleted);
+ struct bnx2fc_hba *hba, *next;
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ struct sk_buff *skb;
+ unsigned int cpu = 0;
+
+ /*
+ * NOTE: Since cnic's register_driver routine runs under rtnl_lock,
+ * rtnl_lock has higher precedence than bnx2fc_dev_lock;
+ * unregister_device() cannot be called with bnx2fc_dev_lock
+ * held.
+ */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_splice(&adapter_list, &to_be_deleted);
+ INIT_LIST_HEAD(&adapter_list);
+ adapter_count = 0;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ /* Unregister with cnic */
+ list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
+ list_del_init(&hba->link);
+ printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
+ hba, atomic_read(&hba->kref.refcount));
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
+ &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+ bnx2fc_interface_destroy(hba);
+ }
+ cnic_unregister_driver(CNIC_ULP_FCOE);
+
+ /* Destroy global thread */
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ l2_thread = bg->thread;
+ bg->thread = NULL;
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ if (l2_thread)
+ kthread_stop(l2_thread);
+
+ unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ /* Destroy per cpu threads */
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_destroy(cpu);
+ }
+
+ destroy_workqueue(bnx2fc_wq);
+ /*
+ * detach from scsi transport
+ * must happen after all destroys are done
+ */
+ bnx2fc_release_transport();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&bnx2fc_transport);
+}
+
+module_init(bnx2fc_mod_init);
+module_exit(bnx2fc_mod_exit);
+
+static struct fc_function_template bnx2fc_transport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = bnx2fc_get_host_stats,
+
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+
+ .terminate_rport_io = fc_rport_terminate_io,
+
+ .vport_create = bnx2fc_vport_create,
+ .vport_delete = bnx2fc_vport_destroy,
+ .vport_disable = bnx2fc_vport_disable,
+};
+
+static struct fc_function_template bnx2fc_vport_xport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+ .terminate_rport_io = fc_rport_terminate_io,
+};
+
+/**
+ * scsi_host_template structure used while registering with SCSI-ml
+ */
+static struct scsi_host_template bnx2fc_shost_template = {
+ .module = THIS_MODULE,
+ .name = "Broadcom Offload FCoE Initiator",
+ .queuecommand = bnx2fc_queuecommand,
+ .eh_abort_handler = bnx2fc_eh_abort, /* abts */
+ .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
+ .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
+ .eh_host_reset_handler = fc_eh_host_reset,
+ .slave_alloc = fc_slave_alloc,
+ .change_queue_depth = fc_change_queue_depth,
+ .change_queue_type = fc_change_queue_type,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2),
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
+ .max_sectors = 512,
+};
+
+static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
+ .frame_send = bnx2fc_xmit,
+ .elsct_send = bnx2fc_elsct_send,
+ .fcp_abort_io = bnx2fc_abort_io,
+ .fcp_cleanup = bnx2fc_cleanup,
+ .rport_event_callback = bnx2fc_rport_event_handler,
+};
+
+/**
+ * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
+ * structure carrying callback function pointers
+ */
+static struct cnic_ulp_ops bnx2fc_cnic_cb = {
+ .owner = THIS_MODULE,
+ .cnic_init = bnx2fc_ulp_init,
+ .cnic_exit = bnx2fc_ulp_exit,
+ .cnic_start = bnx2fc_ulp_start,
+ .cnic_stop = bnx2fc_ulp_stop,
+ .indicate_kcqes = bnx2fc_indicate_kcqe,
+ .indicate_netevent = bnx2fc_indicate_netevent,
+};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
new file mode 100644
index 000000000000..4f4096836742
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -0,0 +1,1868 @@
+/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the low-level functions that interact
+ * with the 57712 FCoE firmware.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe);
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *conn_destroy);
+
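+/**
+ * bnx2fc_send_stat_req - send the STAT KWQE to request firmware statistics
+ *
+ * @hba: adapter structure pointer
+ *
+ * The firmware DMAs the statistics into the buffer at hba->stats_buf_dma.
+ */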
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_stat stat_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
+ stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
+ stat_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
+ stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &stat_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
+ *
+ * @hba: adapter structure pointer
+ *
+ * Sends down the FCoE firmware init KWQEs, which initiate the initial handshake
+ * with the f/w.
+ *
+ */
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_init1 fcoe_init1;
+ struct fcoe_kwqe_init2 fcoe_init2;
+ struct fcoe_kwqe_init3 fcoe_init3;
+ struct kwqe *kwqe_arr[3];
+ int num_kwqes = 3;
+ int rc = 0;
+
+ if (!hba->cnic) {
+ printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
+ return -ENODEV;
+ }
+
+ /* fill init1 KWQE */
+ memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
+ fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
+ fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
+ fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
+ fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
+ fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
+ fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
+ fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+ fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
+ fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
+ fcoe_init1.task_list_pbl_addr_hi =
+ (u32) ((u64) hba->task_ctx_bd_dma >> 32);
+ fcoe_init1.mtu = hba->netdev->mtu;
+
+ fcoe_init1.flags = (PAGE_SHIFT <<
+ FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
+
+ fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
+
+ /* fill init2 KWQE */
+ memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
+ fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
+ fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
+ fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
+ ((u64) hba->hash_tbl_pbl_dma >> 32);
+
+ fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
+ fcoe_init2.t2_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_dma >> 32);
+
+ fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
+ fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
+
+ fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
+
+ /* fill init3 KWQE */
+ memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
+ fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
+ fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ fcoe_init3.error_bit_map_lo = 0xffffffff;
+ fcoe_init3.error_bit_map_hi = 0xffffffff;
+
+
+ kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
+ kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
+ kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
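+
+/**
+ * bnx2fc_send_fw_fcoe_destroy_msg - send the FCoE DESTROY KWQE
+ *
+ * @hba: adapter structure pointer
+ *
+ * Tears down the firmware FCoE state that was set up by
+ * bnx2fc_send_fw_fcoe_init_msg().
+ */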
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_destroy fcoe_destroy;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = -1;
+
+ /* fill destroy KWQE */
+ memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
+ fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
+ fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_hba *hba = port->priv;
+ struct kwqe *kwqe_arr[4];
+ struct fcoe_kwqe_conn_offload1 ofld_req1;
+ struct fcoe_kwqe_conn_offload2 ofld_req2;
+ struct fcoe_kwqe_conn_offload3 ofld_req3;
+ struct fcoe_kwqe_conn_offload4 ofld_req4;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 4;
+ u32 port_id;
+ int rc = 0;
+ u16 conn_id;
+
+ /* Initialize offload request 1 structure */
+ memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
+
+ ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
+ ofld_req1.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+
+ conn_id = (u16)tgt->fcoe_conn_id;
+ ofld_req1.fcoe_conn_id = conn_id;
+
+
+ ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
+ ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
+
+ ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
+ ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
+
+ ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
+ ofld_req1.rq_first_pbe_addr_hi =
+ (u32)((u64) tgt->rq_dma >> 32);
+
+ ofld_req1.rq_prod = 0x8000;
+
+ /* Initialize offload request 2 structure */
+ memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
+
+ ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
+ ofld_req2.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
+
+ ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
+ ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
+
+ ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
+ ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
+
+ ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
+ ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
+
+ /* Initialize offload request 3 structure */
+ memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
+
+ ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
+ ofld_req3.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req3.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
+ ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id == 0) {
+ BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Store s_id of the initiator for further reference. This will
+ * be used during disable/destroy during linkdown processing as
+ * when the lport is reset, the port_id also is reset to 0
+ */
+ tgt->sid = port_id;
+ ofld_req3.s_id[0] = (port_id & 0x000000FF);
+ ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ ofld_req3.d_id[0] = (port_id & 0x000000FF);
+ ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ ofld_req3.tx_total_conc_seqs = rdata->max_seq;
+
+ ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
+ ofld_req3.rx_max_fc_pay_len = lport->mfs;
+
+ ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_open_seqs_exch_c3 = 1;
+
+ ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
+ ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
+
+ /* set mul_n_port_ids supported flag to 0, until it is supported */
+ ofld_req3.flags = 0;
+ /*
+ ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
+ */
+ /* Info from PLOGI response */
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
+
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
+
+ /* vlan flag */
+ ofld_req3.flags |= (hba->vlan_enabled <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
+
+ /* C2_VALID and ACK flags are not set as they are not supported */
+
+
+ /* Initialize offload request 4 structure */
+ memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
+ ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
+ ofld_req4.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
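+ /*
+ * lport->e_d_tov is in milliseconds; the firmware timer field
+ * appears to be in units of 20 ms, hence the division.
+ */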
+ ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
+
+
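+ /*
+ * MAC addresses are programmed byte-reversed: the last three octets
+ * of the address go into the lo32 fields, the first three into hi16.
+ */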
+ ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ /* local mac */
+ ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
+ ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
+ ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
+
+ ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
+ ofld_req4.confq_pbl_base_addr_hi =
+ (u32)((u64) tgt->confq_pbl_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+ kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+ kwqe_arr[2] = (struct kwqe *) &ofld_req3;
+ kwqe_arr[3] = (struct kwqe *) &ofld_req4;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct kwqe *kwqe_arr[2];
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_kwqe_conn_enable_disable enbl_req;
+ struct fc_lport *lport = port->lport;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&enbl_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
+ enbl_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ /* local mac */
+ enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
+
+ enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id != tgt->sid) {
+ printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
+ "sid = 0x%x\n", port_id, tgt->sid);
+ port_id = tgt->sid;
+ }
+ enbl_req.s_id[0] = (port_id & 0x000000FF);
+ enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ enbl_req.d_id[0] = (port_id & 0x000000FF);
+ enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ enbl_req.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ enbl_req.vlan_flag = hba->vlan_enabled;
+ enbl_req.context_id = tgt->context_id;
+ enbl_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &enbl_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_disable_req - initiates FCoE Session disable
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_kwqe_conn_enable_disable disable_req;
+ struct kwqe *kwqe_arr[2];
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&disable_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
+ disable_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
+
+ disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ port_id = tgt->sid;
+ disable_req.s_id[0] = (port_id & 0x000000FF);
+ disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+
+ port_id = rport->port_id;
+ disable_req.d_id[0] = (port_id & 0x000000FF);
+ disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ disable_req.context_id = tgt->context_id;
+ disable_req.conn_id = tgt->fcoe_conn_id;
+ disable_req.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ disable_req.vlan_tag |=
+ 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ disable_req.vlan_flag = hba->vlan_enabled;
+
+ kwqe_arr[0] = (struct kwqe *) &disable_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ struct fcoe_kwqe_conn_destroy destroy_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
+ destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
+ destroy_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ destroy_req.context_id = tgt->context_id;
+ destroy_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &destroy_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+static void bnx2fc_unsol_els_work(struct work_struct *work)
+{
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+
+ unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
+ lport = unsol_els->lport;
+ fp = unsol_els->fp;
+ fc_exch_recv(lport, fp);
+ kfree(unsol_els);
+}
+
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid)
+{
+ struct fcoe_port *port = tgt->port;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_frame_header *fh;
+ struct fc_frame *fp;
+ struct sk_buff *skb;
+ u32 payload_len;
+ u32 crc;
+ u8 op;
+
+
+ unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
+ if (!unsol_els) {
+ BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
+ l2_oxid, frame_len);
+
+ payload_len = frame_len - sizeof(struct fc_frame_header);
+
+ fp = fc_frame_alloc(lport, payload_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ kfree(unsol_els);
+ return;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, frame_len);
+
+ if (l2_oxid != FC_XID_UNKNOWN)
+ fh->fh_ox_id = htons(l2_oxid);
+
+ skb = fp_skb(fp);
+
+ if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
+ (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
+
+ if (fh->fh_type == FC_TYPE_ELS) {
+ op = fc_frame_payload_op(fp);
+ if ((op == ELS_TEST) || (op == ELS_ESTC) ||
+ (op == ELS_FAN) || (op == ELS_CSU)) {
+ /*
+ * No need to reply for these
+ * ELS requests
+ */
+ printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
+ kfree_skb(skb);
+ kfree(unsol_els);
+ return;
+ }
+ }
+ crc = fcoe_fc_crc(fp);
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = FC_SOF_I3;
+ fr_eof(fp) = FC_EOF_T;
+ fr_crc(fp) = cpu_to_le32(~crc);
+ unsol_els->lport = lport;
+ unsol_els->fp = fp;
+ INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
+ queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
+ } else {
+ BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
+ kfree_skb(skb);
+ kfree(unsol_els);
+ }
+}
+
+static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ u8 num_rq;
+ struct fcoe_err_report_entry *err_entry;
+ unsigned char *rq_data;
+ unsigned char *buf = NULL, *buf1;
+ int i;
+ u16 xid;
+ u32 frame_len, len;
+ struct bnx2fc_cmd *io_req = NULL;
+ struct fcoe_task_ctx_entry *task, *task_page;
+ struct bnx2fc_hba *hba = tgt->port->priv;
+ int task_idx, index;
+ int rc = 0;
+
+
+ BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
+ switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
+ case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
+ frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
+ FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
+
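+ /* number of RQ buffers needed to hold the frame (rounded up) */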
+ num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
+
+ rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
+ if (rq_data) {
+ buf = rq_data;
+ } else {
+ buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
+ GFP_ATOMIC);
+
+ if (!buf1) {
+ BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
+ break;
+ }
+
+ for (i = 0; i < num_rq; i++) {
+ rq_data = (unsigned char *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ len = BNX2FC_RQ_BUF_SZ;
+ memcpy(buf1, rq_data, len);
+ buf1 += len;
+ }
+ }
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
+ FC_XID_UNKNOWN);
+
+ if (buf != rq_data)
+ kfree(buf);
+ bnx2fc_return_rqe(tgt, num_rq);
+ break;
+
+ case FCOE_ERROR_DETECTION_CQE_TYPE:
+ /*
+ * In case of an error reporting CQE, a single RQ entry
+ * is consumed.
+ */
+ spin_lock_bh(&tgt->tgt_lock);
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = err_entry->fc_hdr.ox_id;
+ BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
+ err_entry->err_warn_bitmap_hi,
+ err_entry->err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
+ err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+ bnx2fc_return_rqe(tgt, 1);
+
+ if (xid > BNX2FC_MAX_XID) {
+ BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
+ xid);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)
+ hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+ if (!io_req) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
+ printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
+ "progress.. ignore unsol err\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ /*
+ * If ABTS is already in progress, and FW error is
+ * received after that, do not cancel the timeout_work
+ * and let the error recovery continue by explicitly
+ * logging out the target, when the ABTS eventually
+ * times out.
+ */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ /*
+ * Cancel the timeout_work, as we received IO
+ * completion with FW error.
+ */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* timer hold */
+
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
+ "failed. issue cleanup\n");
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+ } else
+ printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
+ "in ABTS processing\n", xid);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+
+ case FCOE_WARNING_DETECTION_CQE_TYPE:
+ /*
+ * In case of a warning reporting CQE, a single RQ entry
+ * is consumed.
+ */
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
+ BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
+ err_entry->err_warn_bitmap_hi,
+ err_entry->err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
+ err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+ bnx2fc_return_rqe(tgt, 1);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
+ break;
+ }
+}
+
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd *io_req;
+ int task_idx, index;
+ u16 xid;
+ u8 cmd_type;
+ u8 rx_state = 0;
+ u8 num_rq;
+
+ spin_lock_bh(&tgt->tgt_lock);
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+ if (xid >= BNX2FC_MAX_TASKS) {
+ printk(KERN_ALERT PFX "ERROR:xid out of range\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
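+ /* locate the task context page and the entry within it for this xid */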
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
+ num_rq = ((task->rx_wr_tx_rd.rx_flags &
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+
+ if (io_req == NULL) {
+ printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+
+ /* Timestamp IO completion time */
+ cmd_type = io_req->cmd_type;
+
+ /* optimized completion path */
+ if (cmd_type == BNX2FC_SCSI_CMD) {
+ rx_state = ((task->rx_wr_tx_rd.rx_flags &
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
+
+ if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
+ bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+ }
+
+ /* Process other IO completion types */
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+ bnx2fc_process_abts_compl(io_req, task, num_rq);
+ else if (rx_state ==
+ FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ else
+ printk(KERN_ERR PFX "Invalid rx state - %d\n",
+ rx_state);
+ break;
+
+ case BNX2FC_TASK_MGMT_CMD:
+ BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
+ bnx2fc_process_tm_compl(io_req, task, num_rq);
+ break;
+
+ case BNX2FC_ABTS:
+ /*
+ * ABTS request received by firmware. ABTS response
+ * will be delivered to the task belonging to the IO
+ * that was aborted
+ */
+ BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ case BNX2FC_ELS:
+ BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
+ bnx2fc_process_els_compl(io_req, task, num_rq);
+ break;
+
+ case BNX2FC_CLEANUP:
+ BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
+ break;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct bnx2fc_work *work;
+ work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
+ if (!work)
+ return NULL;
+
+ INIT_LIST_HEAD(&work->list);
+ work->tgt = tgt;
+ work->wqe = wqe;
+ return work;
+}
+
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_cqe *cq;
+ u32 cq_cons;
+ struct fcoe_cqe *cqe;
+ u16 wqe;
+ bool more_cqes_found = false;
+
+ /*
+ * cq_lock is a low contention lock used to protect
+ * the CQ data structure from being freed up during
+ * the upload operation
+ */
+ spin_lock_bh(&tgt->cq_lock);
+
+ if (!tgt->cq) {
+ printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+ }
+ cq = tgt->cq;
+ cq_cons = tgt->cq_cons_idx;
+ cqe = &cq[cq_cons];
+
+ do {
+ more_cqes_found ^= true;
+
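+ /*
+ * A CQE is new when its toggle bit matches the current toggle
+ * value; the bit flips each time the CQ ring wraps around.
+ */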
+ while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
+ (tgt->cq_curr_toggle_bit <<
+ FCOE_CQE_TOGGLE_BIT_SHIFT)) {
+
+ /* new entry on the cq */
+ if (wqe & FCOE_CQE_CQE_TYPE) {
+ /* Unsolicited event notification */
+ bnx2fc_process_unsol_compl(tgt, wqe);
+ } else {
+ struct bnx2fc_work *work = NULL;
+ struct bnx2fc_percpu_s *fps = NULL;
+ unsigned int cpu = wqe % num_possible_cpus();
+
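+ /*
+ * Spread completion handling across the per-CPU I/O threads;
+ * fall back to inline processing if the thread is not running.
+ */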
+ fps = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&fps->fp_work_lock);
+ if (unlikely(!fps->iothread))
+ goto unlock;
+
+ work = bnx2fc_alloc_work(tgt, wqe);
+ if (work)
+ list_add_tail(&work->list,
+ &fps->work_list);
+unlock:
+ spin_unlock_bh(&fps->fp_work_lock);
+
+ /* Pending work request completion */
+ if (fps->iothread && work)
+ wake_up_process(fps->iothread);
+ else
+ bnx2fc_process_cq_compl(tgt, wqe);
+ }
+ cqe++;
+ tgt->cq_cons_idx++;
+
+ if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
+ tgt->cq_cons_idx = 0;
+ cqe = cq;
+ tgt->cq_curr_toggle_bit =
+ 1 - tgt->cq_curr_toggle_bit;
+ }
+ }
+ /* Re-arm CQ */
+ if (more_cqes_found) {
+ tgt->conn_db->cq_arm.lo = -1;
+ wmb();
+ }
+ } while (more_cqes_found);
+
+ /*
+ * Commit tgt->cq_cons_idx change to the memory
+ * spin_lock implies full memory barrier, no need to smp_wmb
+ */
+
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+}
+
+/**
+ * bnx2fc_fastpath_notification - process global event queue (KCQ)
+ *
+ * @hba: adapter structure pointer
+ * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
+ *
+ * Fast path event notification handler
+ */
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe)
+{
+ u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
+ struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
+
+ if (!tgt) {
+ printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
+ return;
+ }
+
+ bnx2fc_process_new_cqes(tgt);
+}
+
+/**
+ * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session offload completion, enable the session if offload is
+ * successful.
+ */
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ struct fcoe_port *port;
+ u32 conn_id;
+ u32 context_id;
+ int rc;
+
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
+ return;
+ }
+ BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+ port = tgt->port;
+ if (hba != tgt->port->priv) {
+ printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ goto ofld_cmpl_err;
+ }
+ /*
+ * cnic has allocated a context_id for this session; use this
+ * while enabling the session.
+ */
+ tgt->context_id = context_id;
+ if (ofld_kcqe->completion_status) {
+ if (ofld_kcqe->completion_status ==
+ FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
+ printk(KERN_ERR PFX "unable to allocate FCoE context "
+ "resources\n");
+ set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
+ }
+ goto ofld_cmpl_err;
+ } else {
+
+ /* now enable the session */
+ rc = bnx2fc_send_session_enable_req(port, tgt);
+ if (rc) {
+ printk(KERN_ALERT PFX "enable session failed\n");
+ goto ofld_cmpl_err;
+ }
+ }
+ return;
+ofld_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+/**
+ * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session enable completion, mark the rport as ready
+ */
+
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+ u32 context_id;
+
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+
+ /*
+ * context_id should be the same for this target during offload
+ * and enable
+ */
+ if (tgt->context_id != context_id) {
+ printk(KERN_ALERT PFX "context id mis-match\n");
+ return;
+ }
+ if (hba != tgt->port->priv) {
+ printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ goto enbl_cmpl_err;
+ }
+ if (ofld_kcqe->completion_status) {
+ goto enbl_cmpl_err;
+ } else {
+ /* enable successful - rport ready for issuing IOs */
+ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+ }
+ return;
+
+enbl_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *disable_kcqe)
+{
+
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = disable_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
+
+ if (disable_kcqe->completion_status) {
+ printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
+ disable_kcqe->completion_status);
+ return;
+ } else {
+ /* disable successful */
+ BNX2FC_TGT_DBG(tgt, "disable successful\n");
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *destroy_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = destroy_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
+
+ if (destroy_kcqe->completion_status) {
+ printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
+ destroy_kcqe->completion_status);
+ return;
+ } else {
+ /* destroy successful */
+ BNX2FC_TGT_DBG(tgt, "upload successful\n");
+ clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
+{
+ switch (err_code) {
+ case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
+ printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
+ printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
+ printk(KERN_ERR PFX "init_failure due to NIC error\n");
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
+ }
+}
+
+/**
+ * bnx2fc_indicate_kcqe - process KCQE
+ *
+ * @context: adapter structure pointer
+ * @kcq: array of kcqe pointers
+ * @num_cqe: Number of completion queue elements
+ *
+ * Generic KCQ event handler
+ */
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ int i = 0;
+ struct fcoe_kcqe *kcqe = NULL;
+
+ while (i < num_cqe) {
+ kcqe = (struct fcoe_kcqe *) kcq[i++];
+
+ switch (kcqe->op_code) {
+ case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
+ bnx2fc_fastpath_notification(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
+ bnx2fc_process_ofld_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_ENABLE_CONN:
+ bnx2fc_process_enable_conn_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_INIT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+ bnx2fc_init_failure(hba,
+ kcqe->completion_status);
+ } else {
+ set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ bnx2fc_get_link_state(hba);
+ printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
+ (u8)hba->pcidev->bus->number);
+ }
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+
+ printk(KERN_ERR PFX "DESTROY failed\n");
+ } else {
+ printk(KERN_ERR PFX "DESTROY success\n");
+ }
+ hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ wake_up_interruptible(&hba->destroy_wait);
+ break;
+
+ case FCOE_KCQE_OPCODE_DISABLE_CONN:
+ bnx2fc_process_conn_disable_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_CONN:
+ bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_STAT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
+ printk(KERN_ERR PFX "STAT failed\n");
+ complete(&hba->stat_req_done);
+ break;
+
+ case FCOE_KCQE_OPCODE_FCOE_ERROR:
+ /* fall thru */
+ default:
+ printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
+ kcqe->op_code);
+ }
+ }
+}
+
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
+{
+ struct fcoe_sqe *sqe;
+
+ sqe = &tgt->sq[tgt->sq_prod_idx];
+
+ /* Fill SQ WQE */
+ sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
+ sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
+
+ /* Advance SQ Prod Idx */
+ if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
+ tgt->sq_prod_idx = 0;
+ tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
+ }
+}
+
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
+{
+ struct b577xx_doorbell_set_prod ev_doorbell;
+ u32 msg;
+
+ wmb();
+
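+ /*
+ * The doorbell carries the SQ producer index with the current
+ * toggle bit in bit 15, written as one 32-bit MMIO access.
+ */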
+ memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
+ ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
+
+ ev_doorbell.prod = tgt->sq_prod_idx |
+ (tgt->sq_curr_toggle_bit << 15);
+ ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+ msg = *((u32 *)&ev_doorbell);
+ writel(cpu_to_le32(msg), tgt->ctx_base);
+
+ mmiowb();
+
+}
+
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
+{
+ u32 context_id = tgt->context_id;
+ struct fcoe_port *port = tgt->port;
+ u32 reg_off;
+ resource_size_t reg_base;
+ struct bnx2fc_hba *hba = port->priv;
+
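+ /*
+ * Each connection has its own doorbell page in the doorbell BAR;
+ * map just the 4-byte doorbell register for this context.
+ */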
+ reg_base = pci_resource_start(hba->pcidev,
+ BNX2X_DOORBELL_PCI_BAR);
+ reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
+ (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
+ tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!tgt->ctx_base)
+ return -ENOMEM;
+ return 0;
+}
+
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
+
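+ /*
+ * A request that would run past the end of the RQ is not returned
+ * as one contiguous region; the caller copies it RQE by RQE instead.
+ */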
+ if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
+ return NULL;
+
+ tgt->rq_cons_idx += num_items;
+
+ if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
+ tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
+
+ return buf;
+}
+
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ /* return the rq buffer */
+ u32 next_prod_idx = tgt->rq_prod_idx + num_items;
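+ /*
+ * The low 15 bits index the ring; when they reach the ring size,
+ * skip ahead so they restart at zero while bit 15 appears to act
+ * as a wrap/sequence indicator.
+ */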
+ if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
+ /* Wrap around RQ */
+ next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
+ }
+ tgt->rq_prod_idx = next_prod_idx;
+ tgt->conn_db->rq_prod = tgt->rq_prod_idx;
+}
+
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid)
+{
+ u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u32 context_id = tgt->context_id;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Tx Write Rx Read */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+ /* Common */
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.general.cleanup_info.task_id = orig_xid;
+
+
+}
+
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_frame_header *fc_hdr;
+ u8 task_type = 0;
+ u64 *hdr;
+ u64 temp_hdr[3];
+ u32 context_id;
+
+
+ /* Obtain task_type */
+ if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
+ (io_req->cmd_type == BNX2FC_ELS)) {
+ task_type = FCOE_TASK_TYPE_MIDPATH;
+ } else if (io_req->cmd_type == BNX2FC_ABTS) {
+ task_type = FCOE_TASK_TYPE_ABTS;
+ }
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
+ io_req->cmd_type, task_type);
+
+ /* Tx only */
+ if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
+ (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)mp_req->mp_req_bd_dma;
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_req_bd_dma >> 32);
+ task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+ BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
+ (unsigned long long)mp_req->mp_req_bd_dma);
+ }
+
+ /* Tx Write Rx Read */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+ /* Common */
+ task->cmn.data_2_trns = io_req->data_xfer_len;
+ context_id = tgt->context_id;
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+ /* Rx Write Tx Read */
+ fc_hdr = &(mp_req->req_fc_hdr);
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
+ fc_hdr->fh_rx_id = htons(0xffff);
+ task->rx_wr_tx_rd.rx_id = 0xffff;
+ } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
+ fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
+ }
+
+ /* Fill FC Header into middle path buffer */
+ hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ /* Rx Only */
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)mp_req->mp_resp_bd_dma;
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
+ task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+ }
+}
+
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ u8 task_type;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct io_bdt *bd_tbl = io_req->bd_tbl;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u64 *fcp_cmnd;
+ u64 tmp_fcp_cmnd[4];
+ u32 context_id;
+ int cnt, i;
+ int bd_count;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ task_type = FCOE_TASK_TYPE_WRITE;
+ else
+ task_type = FCOE_TASK_TYPE_READ;
+
+ /* Tx only */
+ if (task_type == FCOE_TASK_TYPE_WRITE) {
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
+ bd_tbl->bd_valid;
+ }
+
+ /*Tx Write Rx Read */
+ /* Init state to NORMAL */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+ /* Common */
+ task->cmn.data_2_trns = io_req->data_xfer_len;
+ context_id = tgt->context_id;
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+ /* Set initiative ownership */
+ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
+
+ /* Set initial seq counter */
+ task->cmn.tx_low_seq_cnt = 1;
+
+ /* Set state to "waiting for the first packet" */
+ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
+
+ /* Fill FCP_CMND IU */
+ fcp_cmnd = (u64 *)
+ task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+
+ /* swap fcp_cmnd */
+ cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
+
+ for (i = 0; i < cnt; i++) {
+ *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
+ fcp_cmnd++;
+ }
+
+ /* Rx Write Tx Read */
+ task->rx_wr_tx_rd.rx_id = 0xffff;
+
+ /* Rx Only */
+ if (task_type == FCOE_TASK_TYPE_READ) {
+
+ bd_count = bd_tbl->bd_valid;
+ if (bd_count == 1) {
+
+ struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
+ fcoe_bd_tbl->buf_addr_lo;
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
+ fcoe_bd_tbl->buf_addr_hi;
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
+ fcoe_bd_tbl->buf_len;
+ task->tx_wr_rx_rd.init_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
+ } else {
+
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
+ bd_tbl->bd_valid;
+ }
+ }
+}
+
+/**
+ * bnx2fc_setup_task_ctx - allocate and map task context
+ *
+ * @hba: pointer to adapter structure
+ *
+ * allocate memory for task context, and associated BD table to be used
+ * by firmware
+ *
+ */
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
+{
+ int rc = 0;
+ struct regpair *task_ctx_bdt;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Allocate task context bd table. A page size of bd table
+ * can map 256 buffers. Each buffer contains 32 task context
+ * entries. Hence the limit with one page is 8192 task context
+ * entries.
+ */
+ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
+ if (!hba->task_ctx_bd_tbl) {
+ printk(KERN_ERR PFX "unable to allocate task context BDT\n");
+ rc = -1;
+ goto out;
+ }
+ memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
+
+ /*
+ * Allocate task_ctx which is an array of pointers pointing to
+ * a page containing 32 task contexts
+ */
+ hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
+ GFP_KERNEL);
+ if (!hba->task_ctx) {
+ printk(KERN_ERR PFX "unable to allocate task context array\n");
+ rc = -1;
+ goto out1;
+ }
+
+ /*
+ * Allocate task_ctx_dma which is an array of dma addresses
+ */
+ hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
+ sizeof(dma_addr_t)), GFP_KERNEL);
+ if (!hba->task_ctx_dma) {
+ printk(KERN_ERR PFX "unable to alloc context mapping array\n");
+ rc = -1;
+ goto out2;
+ }
+
+ task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+
+ hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
+ if (!hba->task_ctx[i]) {
+ printk(KERN_ERR PFX "unable to alloc task context\n");
+ rc = -1;
+ goto out3;
+ }
+ memset(hba->task_ctx[i], 0, PAGE_SIZE);
+ addr = (u64)hba->task_ctx_dma[i];
+ task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
+ task_ctx_bdt->lo = cpu_to_le32((u32)addr);
+ task_ctx_bdt++;
+ }
+ return 0;
+
+out3:
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ if (hba->task_ctx[i]) {
+
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i], hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+out2:
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+out1:
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+out:
+ return rc;
+}
+
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
+{
+ int i;
+
+ if (hba->task_ctx_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl,
+ hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+ }
+
+ if (hba->task_ctx) {
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ if (hba->task_ctx[i]) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i],
+ hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+}
+
+static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int segment_count;
+ int hash_table_size;
+ u32 *pbl;
+
+ segment_count = hba->hash_tbl_segment_count;
+ hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+ sizeof(struct fcoe_hash_table_entry);
+
+ pbl = hba->hash_tbl_pbl;
+ for (i = 0; i < segment_count; ++i) {
+ dma_addr_t dma_address;
+
+ dma_address = le32_to_cpu(*pbl);
+ ++pbl;
+ dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
+ ++pbl;
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_address);
+
+ }
+
+ if (hba->hash_tbl_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->hash_tbl_pbl,
+ hba->hash_tbl_pbl_dma);
+ hba->hash_tbl_pbl = NULL;
+ }
+}
+
+static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int hash_table_size;
+ int segment_count;
+ int segment_array_size;
+ int dma_segment_array_size;
+ dma_addr_t *dma_segment_array;
+ u32 *pbl;
+
+ hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+ sizeof(struct fcoe_hash_table_entry);
+
+ segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
+ segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
+ hba->hash_tbl_segment_count = segment_count;
+
+ segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
+ hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
+ if (!hba->hash_tbl_segments) {
+ printk(KERN_ERR PFX "hash table pointers alloc failed\n");
+ return -ENOMEM;
+ }
+ dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
+ dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
+ if (!dma_segment_array) {
+ printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < segment_count; ++i) {
+ hba->hash_tbl_segments[i] =
+ dma_alloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
+ if (!hba->hash_tbl_segments[i]) {
+ printk(KERN_ERR PFX "hash segment alloc failed\n");
+ while (--i >= 0) {
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_segment_array[i]);
+ hba->hash_tbl_segments[i] = NULL;
+ }
+ kfree(dma_segment_array);
+ return -ENOMEM;
+ }
+ memset(hba->hash_tbl_segments[i], 0,
+ BNX2FC_HASH_TBL_CHUNK_SIZE);
+ }
+
+ hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
+ if (!hba->hash_tbl_pbl) {
+ printk(KERN_ERR PFX "hash table pbl alloc failed\n");
+ kfree(dma_segment_array);
+ return -ENOMEM;
+ }
+ memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
+
+ pbl = hba->hash_tbl_pbl;
+ for (i = 0; i < segment_count; ++i) {
+ u64 paddr = dma_segment_array[i];
+ *pbl = cpu_to_le32((u32) paddr);
+ ++pbl;
+ *pbl = cpu_to_le32((u32) (paddr >> 32));
+ ++pbl;
+ }
+ pbl = hba->hash_tbl_pbl;
+ i = 0;
+ while (*pbl && *(pbl + 1)) {
+ u32 lo;
+ u32 hi;
+ lo = *pbl;
+ ++pbl;
+ hi = *pbl;
+ ++pbl;
+ ++i;
+ }
+ kfree(dma_segment_array);
+ return 0;
+}
+
+/**
+ * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
+ *
+ * @hba: Pointer to adapter structure
+ *
+ */
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
+{
+ u64 addr;
+ u32 mem_size;
+ int i;
+
+ if (bnx2fc_allocate_hash_table(hba))
+ return -ENOMEM;
+
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl_ptr) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
+
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl, 0x00, mem_size);
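+ /* link each T2 hash entry to the DMA address of the next entry */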
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ addr = (unsigned long) hba->t2_hash_tbl_dma +
+ ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
+ hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
+ hba->t2_hash_tbl[i].next.hi = addr >> 32;
+ }
+
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE, &hba->dummy_buf_dma,
+ GFP_KERNEL);
+ if (!hba->dummy_buffer) {
+ printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+
+ hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
+ if (!hba->stats_buffer) {
+ printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->stats_buffer, 0x00, PAGE_SIZE);
+
+ return 0;
+}
+
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
+{
+ u32 mem_size;
+
+ if (hba->stats_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->stats_buffer, hba->stats_buf_dma);
+ hba->stats_buffer = NULL;
+ }
+
+ if (hba->dummy_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer, hba->dummy_buf_dma);
+ hba->dummy_buffer = NULL;
+ }
+
+ if (hba->t2_hash_tbl_ptr) {
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl_ptr,
+ hba->t2_hash_tbl_ptr_dma);
+ hba->t2_hash_tbl_ptr = NULL;
+ }
+
+ if (hba->t2_hash_tbl) {
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
+ hba->t2_hash_tbl = NULL;
+ }
+ bnx2fc_free_hash_table(hba);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
new file mode 100644
index 000000000000..0f1dd23730db
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -0,0 +1,1833 @@
+/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * IO manager and SCSI IO processing.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index);
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+ struct bnx2fc_cmd *io_req);
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq);
+
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec)
+{
+ struct bnx2fc_hba *hba = io_req->port->priv;
+
+ if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
+ msecs_to_jiffies(timer_msec)))
+ kref_get(&io_req->refcount);
+}
+
+static void bnx2fc_cmd_timeout(struct work_struct *work)
+{
+ struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
+ timeout_work.work);
+ struct fc_lport *lport;
+ struct fc_rport_priv *rdata;
+ u8 cmd_type = io_req->cmd_type;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ int logo_issued;
+ int rc;
+
+ BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d,"
+ "req_flags = %lx\n", cmd_type, io_req->req_flags);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
+ clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+ /*
+ * ideally we should hold the io_req until RRQ completes,
+ * and release io_req from timeout hold.
+ */
+ spin_unlock_bh(&tgt->tgt_lock);
+ bnx2fc_send_rrq(io_req);
+ return;
+ }
+ if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
+ goto done;
+ }
+
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
+ complete(&io_req->tm_done);
+ } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ /* Handle internally generated ABTS timeout */
+ BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+ if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+ /* Handle IO timeout */
+ BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO completed before "
+ " timer expiry\n");
+ goto done;
+ }
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc == SUCCESS)
+ goto done;
+ /*
+ * Explicitly logo the target if
+ * abts initiation fails
+ */
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ } else {
+ BNX2FC_IO_DBG(io_req, "IO already in "
+ "ABTS processing\n");
+ }
+ }
+ break;
+ case BNX2FC_ELS:
+
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicitly logo"
+ "(els)\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+ /*
+ * Handle ELS timeout.
+ * tgt_lock is used to sync compl path and timeout
+ * path. If els compl path is processing this IO, we
+ * have nothing to do here, just release the timer hold
+ */
+ BNX2FC_IO_DBG(io_req, "ELS timed out\n");
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &io_req->req_flags))
+ goto done;
+
+ /* Indicate the cb_func that this ELS is timed out */
+ set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+ }
+ break;
+ default:
+ printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
+ cmd_type);
+ break;
+ }
+
+done:
+ /* release the cmd that was held when timer was set */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
+{
+ /* Called with host lock held */
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+
+ /*
+ * active_cmd_queue may have other command types as well,
+ * and during flush operation, we want to error back only
+ * scsi commands.
+ */
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ return;
+
+ BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
+ bnx2fc_unmap_sg_list(io_req);
+ io_req->sc_cmd = NULL;
+ if (!sc_cmd) {
+ printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
+ "IO(0x%x) already cleaned up\n",
+ io_req->xid);
+ return;
+ }
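+ /* err_code is placed in the host byte of the SCSI result */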
+ sc_cmd->result = err_code << 16;
+
+ BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
+ sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
+ sc_cmd->allowed);
+ scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+}
+
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+ u16 min_xid, u16 max_xid)
+{
+ struct bnx2fc_cmd_mgr *cmgr;
+ struct io_bdt *bdt_info;
+ struct bnx2fc_cmd *io_req;
+ size_t len;
+ u32 mem_size;
+ u16 xid;
+ int i;
+ int num_ios;
+ size_t bd_tbl_sz;
+
+ if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
+ printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
+ and max_xid 0x%x\n", min_xid, max_xid);
+ return NULL;
+ }
+ BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
+
+ num_ios = max_xid - min_xid + 1;
+ len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
+ len += sizeof(struct bnx2fc_cmd_mgr);
+
+ cmgr = kzalloc(len, GFP_KERNEL);
+ if (!cmgr) {
+ printk(KERN_ERR PFX "failed to alloc cmgr\n");
+ return NULL;
+ }
+
+ cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
+ num_possible_cpus(), GFP_KERNEL);
+ if (!cmgr->free_list) {
+ printk(KERN_ERR PFX "failed to alloc free_list\n");
+ goto mem_err;
+ }
+
+ cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
+ num_possible_cpus(), GFP_KERNEL);
+ if (!cmgr->free_list_lock) {
+ printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+ goto mem_err;
+ }
+
+ cmgr->hba = hba;
+ cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ INIT_LIST_HEAD(&cmgr->free_list[i]);
+ spin_lock_init(&cmgr->free_list_lock[i]);
+ }
+
+ /* Pre-allocated pool of bnx2fc_cmds */
+ xid = BNX2FC_MIN_XID;
+ for (i = 0; i < num_ios; i++) {
+ io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
+
+ if (!io_req) {
+ printk(KERN_ERR PFX "failed to alloc io_req\n");
+ goto mem_err;
+ }
+
+ INIT_LIST_HEAD(&io_req->link);
+ INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
+
+ io_req->xid = xid++;
+ if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS)
+ printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n",
+ io_req->xid);
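+ /* distribute commands across the per-CPU free lists by xid */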
+ list_add_tail(&io_req->link,
+ &cmgr->free_list[io_req->xid % num_possible_cpus()]);
+ io_req++;
+ }
+
+ /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
+ mem_size = num_ios * sizeof(struct io_bdt *);
+ cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool) {
+ printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
+ goto mem_err;
+ }
+
+ mem_size = sizeof(struct io_bdt);
+ for (i = 0; i < num_ios; i++) {
+ cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool[i]) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "io_bdt_pool[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+ /* Allocate and map fcoe_bdt_ctx structures */
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ bd_tbl_sz,
+ &bdt_info->bd_tbl_dma,
+ GFP_KERNEL);
+ if (!bdt_info->bd_tbl) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "bdt_tbl[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+ return cmgr;
+
+mem_err:
+ bnx2fc_cmd_mgr_free(cmgr);
+ return NULL;
+}
+
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
+{
+ struct io_bdt *bdt_info;
+ struct bnx2fc_hba *hba = cmgr->hba;
+ size_t bd_tbl_sz;
+ u16 min_xid = BNX2FC_MIN_XID;
+ u16 max_xid = BNX2FC_MAX_XID;
+ int num_ios;
+ int i;
+
+ num_ios = max_xid - min_xid + 1;
+
+ /* Free fcoe_bdt_ctx structures */
+ if (!cmgr->io_bdt_pool)
+ goto free_cmd_pool;
+
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ if (bdt_info->bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
+ bdt_info->bd_tbl,
+ bdt_info->bd_tbl_dma);
+ bdt_info->bd_tbl = NULL;
+ }
+ }
+
+ /* Destroy io_bdt pool */
+ for (i = 0; i < num_ios; i++) {
+ kfree(cmgr->io_bdt_pool[i]);
+ cmgr->io_bdt_pool[i] = NULL;
+ }
+
+ kfree(cmgr->io_bdt_pool);
+ cmgr->io_bdt_pool = NULL;
+
+free_cmd_pool:
+ kfree(cmgr->free_list_lock);
+
+ /* Destroy cmd pool */
+ if (!cmgr->free_list)
+ goto free_cmgr;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ struct list_head *list;
+ struct list_head *tmp;
+
+ list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
+ struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
+ list_del(&io_req->link);
+ kfree(io_req);
+ }
+ }
+ kfree(cmgr->free_list);
+free_cmgr:
+ /* Free command manager itself */
+ kfree(cmgr);
+}
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ u32 max_sqes;
+ u16 xid;
+
+ max_sqes = tgt->max_sqes;
+ switch (type) {
+ case BNX2FC_TASK_MGMT_CMD:
+ max_sqes = BNX2FC_TM_MAX_SQES;
+ break;
+ case BNX2FC_ELS:
+ max_sqes = BNX2FC_ELS_MAX_SQES;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) ||
+ (tgt->num_active_ios.counter >= max_sqes)) {
+ BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
+ "ios(%d):sqes(%d)\n",
+ tgt->num_active_ios.counter, tgt->max_sqes);
+ if (list_empty(&(cmd_mgr->free_list[smp_processor_id()])))
+ printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ return NULL;
+ }
+
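+ /*
+ * The free list is linked through io_req->link; the cast below assumes
+ * 'link' is the first member of struct bnx2fc_cmd.
+ */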
+ listp = (struct list_head *)
+ cmd_mgr->free_list[smp_processor_id()].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+ io_req->cmd_type = type;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ u32 max_sqes;
+ u16 xid;
+
+ max_sqes = BNX2FC_SCSI_MAX_SQES;
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) ||
+ (tgt->num_active_ios.counter >= max_sqes)) {
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ return NULL;
+ }
+
+ listp = (struct list_head *)
+ cmd_mgr->free_list[smp_processor_id()].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+
+void bnx2fc_cmd_release(struct kref *ref)
+{
+ struct bnx2fc_cmd *io_req = container_of(ref,
+ struct bnx2fc_cmd, refcount);
+ struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
+
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ bnx2fc_free_mp_resc(io_req);
+ cmd_mgr->cmds[io_req->xid] = NULL;
+ /* Delete IO from retire queue */
+ list_del_init(&io_req->link);
+ /* Add it to the free list */
+ list_add(&io_req->link,
+ &cmd_mgr->free_list[smp_processor_id()]);
+ atomic_dec(&io_req->tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+}
+
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_hba *hba = io_req->port->priv;
+ size_t sz = sizeof(struct fcoe_bd_ctx);
+
+ /* clear tm flags */
+ mp_req->tm_flags = 0;
+ if (mp_req->mp_req_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_req_bd,
+ mp_req->mp_req_bd_dma);
+ mp_req->mp_req_bd = NULL;
+ }
+ if (mp_req->mp_resp_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_resp_bd,
+ mp_req->mp_resp_bd_dma);
+ mp_req->mp_resp_bd = NULL;
+ }
+ if (mp_req->req_buf) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->req_buf,
+ mp_req->req_buf_dma);
+ mp_req->req_buf = NULL;
+ }
+ if (mp_req->resp_buf) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->resp_buf,
+ mp_req->resp_buf_dma);
+ mp_req->resp_buf = NULL;
+ }
+}
+
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fcoe_bd_ctx *mp_req_bd;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ struct bnx2fc_hba *hba = io_req->port->priv;
+ dma_addr_t addr;
+ size_t sz;
+
+ mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+ memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ mp_req->req_len = sizeof(struct fcp_cmnd);
+ io_req->data_xfer_len = mp_req->req_len;
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &mp_req->req_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->req_buf) {
+ printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &mp_req->resp_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->resp_buf) {
+ printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ memset(mp_req->req_buf, 0, PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, PAGE_SIZE);
+
+ /* Allocate and map mp_req_bd and mp_resp_bd */
+ sz = sizeof(struct fcoe_bd_ctx);
+ mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_req_bd_dma,
+ GFP_ATOMIC);
+ if (!mp_req->mp_req_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP req bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_resp_bd_dma,
+ GFP_ATOMIC);
+ if (!mp_req->mp_resp_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ /* Fill bd table */
+ addr = mp_req->req_buf_dma;
+ mp_req_bd = mp_req->mp_req_bd;
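+ /* The firmware BD carries the DMA address split into 32-bit halves */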
+ mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_req_bd->buf_len = PAGE_SIZE;
+ mp_req_bd->flags = 0;
+
+ /*
+ * MP buffer is either a task mgmt command or an ELS.
+ * So the assumption is that it consumes a single bd
+ * entry in the bd table
+ */
+ mp_resp_bd = mp_req->mp_resp_bd;
+ addr = mp_req->resp_buf_dma;
+ mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_resp_bd->buf_len = PAGE_SIZE;
+ mp_resp_bd->flags = 0;
+
+ return SUCCESS;
+}
+
+static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+{
+ struct fc_lport *lport;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fcoe_port *port;
+ struct bnx2fc_hba *hba;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ struct bnx2fc_mp_req *tm_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct Scsi_Host *host = sc_cmd->device->host;
+ struct fc_frame_header *fc_hdr;
+ struct fcp_cmnd *fcp_cmnd;
+ int task_idx, index;
+ int rc = SUCCESS;
+ u16 xid;
+ u32 sid, did;
+ unsigned long start = jiffies;
+
+ lport = shost_priv(host);
+ port = lport_priv(lport);
+ hba = port->priv;
+
+ if (rport == NULL) {
+ printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "device_reset: link is not ready\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
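+ /* Retry the command allocation below for up to ~1 second (HZ jiffies) */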
+retry_tmf:
+ io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
+ if (!io_req) {
+ if (time_after(jiffies, start + HZ)) {
+ printk(KERN_ERR PFX "tmf: Failed TMF");
+ rc = FAILED;
+ goto tmf_err;
+ }
+ msleep(20);
+ goto retry_tmf;
+ }
+ /* Initialize rest of io_req fields */
+ io_req->sc_cmd = sc_cmd;
+ io_req->port = port;
+ io_req->tgt = tgt;
+
+ tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+
+ rc = bnx2fc_init_mp_req(io_req);
+ if (rc == FAILED) {
+ printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ goto tmf_err;
+ }
+
+ /* Set TM flags */
+ io_req->io_req_flags = 0;
+ tm_req->tm_flags = tm_flags;
+
+ /* Fill FCP_CMND */
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
+ fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
+ memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
+ fcp_cmnd->fc_dl = 0;
+
+ /* Fill FC header */
+ fc_hdr = &(tm_req->req_fc_hdr);
+ sid = tgt->sid;
+ did = rport->port_id;
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
+ FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+ /* Obtain exchange id */
+ xid = io_req->xid;
+
+ BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(io_req, task);
+
+ sc_cmd->SCp.ptr = (char *)io_req;
+
+ /* Obtain free SQ entry */
+ spin_lock_bh(&tgt->tgt_lock);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_tm_queue */
+ io_req->on_tmf_queue = 1;
+ list_add_tail(&io_req->link, &tgt->active_tm_queue);
+
+ init_completion(&io_req->tm_done);
+ io_req->wait_for_comp = 1;
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ rc = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_TM_TIMEOUT * HZ);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ io_req->wait_for_comp = 0;
+ if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+ set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!rc) {
+ printk(KERN_ERR PFX "task mgmt command failed...\n");
+ rc = FAILED;
+ } else {
+ printk(KERN_ERR PFX "task mgmt command success...\n");
+ rc = SUCCESS;
+ }
+tmf_err:
+ return rc;
+}
+
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *abts_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fc_frame_header *fc_hdr;
+ struct bnx2fc_mp_req *abts_req;
+ int task_idx, index;
+ u32 sid, did;
+ u16 xid;
+ int rc = SUCCESS;
+ u32 r_a_tov = rdata->r_a_tov;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
+
+ port = io_req->port;
+ hba = port->priv;
+ lport = port->lport;
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (rport == NULL) {
+ printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
+ if (!abts_io_req) {
+ printk(KERN_ERR PFX "abts: couldnt allocate cmd\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ abts_io_req->sc_cmd = NULL;
+ abts_io_req->port = port;
+ abts_io_req->tgt = tgt;
+ abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
+
+ abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
+ memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ /* Fill FC header */
+ fc_hdr = &(abts_req->req_fc_hdr);
+
+ /* Obtain oxid and rxid for the original exchange to be aborted */
+ fc_hdr->fh_ox_id = htons(io_req->xid);
+ fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
+
+ sid = tgt->sid;
+ did = rport->port_id;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
+ FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ xid = abts_io_req->xid;
+ BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(abts_io_req, task);
+
+ /*
+ * ABTS task is a temporary task that will be cleaned up
+ * irrespective of ABTS response. We need to start the timer
+ * for the original exchange, as the CQE is posted for the original
+ * IO request.
+ *
+ * Timer for ABTS is started only when it is originated by a
+ * TM request. For the ABTS issued as part of ULP timeout,
+ * scsi-ml maintains the timers.
+ */
+
+ /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
+ bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+abts_err:
+ return rc;
+}
+
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *cleanup_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int task_idx, index;
+ u16 xid, orig_xid;
+ int rc = 0;
+
+ /* ASSUMPTION: called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
+
+ port = io_req->port;
+ hba = port->priv;
+ lport = port->lport;
+
+ cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
+ if (!cleanup_io_req) {
+ printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ rc = -1;
+ goto cleanup_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ cleanup_io_req->sc_cmd = NULL;
+ cleanup_io_req->port = port;
+ cleanup_io_req->tgt = tgt;
+ cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+ xid = cleanup_io_req->xid;
+
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ orig_xid = io_req->xid;
+
+ BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
+
+ bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+cleanup_err:
+ return rc;
+}
+
+/**
+ * bnx2fc_eh_target_reset - Reset a target
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Called via the SCSI host template to send a task mgmt command to the
+ * target and wait for the response
+ */
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+}
+
+/**
+ * bnx2fc_eh_device_reset - Reset a single LUN
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Called via the SCSI host template to send a task mgmt command to the
+ * target and wait for the response
+ */
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+}
+
+/**
+ * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
+ * SCSI command
+ *
+ * @sc_cmd: SCSI_ML command pointer
+ *
+ * SCSI abort request handler
+ */
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_cmd *io_req;
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt;
+ int rc = FAILED;
+
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ lport = shost_priv(sc_cmd->device->host);
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ printk(KERN_ALERT PFX "eh_abort: link not ready\n");
+ return rc;
+ }
+
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+ if (!io_req) {
+ /* Command might have just completed */
+ printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+ BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+
+ /* Hold IO request across abort processing */
+ kref_get(&io_req->refcount);
+
+ BUG_ON(tgt != io_req->tgt);
+
+ /* Remove the io_req from the active_q. */
+ /*
+ * Task Mgmt functions (LUN RESET & TGT RESET) will not
+ * issue an ABTS on this particular IO req, as the
+ * io_req is no longer in the active_q.
+ */
+ if (tgt->flush_in_prog) {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "flush in progress\n", io_req->xid);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ if (io_req->on_active_queue == 0) {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "not on active_q\n", io_req->xid);
+ /*
+ * This condition can only happen due to a FW bug,
+ * where we do not receive a cleanup response from
+ * the FW. Handle this case gracefully by erroring
+ * back the IO request to SCSI-ml.
+ */
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ /*
+ * Only eh_abort processing will remove the IO from
+ * active_cmd_q before processing the request. this is
+ * done to avoid race conditions between IOs aborted
+ * as part of task management completion and eh_abort
+ * processing
+ */
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+
+ init_completion(&io_req->tm_done);
+ io_req->wait_for_comp = 1;
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ /* Cancel the current timer running on this io_req */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+ rc = bnx2fc_initiate_abts(io_req);
+ } else {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "already in abts processing\n", io_req->xid);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+ if (rc == FAILED) {
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return rc;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+ /* Let the scsi-ml try to recover this command */
+ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
+ io_req->xid);
+ rc = FAILED;
+ } else {
+ /*
+ * We come here even when there was a race condition
+ * between timeout and abts completion, and abts
+ * completion happens just in time.
+ */
+ BNX2FC_IO_DBG(io_req, "abort succeeded\n");
+ rc = SUCCESS;
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+
+ /* release the reference taken in eh_abort */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return rc;
+}
+
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
+ "refcnt = %d, cmd_type = %d\n",
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ u32 r_ctl;
+ u32 r_a_tov = FC_DEF_R_A_TOV;
+ u8 issue_rrq = 0;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+
+ BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
+ "refcnt = %d, cmd_type = %d\n",
+ io_req->xid,
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing"
+ " this io\n");
+ return;
+ }
+
+ /* Do not issue RRQ as this IO is already cleaned up */
+ if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags))
+ goto io_compl;
+
+ /*
+ * For ABTS issued due to SCSI eh_abort_handler, timeout
+ * values are maintained by scsi-ml itself. Cancel timeout
+ * in case ABTS issued as part of task management function
+ * or due to FW error.
+ */
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+
+ switch (r_ctl) {
+ case FC_RCTL_BA_ACC:
+ /*
+ * Don't release this cmd yet. It will be released
+ * after we get the RRQ response
+ */
+ BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
+ issue_rrq = 1;
+ break;
+
+ case FC_RCTL_BA_RJT:
+ BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
+ break;
+ default:
+ printk(KERN_ERR PFX "Unknown ABTS response\n");
+ break;
+ }
+
+ if (issue_rrq) {
+ BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
+ set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+ set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+ bnx2fc_cmd_timer_set(io_req, r_a_tov);
+
+io_compl:
+ if (io_req->wait_for_comp) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ } else {
+ /*
+ * We end up here when the ABTS is issued in an
+ * asynchronous context, i.e. as part of task
+ * management completion, when an FW error is
+ * received, or when the ABTS is issued because
+ * the IO timed out.
+ */
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ }
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+}
+
+static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2fc_cmd *cmd;
+ int tm_lun = sc_cmd->device->lun;
+ int rc = 0;
+ int lun;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
+ /*
+ * Walk through the active_ios queue and ABORT the IOs
+ * that match the LUN that was reset
+ */
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
+ cmd = (struct bnx2fc_cmd *)list;
+ lun = cmd->sc_cmd->device->lun;
+ if (lun == tm_lun) {
+ /* Initiate ABTS on this cmd */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+ /* cancel the IO timeout */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release);
+ /* timer hold */
+ rc = bnx2fc_initiate_abts(cmd);
+ /* abts shouldn't fail in this context */
+ WARN_ON(rc != SUCCESS);
+ } else
+ printk(KERN_ERR PFX "lun_rst: abts already in"
+ " progress for this IO 0x%x\n",
+ cmd->xid);
+ }
+ }
+}
+
+static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2fc_cmd *cmd;
+ int rc = 0;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
+ /*
+ * Walk through the active_ios queue and ABORT all
+ * the IOs to this target, which was reset
+ */
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
+ cmd = (struct bnx2fc_cmd *)list;
+ /* Initiate ABTS */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+ /* cancel the IO timeout */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* timer hold */
+ rc = bnx2fc_initiate_abts(cmd);
+ /* abts shouldn't fail in this context */
+ WARN_ON(rc != SUCCESS);
+
+ } else
+ printk(KERN_ERR PFX "tgt_rst: abts already in progress"
+ " for this IO 0x%x\n", cmd->xid);
+ }
+}
+
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *tm_req;
+ struct fc_frame_header *fc_hdr;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ u64 *hdr;
+ u64 *temp_hdr;
+ void *rsp_buf;
+
+ /* Called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
+
+ if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
+ set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
+ else {
+ /*
+ * TM has already timed out and we got a
+ * delayed completion. Ignore completion
+ * processing.
+ */
+ return;
+ }
+
+ tm_req = &(io_req->mp_req);
+ fc_hdr = &(tm_req->resp_fc_hdr);
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+ rsp_buf = tm_req->resp_buf;
+
+ if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ bnx2fc_parse_fcp_rsp(io_req,
+ (struct fcoe_fcp_rsp_payload *)
+ rsp_buf, num_rq);
+ if (io_req->fcp_rsp_code == 0) {
+ /* TM successful */
+ if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
+ bnx2fc_lun_reset_cmpl(io_req);
+ else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
+ bnx2fc_tgt_reset_cmpl(io_req);
+ }
+ } else {
+ printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
+ fc_hdr->fh_r_ctl);
+ }
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
+ return;
+ }
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+
+ default:
+ BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+
+ sc_cmd = io_req->sc_cmd;
+ io_req->sc_cmd = NULL;
+
+ /* check if the io_req exists in tgt's tmf_q */
+ if (io_req->on_tmf_queue) {
+
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ } else {
+
+ printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n");
+ return;
+ }
+
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ if (io_req->wait_for_comp) {
+ BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
+ complete(&io_req->tm_done);
+ }
+}
+
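+/*
+ * Split a scatterlist element larger than BNX2FC_MAX_BD_LEN into multiple
+ * BDs of at most BNX2FC_BD_SPLIT_SZ bytes each, starting at bd_index.
+ * Returns the number of BD entries consumed.
+ */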
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int frag_size, sg_frags;
+
+ sg_frags = 0;
+ while (sg_len) {
+ if (sg_len >= BNX2FC_BD_SPLIT_SZ)
+ frag_size = BNX2FC_BD_SPLIT_SZ;
+ else
+ frag_size = sg_len;
+ bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
+ bd[bd_index + sg_frags].buf_len = (u16)frag_size;
+ bd[bd_index + sg_frags].flags = 0;
+
+ addr += (u64) frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+
+}
+
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int sg_count = 0;
+ int bd_count = 0;
+ int sg_frags;
+ unsigned int sg_len;
+ u64 addr;
+ int i;
+
+ sg_count = scsi_dma_map(sc);
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+ if (sg_len > BNX2FC_MAX_BD_LEN) {
+ sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
+ bd_count);
+ } else {
+
+ sg_frags = 1;
+ bd[bd_count].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_count].buf_addr_hi = addr >> 32;
+ bd[bd_count].buf_len = (u16)sg_len;
+ bd[bd_count].flags = 0;
+ }
+ bd_count += sg_frags;
+ byte_count += sg_len;
+ }
+ if (byte_count != scsi_bufflen(sc))
+ printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
+ "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
+ io_req->xid);
+ return bd_count;
+}
+
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int bd_count;
+
+ if (scsi_sg_count(sc))
+ bd_count = bnx2fc_map_sg(io_req);
+ else {
+ bd_count = 0;
+ bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
+ bd[0].buf_len = bd[0].flags = 0;
+ }
+ io_req->bd_tbl->bd_valid = bd_count;
+}
+
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+
+ if (io_req->bd_tbl->bd_valid && sc) {
+ scsi_dma_unmap(sc);
+ io_req->bd_tbl->bd_valid = 0;
+ }
+}
+
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ char tag[2];
+
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+
+ int_to_scsilun(sc_cmd->device->lun,
+ (struct scsi_lun *) fcp_cmnd->fc_lun);
+
+
+ fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+
+ fcp_cmnd->fc_cmdref = 0;
+ fcp_cmnd->fc_pri_ta = 0;
+ fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+ fcp_cmnd->fc_flags = io_req->io_req_flags;
+
+ if (scsi_populate_tag_msg(sc_cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
+ break;
+ case ORDERED_QUEUE_TAG:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
+ break;
+ default:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
+ break;
+ }
+ } else {
+ fcp_cmnd->fc_pri_ta = 0;
+ }
+}
+
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u8 rsp_flags = fcp_rsp->fcp_flags.flags;
+ u32 rq_buff_len = 0;
+ int i;
+ unsigned char *rq_data;
+ unsigned char *dummy;
+ int fcp_sns_len = 0;
+ int fcp_rsp_len = 0;
+
+ io_req->fcp_status = FC_GOOD;
+ io_req->fcp_resid = fcp_rsp->fcp_resid;
+
+ io_req->scsi_comp_flags = rsp_flags;
+ CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
+ fcp_rsp->scsi_status_code;
+
+ /* Fetch fcp_rsp_info and fcp_sns_info if available */
+ if (num_rq) {
+
+ /*
+ * We do not anticipate num_rq > 1, as the Linux-defined
+ * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO.
+ * A single 256-byte RQ buffer is good enough to hold this.
+ */
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
+ fcp_rsp_len = rq_buff_len
+ = fcp_rsp->fcp_rsp_len;
+ }
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
+ fcp_sns_len = fcp_rsp->fcp_sns_len;
+ rq_buff_len += fcp_rsp->fcp_sns_len;
+ }
+
+ io_req->fcp_rsp_len = fcp_rsp_len;
+ io_req->fcp_sns_len = fcp_sns_len;
+
+ if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
+ /* Invalid sense length. */
+ printk(KERN_ALERT PFX "invalid sns length %d\n",
+ rq_buff_len);
+ /* reset rq_buff_len */
+ rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
+ }
+
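+ /* The RQ buffer holds FCP_RSP_INFO (if any) followed by the sense data */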
+ rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+ if (num_rq > 1) {
+ /* We do not need extra sense data */
+ for (i = 1; i < num_rq; i++)
+ dummy = bnx2fc_get_next_rqe(tgt, 1);
+ }
+
+ /* fetch fcp_rsp_code */
+ if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
+ /* Only for task management function */
+ io_req->fcp_rsp_code = rq_data[3];
+ printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
+ io_req->fcp_rsp_code);
+ }
+
+ /* fetch sense data */
+ rq_data += fcp_rsp_len;
+
+ if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
+ printk(KERN_ERR PFX "Truncating sense buffer\n");
+ fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+ }
+
+ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+ /* return RQ entries */
+ for (i = 0; i < num_rq; i++)
+ bnx2fc_return_rqe(tgt, 1);
+ }
+}
+
+/**
+ * bnx2fc_queuecommand - Queuecommand function of the scsi template
+ *
+ * @host: The Scsi_Host the command was issued to
+ * @sc_cmd: struct scsi_cmnd to be executed
+ *
+ * This is the IO strategy routine, called by SCSI-ML
+ **/
+int bnx2fc_queuecommand(struct Scsi_Host *host,
+ struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport = shost_priv(host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ int rc = 0;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ sc_cmd->result = rval;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
+
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ /*
+ * Session is not offloaded yet. Let SCSI-ml retry
+ * the command.
+ */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+
+ io_req = bnx2fc_cmd_alloc(tgt);
+ if (!io_req) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+ io_req->sc_cmd = sc_cmd;
+
+ if (bnx2fc_post_io_req(tgt, io_req)) {
+ printk(KERN_ERR PFX "Unable to post io_req\n");
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+exit_qcmd:
+ return rc;
+}
+
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ struct fcoe_fcp_rsp_payload *fcp_rsp;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct scsi_cmnd *sc_cmd;
+ struct Scsi_Host *host;
+
+
+ /* scsi_cmd_cmpl is called with tgt lock held */
+
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+ /* we will not receive ABTS response for this IO */
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing "
+ "this scsi cmd\n");
+ }
+
+ /* Cancel the timeout_work, as we received IO completion */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ sc_cmd = io_req->sc_cmd;
+ if (sc_cmd == NULL) {
+ printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
+ return;
+ }
+
+ /* Fetch fcp_rsp from task context and perform cmd completion */
+ fcp_rsp = (struct fcoe_fcp_rsp_payload *)
+ &(task->cmn.general.rsp_info.fcp_rsp.payload);
+
+ /* parse fcp_rsp and obtain sense data from RQ if available */
+ bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+
+ host = sc_cmd->device->host;
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+ return;
+ }
+ io_req->sc_cmd = NULL;
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ } else {
+ /* This should not happen, but could have been pulled
+ * by bnx2fc_flush_active_ios(), or during a race
+ * between command abort and (late) completion.
+ */
+ BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
+ if (io_req->wait_for_comp)
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ }
+
+ bnx2fc_unmap_sg_list(io_req);
+
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
+ " fcp_resid = 0x%x\n",
+ io_req->cdb_status, io_req->fcp_resid);
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+ default:
+ printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+ struct bnx2fc_cmd *io_req)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_lport *lport = port->lport;
+ struct fcoe_dev_stats *stats;
+ int task_idx, index;
+ u16 xid;
+
+ /* Initialize rest of io_req fields */
+ io_req->cmd_type = BNX2FC_SCSI_CMD;
+ io_req->port = port;
+ io_req->tgt = tgt;
+ io_req->data_xfer_len = scsi_bufflen(sc_cmd);
+ sc_cmd->SCp.ptr = (char *)io_req;
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ io_req->io_req_flags = BNX2FC_READ;
+ stats->InputRequests++;
+ stats->InputBytes += io_req->data_xfer_len;
+ } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ io_req->io_req_flags = BNX2FC_WRITE;
+ stats->OutputRequests++;
+ stats->OutputBytes += io_req->data_xfer_len;
+ } else {
+ io_req->io_req_flags = 0;
+ stats->ControlRequests++;
+ }
+ put_cpu();
+
+ xid = io_req->xid;
+
+ /* Build buffer descriptor list for firmware from sg list */
+ bnx2fc_build_bd_list_from_sg(io_req);
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_task(io_req, task);
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (tgt->flush_in_prog) {
+ printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EAGAIN;
+ }
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "Session not ready...post_io\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EAGAIN;
+ }
+
+ /* Time IO req */
+ bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_cmd_queue */
+
+ io_req->on_active_queue = 1;
+ /* move io_req from pending_queue to active_queue */
+ list_add_tail(&io_req->link, &tgt->active_cmd_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return 0;
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
new file mode 100644
index 000000000000..7ea93af60260
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -0,0 +1,844 @@
+/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static void bnx2fc_upld_timer(unsigned long data);
+static void bnx2fc_ofld_timer(unsigned long data);
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata);
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
+
+static void bnx2fc_upld_timer(unsigned long data)
+{
+
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
+ /* fake upload completion */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+}
+
+static void bnx2fc_ofld_timer(unsigned long data)
+{
+
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
+ /* NOTE: This function should never be called, as
+ * offload should never timeout
+ */
+ /*
+ * If the timer has expired, this session is dead.
+ * Clear the offloaded flag and log out of this device.
+ * Since the OFFLOADED flag is cleared, this case
+ * will be treated as an offload error and the
+ * port will be logged off; conn_id and session
+ * resources are freed up in bnx2fc_offload_session
+ */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_offload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt,
+ struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_hba *hba = port->priv;
+ int rval;
+ int i = 0;
+
+ /* Initialize bnx2fc_rport */
+ /* NOTE: tgt is already bzero'd */
+ rval = bnx2fc_init_tgt(tgt, port, rdata);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate conn id for "
+ "port_id (%6x)\n", rport->port_id);
+ goto ofld_err;
+ }
+
+ /* Allocate session resources */
+ rval = bnx2fc_alloc_session_resc(hba, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate resources\n");
+ goto ofld_err;
+ }
+
+ /*
+ * Initialize FCoE session offload process.
+ * Upon completion of offload process add
+ * rport to list of rports
+ */
+retry_ofld:
+ clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ rval = bnx2fc_send_session_ofld_req(port, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "ofld_req failed\n");
+ goto ofld_err;
+ }
+
+ /*
+ * wait for the session to be offloaded and enabled. 3 secs
+ * should be ample time for this process to complete.
+ */
+ setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->ofld_wait,
+ (test_bit(
+ BNX2FC_FLAG_OFLD_REQ_CMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->ofld_timer);
+
+ if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
+ &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
+ "retry ofld..%d\n", i++);
+ msleep_interruptible(1000);
+ if (i > 3) {
+ i = 0;
+ goto ofld_err;
+ }
+ goto retry_ofld;
+ }
+ goto ofld_err;
+ }
+ if (bnx2fc_map_doorbell(tgt)) {
+ printk(KERN_ERR PFX "map doorbell failed - no mem\n");
+ /* upload will take care of cleaning up sess resc */
+ lport->tt.rport_logoff(rdata);
+ }
+ return;
+
+ofld_err:
+ /* couldn't offload the session. log off from this rport */
+ BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+ lport->tt.rport_logoff(rdata);
+ /* Free session resources */
+ bnx2fc_free_session_resc(hba, tgt);
+ if (tgt->fcoe_conn_id != -1)
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+}
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_cmd *io_req;
+ struct list_head *list;
+ struct list_head *tmp;
+ int rc;
+ int i = 0;
+ BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
+ tgt->num_active_ios.counter);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 1;
+
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+ "cleaned up\n");
+ complete(&io_req->tm_done);
+ }
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ }
+
+ set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
+ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+
+ list_for_each_safe(list, tmp, &tgt->els_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+
+ BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+
+ list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+
+ BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+
+ BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
+ i = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+ /* wait for active_ios to go to 0 */
+ while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
+ msleep(25);
+ if (tgt->num_active_ios.counter != 0)
+ printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
+ " active_ios = %d\n",
+ tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_upload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_hba *hba = port->priv;
+
+ BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
+ tgt->num_active_ios.counter);
+
+ /*
+ * Called with hba->hba_mutex held.
+ * This is a blocking call
+ */
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_disable_req(port, tgt);
+
+ /*
+ * wait for upload to complete. 3 secs
+ * should be sufficient time for this process to complete.
+ */
+ setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->upld_timer);
+
+ /*
+ * traverse thru the active_q and tmf_q and cleanup
+ * IOs in these lists
+ */
+ BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
+ tgt->flags);
+ bnx2fc_flush_active_ios(tgt);
+
+ /* Issue destroy KWQE */
+ if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "send destroy req\n");
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_destroy_req(hba, tgt);
+
+ /* wait for destroy to complete */
+ setup_timer(&tgt->upld_timer,
+ bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+
+ if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
+ printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
+
+ BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
+ tgt->flags);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->upld_timer);
+
+ } else
+ printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
+ " not sent to FW\n");
+
+ /* Free session resources */
+ spin_lock_bh(&tgt->cq_lock);
+ bnx2fc_free_session_resc(hba, tgt);
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ spin_unlock_bh(&tgt->cq_lock);
+}
+
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata)
+{
+
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_hba *hba = port->priv;
+
+ tgt->rport = rport;
+ tgt->rdata = rdata;
+ tgt->port = port;
+
+ if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
+ BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
+ tgt->fcoe_conn_id = -1;
+ return -1;
+ }
+
+ tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
+ if (tgt->fcoe_conn_id == -1)
+ return -1;
+
+ BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);
+
+ tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
+ tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
+ tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+
+ /* Initialize the toggle bit */
+ tgt->sq_curr_toggle_bit = 1;
+ tgt->cq_curr_toggle_bit = 1;
+ tgt->sq_prod_idx = 0;
+ tgt->cq_cons_idx = 0;
+ tgt->rq_prod_idx = 0x8000;
+ tgt->rq_cons_idx = 0;
+ atomic_set(&tgt->num_active_ios, 0);
+
+ tgt->work_time_slice = 2;
+
+ spin_lock_init(&tgt->tgt_lock);
+ spin_lock_init(&tgt->cq_lock);
+
+ /* Initialize active_cmd_queue list */
+ INIT_LIST_HEAD(&tgt->active_cmd_queue);
+
+ /* Initialize IO retire queue */
+ INIT_LIST_HEAD(&tgt->io_retire_queue);
+
+ INIT_LIST_HEAD(&tgt->els_queue);
+
+ /* Initialize active_tm_queue list */
+ INIT_LIST_HEAD(&tgt->active_tm_queue);
+
+ init_waitqueue_head(&tgt->ofld_wait);
+ init_waitqueue_head(&tgt->upld_wait);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_rport_event_handler - handle rport events
+ *
+ * This event_callback is called after successful completion of libfc
+ * initiated target login. bnx2fc can proceed with initiating the session
+ * establishment.
+ */
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_rport *rport = rdata->rport;
+ struct fc_rport_libfc_priv *rp;
+ struct bnx2fc_rport *tgt;
+ u32 port_id;
+
+ BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
+ event, rdata->ids.port_id);
+ switch (event) {
+ case RPORT_EV_READY:
+ if (!rport) {
+ printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
+ break;
+ }
+
+ rp = rport->dd_data;
+ if (rport->port_id == FC_FID_DIR_SERV) {
+ /*
+ * bnx2fc_rport structure doesn't exist for
+ * directory server.
+ * We should not come here, as lport will
+ * take care of fabric login
+ */
+ printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
+ rdata->ids.port_id);
+ break;
+ }
+
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ BNX2FC_HBA_DBG(lport, "not FCP type target."
+ " not offloading\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
+ " not offloading\n");
+ break;
+ }
+
+ /*
+ * Offload process is protected with the hba mutex.
+ * Use the same mutex for the upload process too
+ */
+ mutex_lock(&hba->hba_mutex);
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ /* This can happen when ADISC finds the same target */
+ if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "already offloaded\n");
+ mutex_unlock(&hba->hba_mutex);
+ return;
+ }
+
+ /*
+ * Offload the session. This is a blocking call, and will
+ * wait until the session is offloaded.
+ */
+ bnx2fc_offload_session(port, tgt, rdata);
+
+ BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+
+ if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ /*
+ * Session is offloaded and enabled. Map
+ * doorbell register for this target
+ */
+ BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
+ /* This counter is protected with hba mutex */
+ hba->num_ofld_sess++;
+
+ set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+ } else {
+ /*
+ * Offload or enable would have failed.
+ * In offload/enable completion path, the
+ * rport would have already been removed
+ */
+ BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
+ "offloaded flag not set\n");
+ }
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ port_id = rdata->ids.port_id;
+ if (port_id == FC_FID_DIR_SERV)
+ break;
+
+ if (!rport) {
+ printk(KERN_ALERT PFX "%x - rport not created Yet!!\n",
+ port_id);
+ break;
+ }
+ rp = rport->dd_data;
+ mutex_lock(&hba->hba_mutex);
+ /*
+ * Perform session upload. Note that rdata->peers is already
+ * removed from disc->rports list before we get this event.
+ */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ }
+ clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+
+ bnx2fc_upload_session(port, tgt);
+ hba->num_ofld_sess--;
+ BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ /*
+ * Try to wake up the linkdown wait thread. If num_ofld_sess
+ * is 0, the waiting thread wakes up
+ */
+ if ((hba->wait_for_link_down) &&
+ (hba->num_ofld_sess == 0)) {
+ wake_up_interruptible(&hba->shutdown_wait);
+ }
+ if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
+ printk(KERN_ERR PFX "Relogin to the tgt\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_login(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ mutex_unlock(&hba->hba_mutex);
+
+ break;
+
+ case RPORT_EV_NONE:
+ break;
+ }
+}
+
+/**
+ * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
+ *
+ * @port: fcoe_port struct to lookup the target port on
+ * @port_id: The remote port ID to look up
+ */
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id)
+{
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_rport *tgt;
+ struct fc_rport_priv *rdata;
+ int i;
+
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if ((tgt) && (tgt->port == port)) {
+ rdata = tgt->rdata;
+ if (rdata->ids.port_id == port_id) {
+ if (rdata->rp_state != RPORT_ST_DELETE) {
+ BNX2FC_TGT_DBG(tgt, "rport "
+ "obtained\n");
+ return tgt;
+ } else {
+ printk(KERN_ERR PFX "rport 0x%x "
+ "is in DELETED state\n",
+ rdata->ids.port_id);
+ return NULL;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * bnx2fc_alloc_conn_id - allocates FCOE Connection id
+ *
+ * @hba: pointer to adapter structure
+ * @tgt: pointer to bnx2fc_rport structure
+ */
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ u32 conn_id, next;
+
+ /* called with hba mutex held */
+
+ /*
+ * tgt_ofld_list access is synchronized using
+ * both hba mutex and hba lock. At least the hba mutex or
+ * the hba lock needs to be held for read access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ next = hba->next_conn_id;
+ conn_id = hba->next_conn_id++;
+ if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
+ hba->next_conn_id = 0;
+
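+ /* Scan circularly from next_conn_id; 'next' marks a full, unsuccessful loop */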
+ while (hba->tgt_ofld_list[conn_id] != NULL) {
+ conn_id++;
+ if (conn_id == BNX2FC_NUM_MAX_SESS)
+ conn_id = 0;
+
+ if (conn_id == next) {
+ /* No free conn_ids are available */
+ spin_unlock_bh(&hba->hba_lock);
+ return -1;
+ }
+ }
+ hba->tgt_ofld_list[conn_id] = tgt;
+ tgt->fcoe_conn_id = conn_id;
+ spin_unlock_bh(&hba->hba_lock);
+ return conn_id;
+}
+
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
+{
+ /* called with hba mutex held */
+ spin_lock_bh(&hba->hba_lock);
+ hba->tgt_ofld_list[conn_id] = NULL;
+ hba->next_conn_id = conn_id;
+ spin_unlock_bh(&hba->hba_lock);
+}
+
+/**
+ * bnx2fc_alloc_session_resc - Allocate qp resources for the session
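+ *
+ * @hba: pointer to adapter structure
+ * @tgt: pointer to bnx2fc_rport structure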
+ *
+ */
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ dma_addr_t page;
+ int num_pages;
+ u32 *pbl;
+
+ /* Allocate and map SQ */
+ tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
+ tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
+ if (!tgt->sq) {
+ printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
+ tgt->sq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->sq, 0, tgt->sq_mem_size);
+
+ /* Allocate and map CQ */
+ tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
+ tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
+ if (!tgt->cq) {
+ printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
+ tgt->cq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->cq, 0, tgt->cq_mem_size);
+
+ /* Allocate and map RQ and RQ PBL */
+ tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
+ tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
+ if (!tgt->rq) {
+ printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
+ tgt->rq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->rq, 0, tgt->rq_mem_size);
+
+ tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
+ tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
+ if (!tgt->rq_pbl) {
+ printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
+ tgt->rq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
+ num_pages = tgt->rq_mem_size / PAGE_SIZE;
+ page = tgt->rq_dma;
+ pbl = (u32 *)tgt->rq_pbl;
+
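+ /* Build the RQ page buffer list: each page's DMA address is written as two 32-bit words, low dword first */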
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += PAGE_SIZE;
+ }
+
+ /* Allocate and map XFERQ */
+ tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
+ tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ &tgt->xferq_dma, GFP_KERNEL);
+ if (!tgt->xferq) {
+ printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
+ tgt->xferq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->xferq, 0, tgt->xferq_mem_size);
+
+ /* Allocate and map CONFQ & CONFQ PBL */
+ tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
+ tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ &tgt->confq_dma, GFP_KERNEL);
+ if (!tgt->confq) {
+ printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
+ tgt->confq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->confq, 0, tgt->confq_mem_size);
+
+ tgt->confq_pbl_size =
+ (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+ tgt->confq_pbl_size =
+ (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
+ if (!tgt->confq_pbl) {
+ printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
+ tgt->confq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
+ num_pages = tgt->confq_mem_size / PAGE_SIZE;
+ page = tgt->confq_dma;
+ pbl = (u32 *)tgt->confq_pbl;
+
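+ /* Build the CONFQ page buffer list the same way: low dword, then high dword, for every page */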
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += PAGE_SIZE;
+ }
+
+ /* Allocate and map ConnDB */
+ tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
+
+ tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
+ if (!tgt->conn_db) {
+ printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
+ tgt->conn_db_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
+
+
+ /* Allocate and map LCQ */
+ tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
+ tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
+
+ if (!tgt->lcq) {
+ printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
+ tgt->lcq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->lcq, 0, tgt->lcq_mem_size);
+
+ /* Arm CQ */
+ tgt->conn_db->cq_arm.lo = -1;
+ tgt->conn_db->rq_prod = 0x8000;
+
+ return 0;
+
+mem_alloc_failure:
+ bnx2fc_free_session_resc(hba, tgt);
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ return -ENOMEM;
+}
+
+/**
+ * bnx2fc_free_session_resc() - Free QP resources for the session
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ *
+ * Free QP resources - SQ/CQ/RQ/XFERQ/CONFQ memory, their PBLs,
+ * the connection DB and the LCQ.
+ */
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+
+ if (tgt->ctx_base) {
+ iounmap(tgt->ctx_base);
+ tgt->ctx_base = NULL;
+ }
+ /* Free LCQ */
+ if (tgt->lcq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ tgt->lcq, tgt->lcq_dma);
+ tgt->lcq = NULL;
+ }
+ /* Free connDB */
+ if (tgt->conn_db) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
+ tgt->conn_db, tgt->conn_db_dma);
+ tgt->conn_db = NULL;
+ }
+ /* Free confq and confq pbl */
+ if (tgt->confq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
+ tgt->confq_pbl, tgt->confq_pbl_dma);
+ tgt->confq_pbl = NULL;
+ }
+ if (tgt->confq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ tgt->confq, tgt->confq_dma);
+ tgt->confq = NULL;
+ }
+ /* Free XFERQ */
+ if (tgt->xferq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ tgt->xferq, tgt->xferq_dma);
+ tgt->xferq = NULL;
+ }
+ /* Free RQ PBL and RQ */
+ if (tgt->rq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ tgt->rq_pbl, tgt->rq_pbl_dma);
+ tgt->rq_pbl = NULL;
+ }
+ if (tgt->rq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ tgt->rq, tgt->rq_dma);
+ tgt->rq = NULL;
+ }
+ /* Free CQ */
+ if (tgt->cq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ tgt->cq, tgt->cq_dma);
+ tgt->cq = NULL;
+ }
+ /* Free SQ */
+ if (tgt->sq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ tgt->sq, tgt->sq_dma);
+ tgt->sq = NULL;
+ }
+}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index e1ca5fe7e6bb..cfd59023227b 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -360,7 +360,7 @@ struct bnx2i_hba {
#define ADAPTER_STATE_LINK_DOWN 2
#define ADAPTER_STATE_INIT_FAILED 31
unsigned int mtu_supported;
- #define BNX2I_MAX_MTU_SUPPORTED 1500
+ #define BNX2I_MAX_MTU_SUPPORTED 9000
struct Scsi_Host *shost;
@@ -751,6 +751,8 @@ extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
struct bnx2i_cmd *cmnd);
extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 96505e3ab986..1da34c019b8a 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -445,6 +445,56 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
}
/**
+ * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware
+ * @bnx2i_conn: iscsi connection
+ * @mtask: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI Text request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *mtask)
+{
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_text_request *text_wqe;
+ struct iscsi_text *text_hdr;
+ u32 dword;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+ text_hdr = (struct iscsi_text *)mtask->hdr;
+ text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
+
+ memset(text_wqe, 0, sizeof(struct bnx2i_text_request));
+
+ text_wqe->op_code = text_hdr->opcode;
+ text_wqe->op_attr = text_hdr->flags;
+ text_wqe->data_length = ntoh24(text_hdr->dlength);
+ text_wqe->itt = mtask->itt |
+ (ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT);
+ text_wqe->ttt = be32_to_cpu(text_hdr->ttt);
+
+ text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+
+ text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+ text_wqe->resp_bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+ dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) |
+ (bnx2i_conn->gen_pdu.resp_buf_size <<
+ ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+ text_wqe->resp_buffer = dword;
+ text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+ text_wqe->bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+ text_wqe->num_bds = 1;
+ text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
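+ /* One WQE posted - update the SQ parameters and ring the doorbell so the firmware processes it */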
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+
+/**
* bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
* @conn: iscsi connection
* @cmd: driver command structure which is requesting
@@ -490,15 +540,18 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+
+ memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request));
+
nopout_wqe->op_code = nopout_hdr->opcode;
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
- u32 tmp = nopout_hdr->lun[0];
+ u32 tmp = nopout_wqe->lun[0];
/* 57710 requires LUN field to be swapped */
- nopout_hdr->lun[0] = nopout_hdr->lun[1];
- nopout_hdr->lun[1] = tmp;
+ nopout_wqe->lun[0] = nopout_wqe->lun[1];
+ nopout_wqe->lun[1] = tmp;
}
nopout_wqe->itt = ((u16)task->itt |
@@ -1425,6 +1478,68 @@ done:
return 0;
}
+
+/**
+ * bnx2i_process_text_resp - this function handles iscsi text response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process an iSCSI Text Response CQE and complete it to the open-iscsi user daemon
+ */
+static int bnx2i_process_text_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_text_response *text;
+ struct iscsi_text_rsp *resp_hdr;
+ int pld_len;
+ int pad_len;
+
+ text = (struct bnx2i_text_response *) cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = text->op_code;
+ resp_hdr->flags = text->response_flags;
+ resp_hdr->hlength = 0;
+
+ hton24(resp_hdr->dlength, text->data_length);
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->ttt = cpu_to_be32(text->ttt);
+ resp_hdr->statsn = task->hdr->exp_statsn;
+ resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn);
+ pld_len = text->data_length;
+ bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf +
+ pld_len;
+ pad_len = 0;
+ if (pld_len & 0x3)
+ pad_len = 4 - (pld_len % 4);
+
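+ /* iSCSI PDU data segments are padded to a 4-byte boundary; zero-fill the pad bytes in the response buffer */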
+ if (pad_len) {
+ int i = 0;
+ for (i = 0; i < pad_len; i++) {
+ bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+ bnx2i_conn->gen_pdu.resp_wr_ptr++;
+ }
+ }
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_wr_ptr -
+ bnx2i_conn->gen_pdu.resp_buf);
+done:
+ spin_unlock(&session->lock);
+ return 0;
+}
+
+
/**
* bnx2i_process_tmf_resp - this function handles iscsi TMF response
* @session: iscsi session pointer
@@ -1766,6 +1881,10 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
bnx2i_process_tmf_resp(session, bnx2i_conn,
qp->cq_cons_qe);
break;
+ case ISCSI_OP_TEXT_RSP:
+ bnx2i_process_text_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
case ISCSI_OP_LOGOUT_RSP:
bnx2i_process_logout_resp(session, bnx2i_conn,
qp->cq_cons_qe);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 72a7b2d4a439..1d24a2819736 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.6.2.2"
-#define DRV_MODULE_RELDATE "Nov 23, 2010"
+#define DRV_MODULE_VERSION "2.6.2.3"
+#define DRV_MODULE_RELDATE "Dec 31, 2010"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -29,7 +29,7 @@ static char version[] __devinitdata =
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
"Eddie Wai <eddie.wai@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712"
" iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -88,9 +88,11 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
(hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
- } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712E)
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
else
printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
@@ -161,6 +163,21 @@ void bnx2i_start(void *handle)
struct bnx2i_hba *hba = handle;
int i = HZ;
+ if (!hba->cnic->max_iscsi_conn) {
+ printk(KERN_ALERT "bnx2i: dev %s does not support "
+ "iSCSI\n", hba->netdev->name);
+
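+ /* Undo the cnic registration and free the hba, as this device cannot be used for iSCSI offload */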
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ mutex_lock(&bnx2i_dev_lock);
+ list_del_init(&hba->link);
+ adapter_count--;
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ mutex_unlock(&bnx2i_dev_lock);
+ bnx2i_free_hba(hba);
+ }
+ return;
+ }
bnx2i_send_fw_iscsi_init_msg(hba);
while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
msleep(BNX2I_INIT_POLL_TIME);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f0dce26593eb..1809f9ccc4ce 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1092,6 +1092,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
case ISCSI_OP_SCSI_TMFUNC:
rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
break;
+ case ISCSI_OP_TEXT:
+ rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
+ break;
default:
iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
"send_gen: unsupported op 0x%x\n",
@@ -1455,42 +1458,40 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
/**
- * bnx2i_conn_get_param - return iscsi connection parameter to caller
- * @cls_conn: pointer to iscsi cls conn
+ * bnx2i_ep_get_param - return iscsi ep parameter to caller
+ * @ep: pointer to iscsi endpoint
* @param: parameter type identifier
* @buf: buffer pointer
*
- * returns iSCSI connection parameters
+ * returns iSCSI ep parameters
*/
-static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf)
+static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct bnx2i_conn *bnx2i_conn = conn->dd_data;
- int len = 0;
+ struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+ struct bnx2i_hba *hba = bnx2i_ep->hba;
+ int len = -ENOTCONN;
- if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
- goto out;
+ if (!hba)
+ return -ENOTCONN;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
- mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
- if (bnx2i_conn->ep->cm_sk)
- len = sprintf(buf, "%hu\n",
- bnx2i_conn->ep->cm_sk->dst_port);
- mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
+ mutex_unlock(&hba->net_dev_lock);
break;
case ISCSI_PARAM_CONN_ADDRESS:
- mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
- if (bnx2i_conn->ep->cm_sk)
- len = sprintf(buf, "%pI4\n",
- &bnx2i_conn->ep->cm_sk->dst_ip);
- mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
+ mutex_unlock(&hba->net_dev_lock);
break;
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
-out:
+
return len;
}
@@ -1935,13 +1936,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
cnic_dev_10g = 1;
switch (bnx2i_ep->state) {
- case EP_STATE_CONNECT_FAILED:
case EP_STATE_CLEANUP_FAILED:
case EP_STATE_OFLD_FAILED:
case EP_STATE_DISCONN_TIMEDOUT:
ret = 0;
break;
case EP_STATE_CONNECT_START:
+ case EP_STATE_CONNECT_FAILED:
case EP_STATE_CONNECT_COMPL:
case EP_STATE_ULP_UPDATE_START:
case EP_STATE_ULP_UPDATE_COMPL:
@@ -2167,7 +2168,8 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.name = "bnx2i",
.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
CAP_MULTI_R2T | CAP_DATADGST |
- CAP_DATA_PATH_OFFLOAD,
+ CAP_DATA_PATH_OFFLOAD |
+ CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH |
ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN |
@@ -2200,7 +2202,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.bind_conn = bnx2i_conn_bind,
.destroy_conn = bnx2i_conn_destroy,
.set_param = iscsi_set_param,
- .get_conn_param = bnx2i_conn_get_param,
+ .get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.get_host_param = bnx2i_host_get_param,
.start_conn = bnx2i_conn_start,
@@ -2209,6 +2211,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.xmit_task = bnx2i_task_xmit,
.get_stats = bnx2i_conn_get_stats,
/* TCP connect - disconnect - option-2 interface calls */
+ .get_ep_param = bnx2i_ep_get_param,
.ep_connect = bnx2i_ep_connect,
.ep_poll = bnx2i_ep_poll,
.ep_disconnect = bnx2i_ep_disconnect,
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index a129a170b47b..fc2cdb62f53b 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -105,7 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
/* owner and name should be set already */
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
- CAP_PADDING_OFFLOAD,
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -137,7 +137,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
- .get_conn_param = cxgbi_get_conn_param,
+ .get_conn_param = iscsi_conn_get_param,
.set_param = cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
@@ -152,6 +152,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.xmit_pdu = cxgbi_conn_xmit_pdu,
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
.ep_connect = cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
@@ -1108,10 +1109,11 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
csk, idx, npods, gl);
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
- struct sk_buff *skb = ddp->gl_skb[idx];
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
- /* hold on to the skb until we clear the ddp mapping */
- skb_get(skb);
+ if (!skb)
+ return -ENOMEM;
ulp_mem_io_set_hdr(skb, pm_addr);
cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
@@ -1136,56 +1138,20 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
cdev, idx, npods, tag);
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
- struct sk_buff *skb = ddp->gl_skb[idx];
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
if (!skb) {
- pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
+ pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
tag, idx, i, npods);
continue;
}
- ddp->gl_skb[idx] = NULL;
- memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE);
ulp_mem_io_set_hdr(skb, pm_addr);
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(cdev->lldev, skb);
}
}
-static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt)
-{
- int i;
-
- log_debug(1 << CXGBI_DBG_DDP,
- "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
- for (i = 0; i < cnt; i++, idx++)
- if (ddp->gl_skb[idx]) {
- kfree_skb(ddp->gl_skb[idx]);
- ddp->gl_skb[idx] = NULL;
- }
-}
-
-static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx,
- int cnt, gfp_t gfp)
-{
- int i;
-
- log_debug(1 << CXGBI_DBG_DDP,
- "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
- for (i = 0; i < cnt; i++) {
- struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
- PPOD_SIZE, 0, gfp);
- if (skb)
- ddp->gl_skb[idx + i] = skb;
- else {
- ddp_free_gl_skb(ddp, idx, i);
- return -ENOMEM;
- }
- }
- return 0;
-}
-
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
unsigned int tid, int pg_idx, bool reply)
{
@@ -1316,8 +1282,6 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
}
tdev->ulp_iscsi = ddp;
- cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb;
- cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set = ddp_set_map;
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
index 5f5e3394b594..20593fd69d8f 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
@@ -24,10 +24,21 @@
extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
-#define cxgb3i_get_private_ipv4addr(ndev) \
- (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
-#define cxgb3i_set_private_ipv4addr(ndev, addr) \
- (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
+static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev)
+{
+ return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr;
+}
+
+static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev,
+ unsigned int addr)
+{
+ struct port_info *pi = (struct port_info *)netdev_priv(ndev);
+
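+ /* Record the iSCSI IPv4 address; when it is non-zero, also flag it as in use and save the port's MAC address */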
+ pi->iscsic.flags = addr ? 1 : 0;
+ pi->iscsi_ipv4addr = addr;
+ if (addr)
+ memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN);
+}
struct cpl_iscsi_hdr_norss {
union opcode_tid ot;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 8c04fada710b..f3a4cd7cf782 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -106,7 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.name = DRV_MODULE_NAME,
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
CAP_DATADGST | CAP_DIGEST_OFFLOAD |
- CAP_PADDING_OFFLOAD,
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -138,7 +138,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
- .get_conn_param = cxgbi_get_conn_param,
+ .get_conn_param = iscsi_conn_get_param,
.set_param = cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
@@ -153,6 +153,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.xmit_pdu = cxgbi_conn_xmit_pdu,
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
.ep_connect = cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
@@ -1425,8 +1426,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
cxgbi_ddp_page_size_factor(pgsz_factor);
cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
- cdev->csk_ddp_free_gl_skb = NULL;
- cdev->csk_ddp_alloc_gl_skb = NULL;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set = ddp_set_map;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index a24dff9f9163..de764ea7419d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -530,6 +530,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk->dst = dst;
csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
csk->daddr.sin_port = daddr->sin_port;
+ csk->daddr.sin_family = daddr->sin_family;
csk->saddr.sin_addr.s_addr = rt->rt_src;
return csk;
@@ -1264,12 +1265,6 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
return idx;
}
- if (cdev->csk_ddp_alloc_gl_skb) {
- err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
- if (err < 0)
- goto unmark_entries;
- }
-
tag = cxgbi_ddp_tag_base(tformat, sw_tag);
tag |= idx << PPOD_IDX_SHIFT;
@@ -1280,11 +1275,8 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
hdr.page_offset = htonl(gl->offset);
err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
- if (err < 0) {
- if (cdev->csk_ddp_free_gl_skb)
- cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
+ if (err < 0)
goto unmark_entries;
- }
ddp->idx_last = idx;
log_debug(1 << CXGBI_DBG_DDP,
@@ -1350,8 +1342,6 @@ static void ddp_destroy(struct kref *kref)
>> PPOD_PAGES_SHIFT;
pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
kfree(gl);
- if (cdev->csk_ddp_free_gl_skb)
- cdev->csk_ddp_free_gl_skb(ddp, i, npods);
i += npods;
} else
i++;
@@ -1394,8 +1384,6 @@ int cxgbi_ddp_init(struct cxgbi_device *cdev,
return -ENOMEM;
}
ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
- ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
- ppmax * sizeof(struct cxgbi_gather_list *));
cdev->ddp = ddp;
spin_lock_init(&ddp->map_lock);
@@ -1895,13 +1883,16 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
- u8 submode = 0;
+ if (hcrc || dcrc) {
+ u8 submode = 0;
- if (hcrc)
- submode |= 1;
- if (dcrc)
- submode |= 2;
- cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ if (hcrc)
+ submode |= 1;
+ if (dcrc)
+ submode |= 2;
+ cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ } else
+ cxgbi_skcb_ulp_mode(skb) = 0;
}
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
@@ -2197,32 +2188,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
-int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf)
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf)
{
- struct iscsi_conn *iconn = cls_conn->dd_data;
+ struct cxgbi_endpoint *cep = ep->dd_data;
+ struct cxgbi_sock *csk;
int len;
log_debug(1 << CXGBI_DBG_ISCSI,
- "cls_conn 0x%p, param %d.\n", cls_conn, param);
+ "cls_conn 0x%p, param %d.\n", ep, param);
switch (param) {
case ISCSI_PARAM_CONN_PORT:
- spin_lock_bh(&iconn->session->lock);
- len = sprintf(buf, "%hu\n", iconn->portal_port);
- spin_unlock_bh(&iconn->session->lock);
- break;
case ISCSI_PARAM_CONN_ADDRESS:
- spin_lock_bh(&iconn->session->lock);
- len = sprintf(buf, "%s\n", iconn->portal_address);
- spin_unlock_bh(&iconn->session->lock);
- break;
+ if (!cep)
+ return -ENOTCONN;
+
+ csk = cep->csk;
+ if (!csk)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &csk->daddr, param, buf);
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
return len;
}
-EXPORT_SYMBOL_GPL(cxgbi_get_conn_param);
+EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
@@ -2289,11 +2282,6 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
cxgbi_conn_max_xmit_dlength(conn);
cxgbi_conn_max_recv_dlength(conn);
- spin_lock_bh(&conn->session->lock);
- sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
- conn->portal_port = ntohs(csk->daddr.sin_port);
- spin_unlock_bh(&conn->session->lock);
-
log_debug(1 << CXGBI_DBG_ISCSI,
"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
cls_session, cls_conn, ep, cconn, csk);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index c57d59db000c..0a20fd5f7102 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -131,7 +131,6 @@ struct cxgbi_ddp_info {
unsigned int rsvd_tag_mask;
spinlock_t map_lock;
struct cxgbi_gather_list **gl_map;
- struct sk_buff **gl_skb;
};
#define DDP_PGIDX_MAX 4
@@ -536,8 +535,6 @@ struct cxgbi_device {
struct cxgbi_ddp_info *ddp;
void (*dev_ddp_cleanup)(struct cxgbi_device *);
- void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int);
- int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t);
int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
unsigned int, unsigned int,
struct cxgbi_gather_list *);
@@ -715,7 +712,7 @@ void cxgbi_cleanup_task(struct iscsi_task *task);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
enum iscsi_param, char *, int);
-int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *);
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
int cxgbi_bind_conn(struct iscsi_cls_session *,
struct iscsi_cls_conn *, u64, int);
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index b837c5b3c8f9..564e6ecd17c2 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -25,16 +25,9 @@
#include <scsi/scsi_dh.h>
#include "../scsi_priv.h"
-struct scsi_dh_devinfo_list {
- struct list_head node;
- char vendor[9];
- char model[17];
- struct scsi_device_handler *handler;
-};
-
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
-static LIST_HEAD(scsi_dh_dev_list);
+static int scsi_dh_list_idx = 1;
static struct scsi_device_handler *get_device_handler(const char *name)
{
@@ -51,40 +44,18 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
-
-static struct scsi_device_handler *
-scsi_dh_cache_lookup(struct scsi_device *sdev)
+static struct scsi_device_handler *get_device_handler_by_idx(int idx)
{
- struct scsi_dh_devinfo_list *tmp;
- struct scsi_device_handler *found_dh = NULL;
+ struct scsi_device_handler *tmp, *found = NULL;
spin_lock(&list_lock);
- list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
- if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
- !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
- found_dh = tmp->handler;
+ list_for_each_entry(tmp, &scsi_dh_list, list) {
+ if (tmp->idx == idx) {
+ found = tmp;
break;
}
}
spin_unlock(&list_lock);
-
- return found_dh;
-}
-
-static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
- struct scsi_device *sdev)
-{
- int i, found = 0;
-
- for(i = 0; scsi_dh->devlist[i].vendor; i++) {
- if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
- strlen(scsi_dh->devlist[i].vendor)) &&
- !strncmp(sdev->model, scsi_dh->devlist[i].model,
- strlen(scsi_dh->devlist[i].model))) {
- found = 1;
- break;
- }
- }
return found;
}
@@ -102,41 +73,14 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
struct scsi_device *sdev)
{
struct scsi_device_handler *found_dh = NULL;
- struct scsi_dh_devinfo_list *tmp;
+ int idx;
- found_dh = scsi_dh_cache_lookup(sdev);
- if (found_dh)
- return found_dh;
+ idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
+ SCSI_DEVINFO_DH);
+ found_dh = get_device_handler_by_idx(idx);
- if (scsi_dh) {
- if (scsi_dh_handler_lookup(scsi_dh, sdev))
- found_dh = scsi_dh;
- } else {
- struct scsi_device_handler *tmp_dh;
-
- spin_lock(&list_lock);
- list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
- if (scsi_dh_handler_lookup(tmp_dh, sdev))
- found_dh = tmp_dh;
- }
- spin_unlock(&list_lock);
- }
-
- if (found_dh) { /* If device is found, add it to the cache */
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (tmp) {
- strncpy(tmp->vendor, sdev->vendor, 8);
- strncpy(tmp->model, sdev->model, 16);
- tmp->vendor[8] = '\0';
- tmp->model[16] = '\0';
- tmp->handler = found_dh;
- spin_lock(&list_lock);
- list_add(&tmp->node, &scsi_dh_dev_list);
- spin_unlock(&list_lock);
- } else {
- found_dh = NULL;
- }
- }
+ if (scsi_dh && found_dh != scsi_dh)
+ found_dh = NULL;
return found_dh;
}
@@ -373,12 +317,25 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
+ int i;
+
if (get_device_handler(scsi_dh->name))
return -EBUSY;
spin_lock(&list_lock);
+ scsi_dh->idx = scsi_dh_list_idx++;
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
+
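+ /* Add this handler's vendor/model entries to the keyed devinfo list so device_handler_match() can find it by idx */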
+ for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ scsi_dev_info_list_add_keyed(0,
+ scsi_dh->devlist[i].vendor,
+ scsi_dh->devlist[i].model,
+ NULL,
+ scsi_dh->idx,
+ SCSI_DEVINFO_DH);
+ }
+
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
@@ -395,7 +352,7 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
- struct scsi_dh_devinfo_list *tmp, *pos;
+ int i;
if (!get_device_handler(scsi_dh->name))
return -ENODEV;
@@ -403,14 +360,14 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
+ for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
+ scsi_dh->devlist[i].model,
+ SCSI_DEVINFO_DH);
+ }
+
spin_lock(&list_lock);
list_del(&scsi_dh->list);
- list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
- if (pos->handler == scsi_dh) {
- list_del(&pos->node);
- kfree(pos);
- }
- }
spin_unlock(&list_lock);
printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
@@ -576,6 +533,10 @@ static int __init scsi_dh_init(void)
{
int r;
+ r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
+ if (r)
+ return r;
+
r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
if (!r)
@@ -590,6 +551,7 @@ static void __exit scsi_dh_exit(void)
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
scsi_dh_sysfs_attr_remove);
bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
+ scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
}
module_init(scsi_dh_init);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 6b729324b8d3..7cae0bc85390 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -253,13 +253,15 @@ static void stpg_endio(struct request *req, int error)
{
struct alua_dh_data *h = req->end_io_data;
struct scsi_sense_hdr sense_hdr;
- unsigned err = SCSI_DH_IO;
+ unsigned err = SCSI_DH_OK;
if (error || host_byte(req->errors) != DID_OK ||
- msg_byte(req->errors) != COMMAND_COMPLETE)
+ msg_byte(req->errors) != COMMAND_COMPLETE) {
+ err = SCSI_DH_IO;
goto done;
+ }
- if (err == SCSI_DH_IO && h->senselen > 0) {
+ if (h->senselen > 0) {
err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
&sense_hdr);
if (!err) {
@@ -285,7 +287,8 @@ static void stpg_endio(struct request *req, int error)
print_alua_state(h->state));
}
done:
- blk_put_request(req);
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
if (h->callback_fn) {
h->callback_fn(h->callback_data, err);
h->callback_fn = h->callback_data = NULL;
@@ -303,7 +306,6 @@ done:
static unsigned submit_stpg(struct alua_dh_data *h)
{
struct request *rq;
- int err = SCSI_DH_RES_TEMP_UNAVAIL;
int stpg_len = 8;
struct scsi_device *sdev = h->sdev;
@@ -332,7 +334,7 @@ static unsigned submit_stpg(struct alua_dh_data *h)
rq->end_io_data = h;
blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
- return err;
+ return SCSI_DH_OK;
}
/*
@@ -730,7 +732,9 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
{"Pillar", "Axiom" },
{"Intel", "Multi-Flex"},
{"NETAPP", "LUN"},
+ {"NETAPP", "LUN C-Mode"},
{"AIX", "NVDISK"},
+ {"Promise", "VTrak"},
{NULL, NULL}
};
@@ -759,7 +763,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err = SCSI_DH_OK;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 6faf472f7537..48441f6908a4 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -650,7 +650,7 @@ static int clariion_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index e3916641e627..b479f1eef968 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -225,7 +225,8 @@ static void start_stop_endio(struct request *req, int error)
}
}
done:
- blk_put_request(req);
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
if (h->callback_fn) {
h->callback_fn(h->callback_data, err);
h->callback_fn = h->callback_data = NULL;
@@ -338,8 +339,8 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int ret;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ + sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
HP_SW_NAME);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 5be3ae15cb71..293c183dfe6d 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -281,11 +281,13 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
}
static struct request *rdac_failover_get(struct scsi_device *sdev,
- struct rdac_dh_data *h)
+ struct rdac_dh_data *h, struct list_head *list)
{
struct request *rq;
struct rdac_mode_common *common;
unsigned data_size;
+ struct rdac_queue_data *qdata;
+ u8 *lun_table;
if (h->ctlr->use_ms10) {
struct rdac_pg_expanded *rdac_pg;
@@ -298,6 +300,7 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
rdac_pg->subpage_code = 0x1;
rdac_pg->page_len[0] = 0x01;
rdac_pg->page_len[1] = 0x28;
+ lun_table = rdac_pg->lun_table;
} else {
struct rdac_pg_legacy *rdac_pg;
@@ -307,11 +310,16 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
rdac_pg->page_len = 0x68;
+ lun_table = rdac_pg->lun_table;
}
common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
common->rdac_options = RDAC_FORCED_QUIESENCE;
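+ /* Mark each LUN queued for failover in the mode select LUN table */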
+ list_for_each_entry(qdata, list, entry) {
+ lun_table[qdata->h->lun] = 0x81;
+ }
+
/* get request for block layer packet command */
rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
if (!rq)
@@ -565,7 +573,6 @@ static void send_mode_select(struct work_struct *work)
int err, retry_cnt = RDAC_RETRY_COUNT;
struct rdac_queue_data *tmp, *qdata;
LIST_HEAD(list);
- u8 *lun_table;
spin_lock(&ctlr->ms_lock);
list_splice_init(&ctlr->ms_head, &list);
@@ -573,21 +580,12 @@ static void send_mode_select(struct work_struct *work)
ctlr->ms_sdev = NULL;
spin_unlock(&ctlr->ms_lock);
- if (ctlr->use_ms10)
- lun_table = ctlr->mode_select.expanded.lun_table;
- else
- lun_table = ctlr->mode_select.legacy.lun_table;
-
retry:
err = SCSI_DH_RES_TEMP_UNAVAIL;
- rq = rdac_failover_get(sdev, h);
+ rq = rdac_failover_get(sdev, h, &list);
if (!rq)
goto done;
- list_for_each_entry(qdata, &list, entry) {
- lun_table[qdata->h->lun] = 0x81;
- }
-
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"%s MODE_SELECT command",
(char *) h->ctlr->array_name, h->ctlr->index,
@@ -769,6 +767,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"DELL", "MD32xx"},
{"DELL", "MD32xxi"},
{"DELL", "MD36xxi"},
+ {"DELL", "MD36xxf"},
{"LSI", "INF-01-00"},
{"ENGENIO", "INF-01-00"},
{"STK", "FLEXLINE 380"},
@@ -800,7 +799,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
int err;
char array_name[ARRAY_LABEL_LEN];
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
@@ -906,4 +905,5 @@ module_exit(rdac_exit);
MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index 950f27615c76..f6d37d0271f7 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_FCOE) += fcoe.o
obj-$(CONFIG_LIBFCOE) += libfcoe.o
+
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 3becc6a20a4f..bde6ee5333eb 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,7 @@
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
+#include <linux/workqueue.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -58,6 +59,8 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
DEFINE_MUTEX(fcoe_config_mutex);
+static struct workqueue_struct *fcoe_wq;
+
/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
static DECLARE_COMPLETION(fcoe_flush_completion);
@@ -72,7 +75,6 @@ static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
-static void fcoe_clean_pending_queue(struct fc_lport *);
static void fcoe_percpu_clean(struct fc_lport *);
static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
@@ -80,7 +82,6 @@ static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
-static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
@@ -101,10 +102,11 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
-static int fcoe_create(const char *, struct kernel_param *);
-static int fcoe_destroy(const char *, struct kernel_param *);
-static int fcoe_enable(const char *, struct kernel_param *);
-static int fcoe_disable(const char *, struct kernel_param *);
+static bool fcoe_match(struct net_device *netdev);
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
+static int fcoe_destroy(struct net_device *netdev);
+static int fcoe_enable(struct net_device *netdev);
+static int fcoe_disable(struct net_device *netdev);
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
@@ -117,24 +119,6 @@ static void fcoe_recv_frame(struct sk_buff *skb);
static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR);
-__MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
-module_param_call(create_vn2vn, fcoe_create, NULL,
- (void *)FIP_MODE_VN2VN, S_IWUSR);
-__MODULE_PARM_TYPE(create_vn2vn, "string");
-MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
- "on an Ethernet interface");
-module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
-module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(enable, "string");
-MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
-module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(disable, "string");
-MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
-
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
.notifier_call = fcoe_device_notification,
@@ -145,8 +129,8 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
-static struct scsi_transport_template *fcoe_transport_template;
-static struct scsi_transport_template *fcoe_vport_transport_template;
+static struct scsi_transport_template *fcoe_nport_scsi_transport;
+static struct scsi_transport_template *fcoe_vport_scsi_transport;
static int fcoe_vport_destroy(struct fc_vport *);
static int fcoe_vport_create(struct fc_vport *, bool disabled);
@@ -163,7 +147,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.lport_set_port_id = fcoe_set_port_id,
};
-struct fc_function_template fcoe_transport_function = {
+struct fc_function_template fcoe_nport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -203,7 +187,7 @@ struct fc_function_template fcoe_transport_function = {
.bsg_request = fc_lport_bsg_request,
};
-struct fc_function_template fcoe_vport_transport_function = {
+struct fc_function_template fcoe_vport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -354,10 +338,18 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
struct fcoe_interface *fcoe;
int err;
+ if (!try_module_get(THIS_MODULE)) {
+ FCOE_NETDEV_DBG(netdev,
+ "Could not get a reference to the module\n");
+ fcoe = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
if (!fcoe) {
FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
- return NULL;
+ fcoe = ERR_PTR(-ENOMEM);
+ goto out_nomod;
}
dev_hold(netdev);
@@ -376,9 +368,15 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
dev_put(netdev);
- return NULL;
+ fcoe = ERR_PTR(err);
+ goto out_nomod;
}
+ goto out;
+
+out_nomod:
+ module_put(THIS_MODULE);
+out:
return fcoe;
}
@@ -440,6 +438,7 @@ static void fcoe_interface_release(struct kref *kref)
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
dev_put(netdev);
+ module_put(THIS_MODULE);
}
/**
@@ -503,7 +502,7 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
@@ -559,17 +558,6 @@ static int fcoe_lport_config(struct fc_lport *lport)
}
/**
- * fcoe_queue_timer() - The fcoe queue timer
- * @lport: The local port
- *
- * Calls fcoe_check_wait_queue on timeout
- */
-static void fcoe_queue_timer(ulong lport)
-{
- fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
-}
-
-/**
* fcoe_get_wwn() - Get the world wide name from LLD if it supports it
* @netdev: the associated net device
* @wwn: the output WWN
@@ -648,7 +636,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
/* Setup lport private data to point to fcoe softc */
port = lport_priv(lport);
- fcoe = port->fcoe;
+ fcoe = port->priv;
/*
* Determine max frame size based on underlying device and optional
@@ -706,9 +694,9 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
if (lport->vport)
- lport->host->transportt = fcoe_vport_transport_template;
+ lport->host->transportt = fcoe_vport_scsi_transport;
else
- lport->host->transportt = fcoe_transport_template;
+ lport->host->transportt = fcoe_nport_scsi_transport;
/* add the new host to the SCSI-ml */
rc = scsi_add_host(lport->host, dev);
@@ -758,7 +746,7 @@ bool fcoe_oem_match(struct fc_frame *fp)
static inline int fcoe_em_config(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct fcoe_interface *oldfcoe = NULL;
struct net_device *old_real_dev, *cur_real_dev;
u16 min_xid = FCOE_MIN_XID;
@@ -842,7 +830,7 @@ skip_oem:
static void fcoe_if_destroy(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
@@ -884,7 +872,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Release the Scsi_Host */
scsi_host_put(lport->host);
- module_put(THIS_MODULE);
}
/**
@@ -939,8 +926,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct device *parent, int npiv)
{
struct net_device *netdev = fcoe->netdev;
- struct fc_lport *lport = NULL;
+ struct fc_lport *lport, *n_port;
struct fcoe_port *port;
+ struct Scsi_Host *shost;
int rc;
/*
* parent is only a vport if npiv is 1,
@@ -950,13 +938,11 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
FCOE_NETDEV_DBG(netdev, "Create Interface\n");
- if (!npiv) {
- lport = libfc_host_alloc(&fcoe_shost_template,
- sizeof(struct fcoe_port));
- } else {
- lport = libfc_vport_create(vport,
- sizeof(struct fcoe_port));
- }
+ if (!npiv)
+ lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
+ else
+ lport = libfc_vport_create(vport, sizeof(*port));
+
if (!lport) {
FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
rc = -ENOMEM;
@@ -964,7 +950,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
port = lport_priv(lport);
port->lport = lport;
- port->fcoe = fcoe;
+ port->priv = fcoe;
+ port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
+ port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
INIT_WORK(&port->destroy_work, fcoe_destroy_work);
/* configure a fc_lport including the exchange manager */
@@ -1007,24 +995,27 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
goto out_lp_destroy;
}
- if (!npiv) {
- /*
- * fcoe_em_alloc() and fcoe_hostlist_add() both
- * need to be atomic with respect to other changes to the
- * hostlist since fcoe_em_alloc() looks for an existing EM
- * instance on host list updated by fcoe_hostlist_add().
- *
- * This is currently handled through the fcoe_config_mutex
- * begin held.
- */
-
+ /*
+ * fcoe_em_alloc() and fcoe_hostlist_add() both
+ * need to be atomic with respect to other changes to the
+ * hostlist since fcoe_em_alloc() looks for an existing EM
+ * instance on host list updated by fcoe_hostlist_add().
+ *
+ * This is currently handled through the fcoe_config_mutex
+ * begin held.
+ */
+ if (!npiv)
/* lport exch manager allocation */
rc = fcoe_em_config(lport);
- if (rc) {
- FCOE_NETDEV_DBG(netdev, "Could not configure the EM "
- "for the interface\n");
- goto out_lp_destroy;
- }
+ else {
+ shost = vport_to_shost(vport);
+ n_port = shost_priv(shost);
+ rc = fc_exch_mgr_list_clone(n_port, lport);
+ }
+
+ if (rc) {
+ FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
+ goto out_lp_destroy;
}
fcoe_interface_get(fcoe);
@@ -1048,11 +1039,12 @@ out:
static int __init fcoe_if_init(void)
{
/* attach to scsi transport */
- fcoe_transport_template = fc_attach_transport(&fcoe_transport_function);
- fcoe_vport_transport_template =
- fc_attach_transport(&fcoe_vport_transport_function);
+ fcoe_nport_scsi_transport =
+ fc_attach_transport(&fcoe_nport_fc_functions);
+ fcoe_vport_scsi_transport =
+ fc_attach_transport(&fcoe_vport_fc_functions);
- if (!fcoe_transport_template) {
+ if (!fcoe_nport_scsi_transport) {
printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
return -ENODEV;
}
@@ -1069,10 +1061,10 @@ static int __init fcoe_if_init(void)
*/
int __exit fcoe_if_exit(void)
{
- fc_release_transport(fcoe_transport_template);
- fc_release_transport(fcoe_vport_transport_template);
- fcoe_transport_template = NULL;
- fcoe_vport_transport_template = NULL;
+ fc_release_transport(fcoe_nport_scsi_transport);
+ fc_release_transport(fcoe_vport_scsi_transport);
+ fcoe_nport_scsi_transport = NULL;
+ fcoe_vport_scsi_transport = NULL;
return 0;
}
@@ -1359,108 +1351,22 @@ err2:
}
/**
- * fcoe_start_io() - Start FCoE I/O
- * @skb: The packet to be transmitted
- *
- * This routine is called from the net device to start transmitting
- * FCoE packets.
- *
- * Returns: 0 for success
- */
-static inline int fcoe_start_io(struct sk_buff *skb)
-{
- struct sk_buff *nskb;
- int rc;
-
- nskb = skb_clone(skb, GFP_ATOMIC);
- rc = dev_queue_xmit(nskb);
- if (rc != 0)
- return rc;
- kfree_skb(skb);
- return 0;
-}
-
-/**
- * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
* @skb: The packet to be transmitted
* @tlen: The total length of the trailer
*
- * This routine allocates a page for frame trailers. The page is re-used if
- * there is enough room left on it for the current trailer. If there isn't
- * enough buffer left a new page is allocated for the trailer. Reference to
- * the page from this function as well as the skbs using the page fragments
- * ensure that the page is freed at the appropriate time.
- *
* Returns: 0 for success
*/
-static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
- struct page *page;
+ int rc;
fps = &get_cpu_var(fcoe_percpu);
- page = fps->crc_eof_page;
- if (!page) {
- page = alloc_page(GFP_ATOMIC);
- if (!page) {
- put_cpu_var(fcoe_percpu);
- return -ENOMEM;
- }
- fps->crc_eof_page = page;
- fps->crc_eof_offset = 0;
- }
-
- get_page(page);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
- fps->crc_eof_offset, tlen);
- skb->len += tlen;
- skb->data_len += tlen;
- skb->truesize += tlen;
- fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
-
- if (fps->crc_eof_offset >= PAGE_SIZE) {
- fps->crc_eof_page = NULL;
- fps->crc_eof_offset = 0;
- put_page(page);
- }
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
put_cpu_var(fcoe_percpu);
- return 0;
-}
-/**
- * fcoe_fc_crc() - Calculates the CRC for a given frame
- * @fp: The frame to be checksumed
- *
- * This uses crc32() routine to calculate the CRC for a frame
- *
- * Return: The 32 bit CRC value
- */
-u32 fcoe_fc_crc(struct fc_frame *fp)
-{
- struct sk_buff *skb = fp_skb(fp);
- struct skb_frag_struct *frag;
- unsigned char *data;
- unsigned long off, len, clen;
- u32 crc;
- unsigned i;
-
- crc = crc32(~0, skb->data, skb_headlen(skb));
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- off = frag->page_offset;
- len = frag->size;
- while (len > 0) {
- clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
- data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
- KM_SKB_DATA_SOFTIRQ);
- crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
- kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
- off += clen;
- len -= clen;
- }
- }
- return crc;
+ return rc;
}
/**
@@ -1483,7 +1389,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
unsigned int tlen; /* trailer length */
unsigned int elen; /* eth header, may include vlan */
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
u8 sof, eof;
struct fcoe_hdr *hp;
@@ -1524,7 +1430,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* copy port crc and eof to the skb buff */
if (skb_is_nonlinear(skb)) {
skb_frag_t *frag;
- if (fcoe_get_paged_crc_eof(skb, tlen)) {
+ if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
kfree_skb(skb);
return -ENOMEM;
}
@@ -1604,6 +1510,56 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
}
/**
+ * fcoe_filter_frames() - Filter out bad FCoE frames, e.g. frames with a bad CRC
+ * @lport: The local port the frame was received on
+ * @fp: The received frame
+ *
+ * Return: 0 on passing filtering checks
+ */
+static inline int fcoe_filter_frames(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fcoe_interface *fcoe;
+ struct fc_frame_header *fh;
+ struct sk_buff *skb = (struct sk_buff *)fp;
+ struct fcoe_dev_stats *stats;
+
+ /*
+ * We only check the CRC if no offload is available. Even then,
+ * solicited FCP data is not checked here, since the FCP layer
+ * would check it during the copy.
+ */
+ if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ else
+ fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
+ return 0;
+
+ fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
+ if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+ ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
+ return -EINVAL;
+ }
+
+ if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
+ le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ return 0;
+ }
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ return -EINVAL;
+}
+
+/**
* fcoe_recv_frame() - process a single received frame
* @skb: frame to process
*/
@@ -1613,7 +1569,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fcoe_dev_stats *stats;
- struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fcoe_port *port;
@@ -1644,7 +1599,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
* was done in fcoe_rcv already.
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
- fh = (struct fc_frame_header *) skb_transport_header(skb);
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
@@ -1677,35 +1631,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
if (pskb_trim(skb, fr_len))
goto drop;
- /*
- * We only check CRC if no offload is available and if it is
- * it's solicited data, in which case, the FCP layer would
- * check it during the copy.
- */
- if (lport->crc_offload &&
- skb->ip_summed == CHECKSUM_UNNECESSARY)
- fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
- else
- fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
-
- fh = fc_frame_header_get(fp);
- if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA ||
- fh->fh_type != FC_TYPE_FCP) &&
- (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
- printk(KERN_WARNING "fcoe: dropping "
- "frame with CRC error\n");
- stats->InvalidCRCCount++;
- goto drop;
- }
- fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ if (!fcoe_filter_frames(lport, fp)) {
+ put_cpu();
+ fc_exch_recv(lport, fp);
+ return;
}
- put_cpu();
- fc_exch_recv(lport, fp);
- return;
-
drop:
stats->ErrorFrames++;
put_cpu();
@@ -1744,64 +1674,6 @@ int fcoe_percpu_receive_thread(void *arg)
}
/**
- * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
- * @lport: The local port whose backlog is to be cleared
- *
- * This empties the wait_queue, dequeues the head of the wait_queue queue
- * and calls fcoe_start_io() for each packet. If all skb have been
- * transmitted it returns the qlen. If an error occurs it restores
- * wait_queue (to try again later) and returns -1.
- *
- * The wait_queue is used when the skb transmit fails. The failed skb
- * will go in the wait_queue which will be emptied by the timer function or
- * by the next skb transmit.
- */
-static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
-{
- struct fcoe_port *port = lport_priv(lport);
- int rc;
-
- spin_lock_bh(&port->fcoe_pending_queue.lock);
-
- if (skb)
- __skb_queue_tail(&port->fcoe_pending_queue, skb);
-
- if (port->fcoe_pending_queue_active)
- goto out;
- port->fcoe_pending_queue_active = 1;
-
- while (port->fcoe_pending_queue.qlen) {
- /* keep qlen > 0 until fcoe_start_io succeeds */
- port->fcoe_pending_queue.qlen++;
- skb = __skb_dequeue(&port->fcoe_pending_queue);
-
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- rc = fcoe_start_io(skb);
- spin_lock_bh(&port->fcoe_pending_queue.lock);
-
- if (rc) {
- __skb_queue_head(&port->fcoe_pending_queue, skb);
- /* undo temporary increment above */
- port->fcoe_pending_queue.qlen--;
- break;
- }
- /* undo temporary increment above */
- port->fcoe_pending_queue.qlen--;
- }
-
- if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
- lport->qfull = 0;
- if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
- mod_timer(&port->timer, jiffies + 2);
- port->fcoe_pending_queue_active = 0;
-out:
- if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- lport->qfull = 1;
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- return;
-}
-
-/**
* fcoe_dev_setup() - Setup the link change notification interface
*/
static void fcoe_dev_setup(void)
@@ -1872,7 +1744,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
fcoe_interface_cleanup(fcoe);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
goto out;
break;
case NETDEV_FEAT_CHANGE:
@@ -1898,39 +1770,16 @@ out:
}
/**
- * fcoe_if_to_netdev() - Parse a name buffer to get a net device
- * @buffer: The name of the net device
- *
- * Returns: NULL or a ptr to net_device
- */
-static struct net_device *fcoe_if_to_netdev(const char *buffer)
-{
- char *cp;
- char ifname[IFNAMSIZ + 2];
-
- if (buffer) {
- strlcpy(ifname, buffer, IFNAMSIZ);
- cp = ifname + strlen(ifname);
- while (--cp >= ifname && *cp == '\n')
- *cp = '\0';
- return dev_get_by_name(&init_net, ifname);
- }
- return NULL;
-}
-
-/**
* fcoe_disable() - Disables a FCoE interface
- * @buffer: The name of the Ethernet interface to be disabled
- * @kp: The associated kernel parameter
+ * @netdev: The net_device object of the Ethernet interface to be disabled
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_disable(const char *buffer, struct kernel_param *kp)
+static int fcoe_disable(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -1946,16 +1795,9 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
}
#endif
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
@@ -1967,7 +1809,6 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
} else
rc = -ENODEV;
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -1975,17 +1816,15 @@ out_nodev:
/**
* fcoe_enable() - Enables a FCoE interface
- * @buffer: The name of the Ethernet interface to be enabled
- * @kp: The associated kernel parameter
+ * @netdev: The net_device object of the Ethernet interface to be enabled
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_enable(const char *buffer, struct kernel_param *kp)
+static int fcoe_enable(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -2000,17 +1839,9 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
#endif
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
@@ -2021,7 +1852,6 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
else if (!fcoe_link_ok(fcoe->ctlr.lp))
fcoe_ctlr_link_up(&fcoe->ctlr);
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2029,17 +1859,15 @@ out_nodev:
/**
* fcoe_destroy() - Destroy a FCoE interface
- * @buffer: The name of the Ethernet interface to be destroyed
- * @kp: The associated kernel parameter
+ * @netdev: The net_device object of the Ethernet interface to be destroyed
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
+static int fcoe_destroy(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -2054,32 +1882,21 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
#endif
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
if (!fcoe) {
rtnl_unlock();
rc = -ENODEV;
- goto out_putdev;
+ goto out_nodev;
}
fcoe_interface_cleanup(fcoe);
list_del(&fcoe->list);
/* RTNL mutex is dropped by fcoe_if_destroy */
fcoe_if_destroy(fcoe->ctlr.lp);
-
-out_putdev:
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2102,27 +1919,39 @@ static void fcoe_destroy_work(struct work_struct *work)
}
/**
+ * fcoe_match() - Check if FCoE is supported on the given netdevice
+ * @netdev: The net_device object of the Ethernet interface to check
+ *
+ * Called from fcoe transport.
+ *
+ * Returns: always returns true as this is the default FCoE transport,
+ * i.e., it supports all netdevs.
+ */
+static bool fcoe_match(struct net_device *netdev)
+{
+ return true;
+}
+
+/**
* fcoe_create() - Create a fcoe interface
- * @buffer: The name of the Ethernet interface to create on
- * @kp: The associated kernel param
+ * @netdev: The net_device object of the Ethernet interface to create the FCoE instance on
+ * @fip_mode: The FIP mode for this creation
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_create(const char *buffer, struct kernel_param *kp)
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
- enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
int rc;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
- struct net_device *netdev;
mutex_lock(&fcoe_config_mutex);
if (!rtnl_trylock()) {
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
#ifdef CONFIG_FCOE_MODULE
@@ -2133,31 +1962,20 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
*/
if (THIS_MODULE->state != MODULE_STATE_LIVE) {
rc = -ENODEV;
- goto out_nomod;
- }
-#endif
-
- if (!try_module_get(THIS_MODULE)) {
- rc = -EINVAL;
- goto out_nomod;
- }
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
goto out_nodev;
}
+#endif
/* look for existing lport */
if (fcoe_hostlist_lookup(netdev)) {
rc = -EEXIST;
- goto out_putdev;
+ goto out_nodev;
}
fcoe = fcoe_interface_create(netdev, fip_mode);
- if (!fcoe) {
- rc = -ENOMEM;
- goto out_putdev;
+ if (IS_ERR(fcoe)) {
+ rc = PTR_ERR(fcoe);
+ goto out_nodev;
}
lport = fcoe_if_create(fcoe, &netdev->dev, 0);
@@ -2186,18 +2004,13 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
* should be holding a reference taken in fcoe_if_create().
*/
fcoe_interface_put(fcoe);
- dev_put(netdev);
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return 0;
out_free:
fcoe_interface_put(fcoe);
-out_putdev:
- dev_put(netdev);
out_nodev:
- module_put(THIS_MODULE);
-out_nomod:
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2212,8 +2025,7 @@ out_nomod:
*/
int fcoe_link_speed_update(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct net_device *netdev = port->fcoe->netdev;
+ struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd = { ETHTOOL_GSET };
if (!dev_ethtool_get_settings(netdev, &ecmd)) {
@@ -2244,8 +2056,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
*/
int fcoe_link_ok(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct net_device *netdev = port->fcoe->netdev;
+ struct net_device *netdev = fcoe_netdev(lport);
if (netif_oper_up(netdev))
return 0;
@@ -2309,24 +2120,6 @@ void fcoe_percpu_clean(struct fc_lport *lport)
}
/**
- * fcoe_clean_pending_queue() - Dequeue a skb and free it
- * @lport: The local port to dequeue a skb on
- */
-void fcoe_clean_pending_queue(struct fc_lport *lport)
-{
- struct fcoe_port *port = lport_priv(lport);
- struct sk_buff *skb;
-
- spin_lock_bh(&port->fcoe_pending_queue.lock);
- while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- kfree_skb(skb);
- spin_lock_bh(&port->fcoe_pending_queue.lock);
- }
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
-}
-
-/**
* fcoe_reset() - Reset a local port
* @shost: The SCSI host associated with the local port to be reset
*
@@ -2335,7 +2128,13 @@ void fcoe_clean_pending_queue(struct fc_lport *lport)
int fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
- fc_lport_reset(lport);
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+
+ fcoe_ctlr_link_down(&fcoe->ctlr);
+ fcoe_clean_pending_queue(fcoe->ctlr.lp);
+ if (!fcoe_link_ok(fcoe->ctlr.lp))
+ fcoe_ctlr_link_up(&fcoe->ctlr);
return 0;
}
@@ -2393,12 +2192,24 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
if (!fcoe) {
port = lport_priv(lport);
- fcoe = port->fcoe;
+ fcoe = port->priv;
list_add_tail(&fcoe->list, &fcoe_hostlist);
}
return 0;
}
+
+static struct fcoe_transport fcoe_sw_transport = {
+ .name = {FCOE_TRANSPORT_DEFAULT},
+ .attached = false,
+ .list = LIST_HEAD_INIT(fcoe_sw_transport.list),
+ .match = fcoe_match,
+ .create = fcoe_create,
+ .destroy = fcoe_destroy,
+ .enable = fcoe_enable,
+ .disable = fcoe_disable,
+};
+
/**
* fcoe_init() - Initialize fcoe.ko
*
@@ -2410,6 +2221,18 @@ static int __init fcoe_init(void)
unsigned int cpu;
int rc = 0;
+ fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+ if (!fcoe_wq)
+ return -ENOMEM;
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&fcoe_sw_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ return rc;
+ }
+
mutex_lock(&fcoe_config_mutex);
for_each_possible_cpu(cpu) {
@@ -2440,6 +2263,7 @@ out_free:
fcoe_percpu_thread_destroy(cpu);
}
mutex_unlock(&fcoe_config_mutex);
+ destroy_workqueue(fcoe_wq);
return rc;
}
module_init(fcoe_init);
@@ -2465,7 +2289,7 @@ static void __exit fcoe_exit(void)
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
fcoe_interface_cleanup(fcoe);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2476,16 +2300,21 @@ static void __exit fcoe_exit(void)
mutex_unlock(&fcoe_config_mutex);
- /* flush any asyncronous interface destroys,
- * this should happen after the netdev notifier is unregistered */
- flush_scheduled_work();
- /* That will flush out all the N_Ports on the hostlist, but now we
- * may have NPIV VN_Ports scheduled for destruction */
- flush_scheduled_work();
+ /*
+ * destroy_work items may be chained, but destroy_workqueue()
+ * can take care of them. Just kill the fcoe_wq.
+ */
+ destroy_workqueue(fcoe_wq);
- /* detach from scsi transport
- * must happen after all destroys are done, therefor after the flush */
+ /*
+ * Detaching from the scsi transport must happen after all
+ * destroys are done on the fcoe_wq. destroy_workqueue will
+ * ensure the fcoe_wq is flushed.
+ */
fcoe_if_exit();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&fcoe_sw_transport);
}
module_exit(fcoe_exit);
@@ -2557,7 +2386,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *fip = &fcoe->ctlr;
struct fc_frame_header *fh = fc_frame_header_get(fp);
@@ -2590,7 +2419,7 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fcoe_port *port = lport_priv(n_port);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
struct fc_lport *vn_port;
@@ -2630,7 +2459,7 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
mutex_lock(&n_port->lp_mutex);
list_del(&vn_port->list);
mutex_unlock(&n_port->lp_mutex);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
return 0;
}
@@ -2734,7 +2563,7 @@ static void fcoe_set_port_id(struct fc_lport *lport,
u32 port_id, struct fc_frame *fp)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
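
With the hunks above, fcoe.c no longer exposes its own create/destroy/enable/disable module parameters; instead it fills in the fcoe_sw_transport ops table (match/create/destroy/enable/disable) and registers it with libfcoe via fcoe_transport_attach() from fcoe_init(). The sketch below is not part of the patch: it assumes a hypothetical "acme" offload driver and shows how another transport could plug into the same interface, claiming only its own net devices through ->match().

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <scsi/libfcoe.h>

/* Claim only our own netdevs; fcoe_sw_transport's match accepts everything. */
static bool acme_match(struct net_device *netdev)
{
	return !strncmp(netdev->name, "acme", 4);
}

static int acme_create(struct net_device *netdev, enum fip_state fip_mode)
{
	/* A real driver would allocate its lport/ctlr here. */
	return -EOPNOTSUPP;
}

static int acme_destroy(struct net_device *netdev) { return 0; }
static int acme_enable(struct net_device *netdev)  { return 0; }
static int acme_disable(struct net_device *netdev) { return 0; }

static struct fcoe_transport acme_transport = {
	.name	  = "acme_fcoe",
	.attached = false,
	.list	  = LIST_HEAD_INIT(acme_transport.list),
	.match	  = acme_match,
	.create	  = acme_create,
	.destroy  = acme_destroy,
	.enable	  = acme_enable,
	.disable  = acme_disable,
};

static int __init acme_init(void)
{
	return fcoe_transport_attach(&acme_transport);
}
module_init(acme_init);

static void __exit acme_exit(void)
{
	fcoe_transport_detach(&acme_transport);
}
module_exit(acme_exit);

MODULE_LICENSE("GPL v2");

Because fcoe_transport_attach() adds non-default transports at the head of fcoe_transports and the default software transport at the tail, such a vendor transport would be matched before the default one.
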
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index c69b2c56c2d1..408a6fd78fb4 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -24,7 +24,7 @@
#include <linux/kthread.h>
#define FCOE_MAX_QUEUE_DEPTH 256
-#define FCOE_LOW_QUEUE_DEPTH 32
+#define FCOE_MIN_QUEUE_DEPTH 32
#define FCOE_WORD_TO_BYTE 4
@@ -40,12 +40,6 @@
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
-/*
- * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
- * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes
- */
-#define FCOE_MTU 2158
-
unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
@@ -71,21 +65,6 @@ do { \
netdev->name, ##args);)
/**
- * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads
- * @thread: The thread context
- * @fcoe_rx_list: The queue of pending packets to process
- * @page: The memory page for calculating frame trailer CRCs
- * @crc_eof_offset: The offset into the CRC page pointing to available
- * memory for a new trailer
- */
-struct fcoe_percpu_s {
- struct task_struct *thread;
- struct sk_buff_head fcoe_rx_list;
- struct page *crc_eof_page;
- int crc_eof_offset;
-};
-
-/**
* struct fcoe_interface - A FCoE interface
* @list: Handle for a list of FCoE interfaces
* @netdev: The associated net device
@@ -108,30 +87,6 @@ struct fcoe_interface {
struct kref kref;
};
-/**
- * struct fcoe_port - The FCoE private structure
- * @fcoe: The associated fcoe interface
- * @lport: The associated local port
- * @fcoe_pending_queue: The pending Rx queue of skbs
- * @fcoe_pending_queue_active: Indicates if the pending queue is active
- * @timer: The queue timer
- * @destroy_work: Handle for work context
- * (to prevent RTNL deadlocks)
- * @data_srt_addr: Source address for data
- *
- * An instance of this structure is to be allocated along with the
- * Scsi_Host and libfc fc_lport structures.
- */
-struct fcoe_port {
- struct fcoe_interface *fcoe;
- struct fc_lport *lport;
- struct sk_buff_head fcoe_pending_queue;
- u8 fcoe_pending_queue_active;
- struct timer_list timer;
- struct work_struct destroy_work;
- u8 data_src_addr[ETH_ALEN];
-};
-
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
/**
@@ -140,7 +95,8 @@ struct fcoe_port {
*/
static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
{
- return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev;
+ return ((struct fcoe_interface *)
+ ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
#endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 625c6be25396..c93f007e702f 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -44,9 +44,7 @@
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
-MODULE_AUTHOR("Open-FCoE.org");
-MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs");
-MODULE_LICENSE("GPL v2");
+#include "libfcoe.h"
#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
@@ -66,31 +64,7 @@ static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS;
-unsigned int libfcoe_debug_logging;
-module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
-
-#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-
-#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
-do { \
- if (unlikely(libfcoe_debug_logging & LEVEL)) \
- do { \
- CMD; \
- } while (0); \
-} while (0)
-
-#define LIBFCOE_DBG(fmt, args...) \
- LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
- printk(KERN_INFO "libfcoe: " fmt, ##args);)
-
-#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
- LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
- printk(KERN_INFO "host%d: fip: " fmt, \
- (fip)->lp->host->host_no, ##args);)
-
-static const char *fcoe_ctlr_states[] = {
+static const char * const fcoe_ctlr_states[] = {
[FIP_ST_DISABLED] = "DISABLED",
[FIP_ST_LINK_WAIT] = "LINK_WAIT",
[FIP_ST_AUTO] = "AUTO",
@@ -308,8 +282,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
struct fip_size_desc size;
- } __attribute__((packed)) desc;
- } __attribute__((packed)) *sol;
+ } __packed desc;
+ } __packed * sol;
u32 fcoe_size;
skb = dev_alloc_skb(sizeof(*sol));
@@ -456,7 +430,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
struct ethhdr eth;
struct fip_header fip;
struct fip_mac_desc mac;
- } __attribute__((packed)) *kal;
+ } __packed * kal;
struct fip_vn_desc *vn;
u32 len;
struct fc_lport *lp;
@@ -527,7 +501,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
struct ethhdr eth;
struct fip_header fip;
struct fip_encaps encaps;
- } __attribute__((packed)) *cap;
+ } __packed * cap;
struct fc_frame_header *fh;
struct fip_mac_desc *mac;
struct fcoe_fcf *fcf;
@@ -1819,7 +1793,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
struct fip_vn_desc vn;
- } __attribute__((packed)) *frame;
+ } __packed * frame;
struct fip_fc4_feat *ff;
struct fip_size_desc *size;
u32 fcp_feat;
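
The __attribute__((packed)) to __packed conversions above are purely cosmetic: __packed is the kernel's shorthand macro for the same compiler attribute. The standalone userspace snippet below (illustration only, not from the patch) shows why the attribute matters for these on-wire FIP descriptor structs: without it the compiler may insert padding and the layout would no longer match the protocol.

#include <stdio.h>
#include <stdint.h>

struct padded  { uint8_t type; uint32_t value; };                          /* typically 8 bytes */
struct on_wire { uint8_t type; uint32_t value; } __attribute__((packed));  /* exactly 5 bytes */

int main(void)
{
	printf("padded=%zu on_wire=%zu\n",
	       sizeof(struct padded), sizeof(struct on_wire));
	return 0;
}
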
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
new file mode 100644
index 000000000000..258684101bfd
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -0,0 +1,770 @@
+/*
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/crc32.h>
+#include <scsi/libfcoe.h>
+
+#include "libfcoe.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
+MODULE_LICENSE("GPL v2");
+
+static int fcoe_transport_create(const char *, struct kernel_param *);
+static int fcoe_transport_destroy(const char *, struct kernel_param *);
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
+static int fcoe_transport_enable(const char *, struct kernel_param *);
+static int fcoe_transport_disable(const char *, struct kernel_param *);
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
+
+static LIST_HEAD(fcoe_transports);
+static DEFINE_MUTEX(ft_mutex);
+static LIST_HEAD(fcoe_netdevs);
+static DEFINE_MUTEX(fn_mutex);
+
+unsigned int libfcoe_debug_logging;
+module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR);
+__MODULE_PARM_TYPE(show, "string");
+MODULE_PARM_DESC(show, " Show attached FCoE transports");
+
+module_param_call(create, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_FABRIC, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, " Creates fcoe instance on an Ethernet interface");
+
+module_param_call(create_vn2vn, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_VN2VN, S_IWUSR);
+__MODULE_PARM_TYPE(create_vn2vn, "string");
+MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
+ "on an Ethernet interface");
+
+module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an Ethernet interface");
+
+module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface.");
+
+module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface.");
+
+/* notification function for packets from net device */
+static struct notifier_block libfcoe_notifier = {
+ .notifier_call = libfcoe_device_notification,
+};
+
+/**
+ * fcoe_fc_crc() - Calculates the CRC for a given frame
+ * @fp: The frame to be checksummed
+ *
+ * This uses the crc32() routine to calculate the CRC for a frame
+ *
+ * Return: The 32 bit CRC value
+ */
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+ struct sk_buff *skb = fp_skb(fp);
+ struct skb_frag_struct *frag;
+ unsigned char *data;
+ unsigned long off, len, clen;
+ u32 crc;
+ unsigned i;
+
+ crc = crc32(~0, skb->data, skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ off = frag->page_offset;
+ len = frag->size;
+ while (len > 0) {
+ clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+ data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+ KM_SKB_DATA_SOFTIRQ);
+ crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+ kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+ off += clen;
+ len -= clen;
+ }
+ }
+ return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
+
+/**
+ * fcoe_start_io() - Start FCoE I/O
+ * @skb: The packet to be transmitted
+ *
+ * This routine is called from the net device to start transmitting
+ * FCoE packets.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_start_io(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int rc;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+ rc = dev_queue_xmit(nskb);
+ if (rc != 0)
+ return rc;
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_start_io);
+
+
+/**
+ * fcoe_clean_pending_queue() - Dequeue and free all skbs for a local port
+ * @lport: The local port whose pending queue is to be emptied
+ */
+void fcoe_clean_pending_queue(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct sk_buff *skb;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ kfree_skb(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ }
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
+
+/**
+ * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
+ * @lport: The local port whose backlog is to be cleared
+ *
+ * This empties the wait_queue, dequeues the head of the wait_queue queue
+ * and calls fcoe_start_io() for each packet. If all skb have been
+ * transmitted it returns the qlen. If an error occurs it restores
+ * wait_queue (to try again later) and returns -1.
+ *
+ * The wait_queue is used when the skb transmit fails. The failed skb
+ * will go in the wait_queue which will be emptied by the timer function or
+ * by the next skb transmit.
+ */
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ int rc;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (skb)
+ __skb_queue_tail(&port->fcoe_pending_queue, skb);
+
+ if (port->fcoe_pending_queue_active)
+ goto out;
+ port->fcoe_pending_queue_active = 1;
+
+ while (port->fcoe_pending_queue.qlen) {
+ /* keep qlen > 0 until fcoe_start_io succeeds */
+ port->fcoe_pending_queue.qlen++;
+ skb = __skb_dequeue(&port->fcoe_pending_queue);
+
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ rc = fcoe_start_io(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (rc) {
+ __skb_queue_head(&port->fcoe_pending_queue, skb);
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ break;
+ }
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ }
+
+ if (port->fcoe_pending_queue.qlen < port->min_queue_depth)
+ lport->qfull = 0;
+ if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
+ mod_timer(&port->timer, jiffies + 2);
+ port->fcoe_pending_queue_active = 0;
+out:
+ if (port->fcoe_pending_queue.qlen > port->max_queue_depth)
+ lport->qfull = 1;
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
+
+/**
+ * fcoe_queue_timer() - The fcoe queue timer
+ * @lport: The local port
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ */
+void fcoe_queue_timer(ulong lport)
+{
+ fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
+}
+EXPORT_SYMBOL_GPL(fcoe_queue_timer);
+
+/**
+ * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * @skb: The packet to be transmitted
+ * @tlen: The total length of the trailer
+ * @fps: The fcoe context
+ *
+ * This routine allocates a page for frame trailers. The page is re-used if
+ * there is enough room left on it for the current trailer. If there isn't
+ * enough room left, a new page is allocated for the trailer. References to
+ * the page held by this function and by the skbs using the page fragments
+ * ensure that the page is freed at the appropriate time.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+ struct fcoe_percpu_s *fps)
+{
+ struct page *page;
+
+ page = fps->crc_eof_page;
+ if (!page) {
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return -ENOMEM;
+
+ fps->crc_eof_page = page;
+ fps->crc_eof_offset = 0;
+ }
+
+ get_page(page);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+ fps->crc_eof_offset, tlen);
+ skb->len += tlen;
+ skb->data_len += tlen;
+ skb->truesize += tlen;
+ fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+ if (fps->crc_eof_offset >= PAGE_SIZE) {
+ fps->crc_eof_page = NULL;
+ fps->crc_eof_offset = 0;
+ put_page(page);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof);
+
+/**
+ * fcoe_transport_lookup - find an fcoe transport that matches a netdev
+ * @netdev: The netdev to look for from all attached transports
+ *
+ * Returns : ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+
+ list_for_each_entry(ft, &fcoe_transports, list)
+ if (ft->match && ft->match(netdev))
+ return ft;
+ return NULL;
+}
+
+/**
+ * fcoe_transport_attach - Attaches an FCoE transport
+ * @ft: The fcoe transport to be attached
+ *
+ * Returns : 0 for success
+ */
+int fcoe_transport_attach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+
+ mutex_lock(&ft_mutex);
+ if (ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already attached\n",
+ ft->name);
+ rc = -EEXIST;
+ goto out_attach;
+ }
+
+ /* Add default transport to the tail */
+ if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT))
+ list_add(&ft->list, &fcoe_transports);
+ else
+ list_add_tail(&ft->list, &fcoe_transports);
+
+ ft->attached = true;
+ LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_transport_attach);
+
+/**
+ * fcoe_transport_detach - Detaches an FCoE transport
+ * @ft: The fcoe transport to be detached
+ *
+ * Returns : 0 for success
+ */
+int fcoe_transport_detach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+
+ mutex_lock(&ft_mutex);
+ if (!ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already detached\n",
+ ft->name);
+ rc = -ENODEV;
+ goto out_attach;
+ }
+
+ list_del(&ft->list);
+ ft->attached = false;
+ LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+
+}
+EXPORT_SYMBOL(fcoe_transport_detach);
+
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp)
+{
+ int i, j;
+ struct fcoe_transport *ft = NULL;
+
+ i = j = sprintf(buffer, "Attached FCoE transports:");
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list) {
+ i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
+ if (i >= PAGE_SIZE)
+ break;
+ }
+ mutex_unlock(&ft_mutex);
+ if (i == j)
+ i += snprintf(&buffer[i], IFNAMSIZ, "none");
+ return i;
+}
+
+static int __init fcoe_transport_init(void)
+{
+ register_netdevice_notifier(&libfcoe_notifier);
+ return 0;
+}
+
+static int __exit fcoe_transport_exit(void)
+{
+ struct fcoe_transport *ft;
+
+ unregister_netdevice_notifier(&libfcoe_notifier);
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list)
+ printk(KERN_ERR "FCoE transport %s is still attached!\n",
+ ft->name);
+ mutex_unlock(&ft_mutex);
+ return 0;
+}
+
+
+static int fcoe_add_netdev_mapping(struct net_device *netdev,
+ struct fcoe_transport *ft)
+{
+ struct fcoe_netdev_mapping *nm;
+
+ nm = kmalloc(sizeof(*nm), GFP_KERNEL);
+ if (!nm) {
+ printk(KERN_ERR "Unable to allocate netdev_mapping");
+ return -ENOMEM;
+ }
+
+ nm->netdev = netdev;
+ nm->ft = ft;
+
+ mutex_lock(&fn_mutex);
+ list_add(&nm->list, &fcoe_netdevs);
+ mutex_unlock(&fn_mutex);
+ return 0;
+}
+
+
+static void fcoe_del_netdev_mapping(struct net_device *netdev)
+{
+ struct fcoe_netdev_mapping *nm = NULL, *tmp;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
+ if (nm->netdev == netdev) {
+ list_del(&nm->list);
+ kfree(nm);
+ mutex_unlock(&fn_mutex);
+ return;
+ }
+ }
+ mutex_unlock(&fn_mutex);
+}
+
+
+/**
+ * fcoe_netdev_map_lookup - find the fcoe transport on which an FCoE
+ * instance was created for a given netdev
+ * @netdev: The netdev to look up in the existing netdev mappings
+ *
+ * Returns : ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+ struct fcoe_netdev_mapping *nm;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry(nm, &fcoe_netdevs, list) {
+ if (netdev == nm->netdev) {
+ ft = nm->ft;
+ mutex_unlock(&fn_mutex);
+ return ft;
+ }
+ }
+
+ mutex_unlock(&fn_mutex);
+ return NULL;
+}
+
+/**
+ * fcoe_if_to_netdev() - Parse a name buffer to get a net device
+ * @buffer: The name of the net device
+ *
+ * Returns: NULL or a ptr to net_device
+ */
+static struct net_device *fcoe_if_to_netdev(const char *buffer)
+{
+ char *cp;
+ char ifname[IFNAMSIZ + 2];
+
+ if (buffer) {
+ strlcpy(ifname, buffer, IFNAMSIZ);
+ cp = ifname + strlen(ifname);
+ while (--cp >= ifname && *cp == '\n')
+ *cp = '\0';
+ return dev_get_by_name(&init_net, ifname);
+ }
+ return NULL;
+}
+
+/**
+ * libfcoe_device_notification() - Handler for net device events
+ * @notifier: The context of the notification
+ * @event: The type of event
+ * @ptr: The net device that the event was on
+ *
+ * This function is called by the networking core on net device events,
+ * e.g. NETDEV_UNREGISTER.
+ *
+ * Returns: NOTIFY_OK
+ */
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct net_device *netdev = ptr;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n",
+ netdev->name);
+ fcoe_del_netdev_mapping(netdev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+
+/**
+ * fcoe_transport_create() - Create a fcoe interface
+ * @buffer: The name of the Ethernet interface to create on
+ * @kp: The associated kernel param
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's create function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ rc = fcoe_add_netdev_mapping(netdev, ft);
+ if (rc) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV;
+ if (rc)
+ fcoe_del_netdev_mapping(netdev);
+
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_destroy() - Destroy a FCoE interface
+ * @buffer: The name of the Ethernet interface to be destroyed
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's destroy function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy ? ft->destroy(netdev) : -ENODEV;
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_disable() - Disables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->disable ? ft->disable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_enable() - Enables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->enable ? ft->enable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * libfcoe_init() - Initialization routine for libfcoe.ko
+ */
+static int __init libfcoe_init(void)
+{
+ fcoe_transport_init();
+
+ return 0;
+}
+module_init(libfcoe_init);
+
+/**
+ * libfcoe_exit() - Tear down libfcoe.ko
+ */
+static void __exit libfcoe_exit(void)
+{
+ fcoe_transport_exit();
+}
+module_exit(libfcoe_exit);
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h
new file mode 100644
index 000000000000..6af5fc3a17d8
--- /dev/null
+++ b/drivers/scsi/fcoe/libfcoe.h
@@ -0,0 +1,31 @@
+#ifndef _FCOE_LIBFCOE_H_
+#define _FCOE_LIBFCOE_H_
+
+extern unsigned int libfcoe_debug_logging;
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(libfcoe_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define LIBFCOE_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
+ printk(KERN_INFO "libfcoe: " fmt, ##args);)
+
+#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
+ printk(KERN_INFO "host%d: fip: " fmt, \
+ (fip)->lp->host->host_no, ##args);)
+
+#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
+ printk(KERN_INFO "%s: " fmt, \
+ __func__, ##args);)
+
+#endif /* _FCOE_LIBFCOE_H_ */
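
The logging macros that used to be private to libfcoe.c now live in this shared header, with a new LIBFCOE_TRANSPORT_LOGGING bit (0x04) gating the transport-layer messages. The snippet below is a standalone userspace model of the same bit-mask gating pattern (not from the patch; variable and macro names are illustrative only).

#include <stdio.h>

static unsigned int debug_logging = 0x04;	/* enable transport logging only */

#define LOG_GENERAL	0x01
#define LOG_FIP		0x02
#define LOG_TRANSPORT	0x04

#define DBG(level, fmt, ...)				\
	do {						\
		if (debug_logging & (level))		\
			printf(fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	DBG(LOG_FIP, "fip: suppressed, bit 0x02 is clear\n");
	DBG(LOG_TRANSPORT, "transport: %s\n", "printed, bit 0x04 is set");
	return 0;
}
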
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 92f185081e62..671cde9d4060 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -37,7 +37,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.4.0.145"
+#define DRV_VERSION "1.5.0.1"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index db710148d156..b576be734e2e 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -654,7 +654,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
vdev->linkstatus_pa);
if (vdev->stats)
pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_dev),
+ sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
pci_free_consistent(vdev->pdev,
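
The one-line fnic change above fixes a copy-and-paste size mismatch: the per-device stats buffer was freed with sizeof(struct vnic_dev) rather than sizeof(struct vnic_stats), the size it was allocated with. The standalone sketch below uses hypothetical types and stand-in helpers (not driver code) to show the sizeof(*ptr) idiom that avoids this class of bug.

#include <stdlib.h>

struct vnic_stats_example { unsigned long rx_frames, tx_frames; };
struct vnic_dev_example   { void *bar0; struct vnic_stats_example *stats; };

/* Stand-ins for pci_alloc_consistent()/pci_free_consistent(). */
static void *dma_alloc(size_t len)		{ return calloc(1, len); }
static void  dma_free(void *p, size_t len)	{ (void)len; free(p); }

int main(void)
{
	struct vnic_stats_example *stats = dma_alloc(sizeof(*stats));

	/* Buggy form fixed by the patch:
	 *	dma_free(stats, sizeof(struct vnic_dev_example));
	 * sizeof(*stats) always names the size the buffer was allocated with.
	 */
	dma_free(stats, sizeof(*stats));
	return 0;
}
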
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 12deffccb8da..415ad4fb50d4 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -74,6 +74,10 @@ static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
"Allow hpsa driver to access unknown HP Smart Array hardware");
+static int hpsa_simple_mode;
+module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_simple_mode,
+ "Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
@@ -85,11 +89,13 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -109,11 +115,13 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324a103C, "Smart Array P712m", &SA5_access},
{0x324b103C, "Smart Array P711m", &SA5_access},
- {0x3250103C, "Smart Array", &SA5_access},
- {0x3250113C, "Smart Array", &SA5_access},
- {0x3250123C, "Smart Array", &SA5_access},
- {0x3250133C, "Smart Array", &SA5_access},
- {0x3250143C, "Smart Array", &SA5_access},
+ {0x3350103C, "Smart Array", &SA5_access},
+ {0x3351103C, "Smart Array", &SA5_access},
+ {0x3352103C, "Smart Array", &SA5_access},
+ {0x3353103C, "Smart Array", &SA5_access},
+ {0x3354103C, "Smart Array", &SA5_access},
+ {0x3355103C, "Smart Array", &SA5_access},
+ {0x3356103C, "Smart Array", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@@ -147,17 +155,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
-static ssize_t raid_level_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t lunid_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t unique_id_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t host_show_firmware_revision(struct device *dev,
- struct device_attribute *attr, char *buf);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
-static ssize_t host_store_rescan(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
@@ -173,47 +171,10 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
-
-static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
-static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
-static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-static DEVICE_ATTR(firmware_revision, S_IRUGO,
- host_show_firmware_revision, NULL);
-
-static struct device_attribute *hpsa_sdev_attrs[] = {
- &dev_attr_raid_level,
- &dev_attr_lunid,
- &dev_attr_unique_id,
- NULL,
-};
-
-static struct device_attribute *hpsa_shost_attrs[] = {
- &dev_attr_rescan,
- &dev_attr_firmware_revision,
- NULL,
-};
-
-static struct scsi_host_template hpsa_driver_template = {
- .module = THIS_MODULE,
- .name = "hpsa",
- .proc_name = "hpsa",
- .queuecommand = hpsa_scsi_queue_command,
- .scan_start = hpsa_scan_start,
- .scan_finished = hpsa_scan_finished,
- .change_queue_depth = hpsa_change_queue_depth,
- .this_id = -1,
- .use_clustering = ENABLE_CLUSTERING,
- .eh_device_reset_handler = hpsa_eh_device_reset_handler,
- .ioctl = hpsa_ioctl,
- .slave_alloc = hpsa_slave_alloc,
- .slave_destroy = hpsa_slave_destroy,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = hpsa_compat_ioctl,
-#endif
- .sdev_attrs = hpsa_sdev_attrs,
- .shost_attrs = hpsa_shost_attrs,
-};
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready);
+#define BOARD_NOT_READY 0
+#define BOARD_READY 1
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -291,67 +252,63 @@ static ssize_t host_show_firmware_revision(struct device *dev,
fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
-/* Enqueuing and dequeuing functions for cmdlists. */
-static inline void addQ(struct hlist_head *list, struct CommandList *c)
+static ssize_t host_show_commands_outstanding(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- hlist_add_head(&c->list, list);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ctlr_info *h = shost_to_hba(shost);
+
+ return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}
-static inline u32 next_command(struct ctlr_info *h)
+static ssize_t host_show_transport_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- u32 a;
-
- if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
- return h->access.command_completed(h);
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- a = *(h->reply_pool_head); /* Next cmd in ring buffer */
- (h->reply_pool_head)++;
- h->commands_outstanding--;
- } else {
- a = FIFO_EMPTY;
- }
- /* Check for wraparound */
- if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
- h->reply_pool_head = h->reply_pool;
- h->reply_pool_wraparound ^= 1;
- }
- return a;
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%s\n",
+ h->transMethod & CFGTBL_Trans_Performant ?
+ "performant" : "simple");
}
-/* set_performant_mode: Modify the tag for cciss performant
- * set bit 0 for pull model, bits 3-1 for block fetch
- * register number
- */
-static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
-{
- if (likely(h->transMethod == CFGTBL_Trans_Performant))
- c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-}
+/* List of controllers which cannot be reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+ 0x324a103C, /* Smart Array P712m */
+ 0x324b103C, /* SmartArray P711m */
+ 0x3223103C, /* Smart Array P800 */
+ 0x3234103C, /* Smart Array P400 */
+ 0x3235103C, /* Smart Array P400i */
+ 0x3211103C, /* Smart Array E200i */
+ 0x3212103C, /* Smart Array E200 */
+ 0x3213103C, /* Smart Array E200i */
+ 0x3214103C, /* Smart Array E200i */
+ 0x3215103C, /* Smart Array E200i */
+ 0x3237103C, /* Smart Array E500 */
+ 0x323D103C, /* Smart Array P700m */
+ 0x409C0E11, /* Smart Array 6400 */
+ 0x409D0E11, /* Smart Array 6400 EM */
+};
-static void enqueue_cmd_and_start_io(struct ctlr_info *h,
- struct CommandList *c)
+static int ctlr_is_resettable(struct ctlr_info *h)
{
- unsigned long flags;
+ int i;
- set_performant_mode(h, c);
- spin_lock_irqsave(&h->lock, flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- start_io(h);
- spin_unlock_irqrestore(&h->lock, flags);
+ for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+ if (unresettable_controller[i] == h->board_id)
+ return 0;
+ return 1;
}
-static inline void removeQ(struct CommandList *c)
+static ssize_t host_show_resettable(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- if (WARN_ON(hlist_unhashed(&c->list)))
- return;
- hlist_del_init(&c->list);
-}
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
-static inline int is_hba_lunid(unsigned char scsi3addr[])
-{
- return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
@@ -359,15 +316,6 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
return (scsi3addr[3] & 0xC0) == 0x40;
}
-static inline int is_scsi_rev_5(struct ctlr_info *h)
-{
- if (!h->hba_inquiry_data)
- return 0;
- if ((h->hba_inquiry_data[2] & 0x07) == 5)
- return 1;
- return 0;
-}
-
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"UNKNOWN"
};
@@ -459,6 +407,129 @@ static ssize_t unique_id_show(struct device *dev,
sn[12], sn[13], sn[14], sn[15]);
}
+static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(firmware_revision, S_IRUGO,
+ host_show_firmware_revision, NULL);
+static DEVICE_ATTR(commands_outstanding, S_IRUGO,
+ host_show_commands_outstanding, NULL);
+static DEVICE_ATTR(transport_mode, S_IRUGO,
+ host_show_transport_mode, NULL);
+static DEVICE_ATTR(resettable, S_IRUGO,
+ host_show_resettable, NULL);
+
+static struct device_attribute *hpsa_sdev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_lunid,
+ &dev_attr_unique_id,
+ NULL,
+};
+
+static struct device_attribute *hpsa_shost_attrs[] = {
+ &dev_attr_rescan,
+ &dev_attr_firmware_revision,
+ &dev_attr_commands_outstanding,
+ &dev_attr_transport_mode,
+ &dev_attr_resettable,
+ NULL,
+};
+
+static struct scsi_host_template hpsa_driver_template = {
+ .module = THIS_MODULE,
+ .name = "hpsa",
+ .proc_name = "hpsa",
+ .queuecommand = hpsa_scsi_queue_command,
+ .scan_start = hpsa_scan_start,
+ .scan_finished = hpsa_scan_finished,
+ .change_queue_depth = hpsa_change_queue_depth,
+ .this_id = -1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = hpsa_eh_device_reset_handler,
+ .ioctl = hpsa_ioctl,
+ .slave_alloc = hpsa_slave_alloc,
+ .slave_destroy = hpsa_slave_destroy,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hpsa_compat_ioctl,
+#endif
+ .sdev_attrs = hpsa_sdev_attrs,
+ .shost_attrs = hpsa_shost_attrs,
+};
+
+
+/* Enqueuing and dequeuing functions for cmdlists. */
+static inline void addQ(struct list_head *list, struct CommandList *c)
+{
+ list_add_tail(&c->list, list);
+}
+
+static inline u32 next_command(struct ctlr_info *h)
+{
+ u32 a;
+
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+ return h->access.command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+ (h->reply_pool_head)++;
+ h->commands_outstanding--;
+ } else {
+ a = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+ h->reply_pool_head = h->reply_pool;
+ h->reply_pool_wraparound ^= 1;
+ }
+ return a;
+}
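
For illustration only (not part of the patch), a compact userspace sketch of the parity-toggle ring consumption next_command() performs; the ring size, initial toggle value, and producer behaviour are assumptions of the example:

	/*
	 * Sketch of the toggle/parity scheme: the producer writes each entry
	 * with its low bit equal to the parity of the current pass over the
	 * ring; the consumer only takes an entry whose low bit matches its own
	 * wraparound flag, and flips that flag whenever its head pointer wraps.
	 * Initial values are chosen so a zero-filled ring reads as empty.
	 */
	#include <stdio.h>

	#define RING_SIZE	4
	#define FIFO_EMPTY	0xffffffffu

	static unsigned int ring[RING_SIZE];		/* zero-filled at start */
	static unsigned int *head = ring;
	static unsigned int wraparound = 1;	/* parity expected on the first pass */

	static unsigned int consume(void)
	{
		unsigned int a = FIFO_EMPTY;

		if ((*head & 1) == wraparound)	/* written on the pass we expect */
			a = *head++;
		if (head == ring + RING_SIZE) {	/* wrapped: flip expected parity */
			head = ring;
			wraparound ^= 1;
		}
		return a;
	}

	int main(void)
	{
		/* Producer's first pass tags completions with the low bit set. */
		ring[0] = 0x21;
		ring[1] = 0x41;
		printf("%x %x %x\n", consume(), consume(), consume());
		/* prints "21 41 ffffffff": two completions, then the ring reads empty */
		return 0;
	}
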
+
+/* set_performant_mode: Modify the tag for cciss performant mode:
+ * set bit 0 for the pull model and bits 3-1 to the block fetch
+ * register number.
+ */
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+{
+ if (likely(h->transMethod & CFGTBL_Trans_Performant))
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
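
As an illustration (not part of the patch) of the packing done above, with a made-up busaddr, SG count, and block-fetch table:

	#include <stdio.h>

	int main(void)
	{
		unsigned int blockFetchTable[] = { 0, 1, 2, 3, 4, 5 };	/* contents made up */
		unsigned int busaddr = 0x1000;	/* low bits free for tag info */
		unsigned int sg_count = 3;

		/* bit 0: pull model; bits 3-1: block fetch register index */
		busaddr |= 1 | (blockFetchTable[sg_count] << 1);
		printf("tag = 0x%x\n", busaddr);	/* 0x1000 | 0x1 | (3 << 1) = 0x1007 */
		return 0;
	}
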
+
+static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ unsigned long flags;
+
+ set_performant_mode(h, c);
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+ start_io(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static inline void removeQ(struct CommandList *c)
+{
+ if (WARN_ON(list_empty(&c->list)))
+ return;
+ list_del_init(&c->list);
+}
+
+static inline int is_hba_lunid(unsigned char scsi3addr[])
+{
+ return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+}
+
+static inline int is_scsi_rev_5(struct ctlr_info *h)
+{
+ if (!h->hba_inquiry_data)
+ return 0;
+ if ((h->hba_inquiry_data[2] & 0x07) == 5)
+ return 1;
+ return 0;
+}
+
static int hpsa_find_target_lun(struct ctlr_info *h,
unsigned char scsi3addr[], int bus, int *target, int *lun)
{
@@ -1130,6 +1201,10 @@ static void complete_scsi_command(struct CommandList *cp,
cmd->result = DID_TIME_OUT << 16;
dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
break;
+ case CMD_UNABORTABLE:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "Command unabortable\n");
+ break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1160,7 +1235,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[PERF_MODE_INT];
+ sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &h->pdev->dev);
if (error)
@@ -1295,6 +1370,9 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
case CMD_TIMEOUT:
dev_warn(d, "cp %p timed out\n", cp);
break;
+ case CMD_UNABORTABLE:
+ dev_warn(d, "Command unabortable\n");
+ break;
default:
dev_warn(d, "cp %p returned unknown status %x\n", cp,
ei->CommandStatus);
@@ -1595,6 +1673,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
return 0;
+ memset(scsi3addr, 0, 8);
+ scsi3addr[3] = target;
if (is_hba_lunid(scsi3addr))
return 0; /* Don't add the RAID controller here. */
@@ -1609,8 +1689,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
return 0;
}
- memset(scsi3addr, 0, 8);
- scsi3addr[3] = target;
if (hpsa_update_device_info(h, scsi3addr, this_device))
return 0;
(*nmsa2xxx_enclosures)++;
@@ -2199,7 +2277,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
c->cmdindex = i;
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2237,7 +2315,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
}
memset(c->err_info, 0, sizeof(*c->err_info));
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2267,7 +2345,7 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
pci_free_consistent(h->pdev, sizeof(*c->err_info),
c->err_info, (dma_addr_t) temp64.val);
pci_free_consistent(h->pdev, sizeof(*c),
- c, (dma_addr_t) c->busaddr);
+ c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
#ifdef CONFIG_COMPAT
@@ -2281,6 +2359,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
@@ -2317,6 +2396,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
@@ -2433,15 +2513,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
if (buff == NULL)
return -EFAULT;
- }
- if (iocommand.Request.Type.Direction == XFER_WRITE) {
- /* Copy the data into the buffer we created */
- if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
+ if (iocommand.Request.Type.Direction == XFER_WRITE) {
+ /* Copy the data into the buffer we created */
+ if (copy_from_user(buff, iocommand.buf,
+ iocommand.buf_size)) {
+ kfree(buff);
+ return -EFAULT;
+ }
+ } else {
+ memset(buff, 0, iocommand.buf_size);
}
- } else
- memset(buff, 0, iocommand.buf_size);
+ }
c = cmd_special_alloc(h);
if (c == NULL) {
kfree(buff);
@@ -2487,8 +2569,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
cmd_special_free(h, c);
return -EFAULT;
}
-
- if (iocommand.Request.Type.Direction == XFER_READ) {
+ if (iocommand.Request.Type.Direction == XFER_READ &&
+ iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
kfree(buff);
@@ -2581,14 +2663,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
-
- if (ioc->buf_size > 0) {
- c->Header.SGList = sg_used;
- c->Header.SGTotal = sg_used;
- } else {
- c->Header.SGList = 0;
- c->Header.SGTotal = 0;
- }
+ c->Header.SGList = c->Header.SGTotal = sg_used;
memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
c->Header.Tag.lower = c->busaddr;
memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
@@ -2605,7 +2680,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
}
hpsa_scsi_do_simple_cmd_core(h, c);
- hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ if (sg_used)
+ hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
@@ -2614,7 +2690,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = -EFAULT;
goto cleanup1;
}
- if (ioc->Request.Type.Direction == XFER_READ) {
+ if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
/* Copy the data out of the buffer we created */
BYTE __user *ptr = ioc->buf;
for (i = 0; i < sg_used; i++) {
@@ -2810,8 +2886,8 @@ static void start_io(struct ctlr_info *h)
{
struct CommandList *c;
- while (!hlist_empty(&h->reqQ)) {
- c = hlist_entry(h->reqQ.first, struct CommandList, list);
+ while (!list_empty(&h->reqQ)) {
+ c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
dev_warn(&h->pdev->dev, "fifo full\n");
@@ -2867,20 +2943,22 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
static inline u32 hpsa_tag_contains_index(u32 tag)
{
-#define DIRECT_LOOKUP_BIT 0x10
return tag & DIRECT_LOOKUP_BIT;
}
static inline u32 hpsa_tag_to_index(u32 tag)
{
-#define DIRECT_LOOKUP_SHIFT 5
return tag >> DIRECT_LOOKUP_SHIFT;
}
-static inline u32 hpsa_tag_discard_error_bits(u32 tag)
+
+static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
-#define HPSA_ERROR_BITS 0x03
- return tag & ~HPSA_ERROR_BITS;
+#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define HPSA_SIMPLE_ERROR_BITS 0x03
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+ return tag & ~HPSA_SIMPLE_ERROR_BITS;
+ return tag & ~HPSA_PERF_ERROR_BITS;
}
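
A short worked example (not part of the patch) using the two masks above and DIRECT_LOOKUP_SHIFT == 5 from hpsa_cmd.h; the raw tag value is made up:

	#include <stdio.h>

	#define DIRECT_LOOKUP_SHIFT	5
	#define HPSA_PERF_ERROR_BITS	((1 << DIRECT_LOOKUP_SHIFT) - 1)	/* 0x1f */
	#define HPSA_SIMPLE_ERROR_BITS	0x03

	int main(void)
	{
		unsigned int raw_tag = 0x1007;	/* e.g. a performant-mode completion */

		/* performant mode: clear the whole low-5-bit tag field */
		printf("perf:   0x%x\n", raw_tag & ~HPSA_PERF_ERROR_BITS);	/* 0x1000 */
		/* simple mode: clear only the two error bits */
		printf("simple: 0x%x\n", raw_tag & ~HPSA_SIMPLE_ERROR_BITS);	/* 0x1004 */
		return 0;
	}
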
/* process completion of an indexed ("direct lookup") command */
@@ -2904,10 +2982,9 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
{
u32 tag;
struct CommandList *c = NULL;
- struct hlist_node *tmp;
- tag = hpsa_tag_discard_error_bits(raw_tag);
- hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ tag = hpsa_tag_discard_error_bits(h, raw_tag);
+ list_for_each_entry(c, &h->cmpQ, list) {
if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
finish_cmd(c, raw_tag);
return next_command(h);
@@ -2957,7 +3034,10 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Send a message CDB to the firmware. */
+/* Send a message CDB to the firmware. Careful, this only works
+ * in simple mode, not performant mode due to the tag lookup.
+ * We only ever use this immediately after a controller reset.
+ */
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
unsigned char type)
{
@@ -3023,7 +3103,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
- if (hpsa_tag_discard_error_bits(tag) == paddr32)
+ if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
break;
msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
}
@@ -3055,38 +3135,6 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
#define hpsa_noop(p) hpsa_message(p, 3, 0)
-static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
- int pos;
- u16 control = 0;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSI_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI\n");
- pci_write_config_word(pdev, msi_control_reg(pos),
- control & ~PCI_MSI_FLAGS_ENABLE);
- }
- }
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev, msi_control_reg(pos),
- control & ~PCI_MSIX_FLAGS_ENABLE);
- }
- }
-
- return 0;
-}
-
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
void * __iomem vaddr, bool use_doorbell)
{
@@ -3142,17 +3190,17 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
*/
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
- u16 saved_config_space[32];
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
void __iomem *vaddr;
unsigned long paddr;
u32 misc_fw_support, active_transport;
- int rc, i;
+ int rc;
struct CfgTable __iomem *cfgtable;
bool use_doorbell;
u32 board_id;
+ u16 command_register;
/* For controllers as old as the P600, this is very nearly
* the same thing as
@@ -3162,14 +3210,6 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
* pci_set_power_state(pci_dev, PCI_D0);
* pci_restore_state(pci_dev);
*
- * but we can't use these nice canned kernel routines on
- * kexec, because they also check the MSI/MSI-X state in PCI
- * configuration space and do the wrong thing when it is
- * set/cleared. Also, the pci_save/restore_state functions
- * violate the ordering requirements for restoring the
- * configuration space from the CCISS document (see the
- * comment below). So we roll our own ....
- *
* For controllers newer than the P600, the pci power state
* method of resetting doesn't work so we have another way
* using the doorbell register.
@@ -3182,13 +3222,21 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
* likely not be happy. Just forbid resetting this conjoined mess.
* The 640x isn't really supported by hpsa anyway.
*/
- hpsa_lookup_board_id(pdev, &board_id);
+ rc = hpsa_lookup_board_id(pdev, &board_id);
+ if (rc < 0) {
+ dev_warn(&pdev->dev, "Not resetting device.\n");
+ return -ENODEV;
+ }
if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
return -ENOTSUPP;
- for (i = 0; i < 32; i++)
- pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
-
+ /* Save the PCI command register */
+ pci_read_config_word(pdev, 4, &command_register);
+ /* Turn the board off. This is so that later pci_restore_state()
+ * won't turn the board on before the rest of config space is ready.
+ */
+ pci_disable_device(pdev);
+ pci_save_state(pdev);
/* find the first memory BAR, so we can find the cfg table */
rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
@@ -3214,46 +3262,47 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
- /* The doorbell reset seems to cause lockups on some Smart
- * Arrays (e.g. P410, P410i, maybe others). Until this is
- * fixed or at least isolated, avoid the doorbell reset.
- */
- use_doorbell = 0;
-
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
- /* Restore the PCI configuration space. The Open CISS
- * Specification says, "Restore the PCI Configuration
- * Registers, offsets 00h through 60h. It is important to
- * restore the command register, 16-bits at offset 04h,
- * last. Do not restore the configuration status register,
- * 16-bits at offset 06h." Note that the offset is 2*i.
- */
- for (i = 0; i < 32; i++) {
- if (i == 2 || i == 3)
- continue;
- pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "failed to enable device.\n");
+ goto unmap_cfgtable;
}
- wmb();
- pci_write_config_word(pdev, 4, saved_config_space[2]);
+ pci_write_config_word(pdev, 4, command_register);
/* Some devices (notably the HP Smart Array 5i Controller)
need a little pause here */
msleep(HPSA_POST_RESET_PAUSE_MSECS);
+ /* Wait for board to become not ready, then ready. */
+ dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+ rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become not ready\n");
+ rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become ready\n");
+ goto unmap_cfgtable;
+ }
+ dev_info(&pdev->dev, "board ready.\n");
+
/* Controller should be in simple mode at this point. If it's not,
* It means we're on one of those controllers which doesn't support
* the doorbell reset method and on which the PCI power management reset
* method doesn't work (P800, for example.)
- * In those cases, pretend the reset worked and hope for the best.
+ * In those cases, don't try to proceed, as it generally doesn't work.
*/
active_transport = readl(&cfgtable->TransportActive);
if (active_transport & PERFORMANT_MODE) {
dev_warn(&pdev->dev, "Unable to successfully reset controller,"
- " proceeding anyway.\n");
- rc = -ENOTSUPP;
+ " Ignoring controller.\n");
+ rc = -ENODEV;
}
unmap_cfgtable:
@@ -3386,7 +3435,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
- h->intr[PERF_MODE_INT] = h->pdev->irq;
+ h->intr[h->intr_mode] = h->pdev->irq;
}
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -3438,18 +3487,28 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready)
{
- int i;
+ int i, iterations;
u32 scratchpad;
+ if (wait_for_ready)
+ iterations = HPSA_BOARD_READY_ITERATIONS;
+ else
+ iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
- for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
- scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- if (scratchpad == HPSA_FIRMWARE_READY)
- return 0;
+ for (i = 0; i < iterations; i++) {
+ scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (wait_for_ready) {
+ if (scratchpad == HPSA_FIRMWARE_READY)
+ return 0;
+ } else {
+ if (scratchpad != HPSA_FIRMWARE_READY)
+ return 0;
+ }
msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
}
- dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+ dev_warn(&pdev->dev, "board not ready, timed out.\n");
return -ENODEV;
}
@@ -3497,6 +3556,11 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+ /* Limit commands in a memory-limited kdump scenario. */
+ if (reset_devices && h->max_commands > 32)
+ h->max_commands = 32;
+
if (h->max_commands < 16) {
dev_warn(&h->pdev->dev, "Controller reports "
"max supported commands of %d, an obvious lie. "
@@ -3571,16 +3635,21 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
+ u32 doorbell_value;
+ unsigned long flags;
/* under certain very rare conditions, this can take awhile.
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
* as we enter this code.)
*/
for (i = 0; i < MAX_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+ spin_lock_irqsave(&h->lock, flags);
+ doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (!(doorbell_value & CFGTBL_ChangeReq))
break;
/* delay and try again */
- msleep(10);
+ usleep_range(10000, 20000);
}
}
@@ -3603,6 +3672,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
"unable to get board into simple mode\n");
return -ENODEV;
}
+ h->transMethod = CFGTBL_Trans_Simple;
return 0;
}
@@ -3641,7 +3711,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
err = -ENOMEM;
goto err_out_free_res;
}
- err = hpsa_wait_for_board_ready(h);
+ err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
if (err)
goto err_out_free_res;
err = hpsa_find_cfgtables(h);
@@ -3710,8 +3780,6 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
return 0; /* just try to do the kdump anyhow. */
if (rc)
return -ENODEV;
- if (hpsa_reset_msi(pdev))
- return -ENODEV;
/* Now try to get the controller to respond to a no-op */
for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
@@ -3749,8 +3817,11 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->pdev = pdev;
h->busy_initializing = 1;
- INIT_HLIST_HEAD(&h->cmpQ);
- INIT_HLIST_HEAD(&h->reqQ);
+ h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
+ INIT_LIST_HEAD(&h->cmpQ);
+ INIT_LIST_HEAD(&h->reqQ);
+ spin_lock_init(&h->lock);
+ spin_lock_init(&h->scan_lock);
rc = hpsa_pci_init(h);
if (rc != 0)
goto clean1;
@@ -3777,20 +3848,20 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->access.set_intr_mask(h, HPSA_INTR_OFF);
if (h->msix_vector || h->msi_vector)
- rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
+ rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
IRQF_DISABLED, h->devname, h);
else
- rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
+ rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx,
IRQF_DISABLED, h->devname, h);
if (rc) {
dev_err(&pdev->dev, "unable to get irq %d for %s\n",
- h->intr[PERF_MODE_INT], h->devname);
+ h->intr[h->intr_mode], h->devname);
goto clean2;
}
dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
h->devname, pdev->device,
- h->intr[PERF_MODE_INT], dac ? "" : " not");
+ h->intr[h->intr_mode], dac ? "" : " not");
h->cmd_pool_bits =
kmalloc(((h->nr_cmds + BITS_PER_LONG -
@@ -3810,8 +3881,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
}
if (hpsa_allocate_sg_chain_blocks(h))
goto clean4;
- spin_lock_init(&h->lock);
- spin_lock_init(&h->scan_lock);
init_waitqueue_head(&h->scan_wait_queue);
h->scan_finished = 1; /* no scan currently in progress */
@@ -3843,7 +3912,7 @@ clean4:
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
h->busy_initializing = 0;
@@ -3887,7 +3956,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
*/
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
@@ -3989,7 +4058,8 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
+static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
+ u32 use_short_tags)
{
int i;
unsigned long register_value;
@@ -4037,7 +4107,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
writel(0, &h->transtable->RepQCtrAddrHigh32);
writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
writel(0, &h->transtable->RepQAddr0High32);
- writel(CFGTBL_Trans_Performant,
+ writel(CFGTBL_Trans_Performant | use_short_tags,
&(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
@@ -4047,12 +4117,18 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
" performant mode\n");
return;
}
+ /* Change the access methods to the performant access methods */
+ h->access = SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
}
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
+ if (hpsa_simple_mode)
+ return;
+
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
return;
@@ -4072,11 +4148,8 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
|| (h->blockFetchTable == NULL))
goto clean_up;
- hpsa_enter_performant_mode(h);
-
- /* Change the access methods to the performant access methods */
- h->access = SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
+ hpsa_enter_performant_mode(h,
+ trans_support & CFGTBL_Trans_use_short_tags);
return;
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 19586e189f0f..621a1530054a 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -72,11 +72,12 @@ struct ctlr_info {
unsigned int intr[4];
unsigned int msix_vector;
unsigned int msi_vector;
+ int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
/* queue and queue Info */
- struct hlist_head reqQ;
- struct hlist_head cmpQ;
+ struct list_head reqQ;
+ struct list_head cmpQ;
unsigned int Qdepth;
unsigned int maxQsinceinit;
unsigned int maxSG;
@@ -154,12 +155,16 @@ struct ctlr_info {
* HPSA_BOARD_READY_ITERATIONS are derived from those.
*/
#define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_NOT_READY_WAIT_SECS (10)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
+#define HPSA_BOARD_NOT_READY_ITERATIONS \
+ ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+ HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index f5c4c3cc0530..18464900e761 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -104,6 +104,7 @@
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
@@ -265,6 +266,7 @@ struct ErrorInfo {
#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
#define HPSA_ERROR_BIT 0x02
struct ctlr_info; /* defined in hpsa.h */
@@ -291,7 +293,7 @@ struct CommandList {
struct ctlr_info *h;
int cmd_type;
long cmdindex;
- struct hlist_node list;
+ struct list_head list;
struct request *rq;
struct completion *waiting;
void *scsi_cmd;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d841e98a8bd5..0621238fac4a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1301,7 +1301,7 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
ipr_clear_res_target(res);
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
}
- } else if (!res->sdev) {
+ } else if (!res->sdev || res->del_from_ml) {
res->add_to_ml = 1;
if (ioa_cfg->allow_ml_add_del)
schedule_work(&ioa_cfg->work_q);
@@ -3104,7 +3104,10 @@ restart:
did_work = 1;
sdev = res->sdev;
if (!scsi_device_get(sdev)) {
- list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ if (!res->add_to_ml)
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ else
+ res->del_from_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_remove_device(sdev);
scsi_device_put(sdev);
@@ -8864,7 +8867,7 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- flush_scheduled_work();
+ flush_work_sync(&ioa_cfg->work_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_lock(&ipr_driver_lock);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index fec47de72535..a860452a8f71 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -608,54 +608,12 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
iscsi_sw_tcp_release_conn(conn);
}
-static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
- char *buf, int *port,
- int (*getname)(struct socket *,
- struct sockaddr *,
- int *addrlen))
-{
- struct sockaddr_storage *addr;
- struct sockaddr_in6 *sin6;
- struct sockaddr_in *sin;
- int rc = 0, len;
-
- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
-
- if (getname(sock, (struct sockaddr *) addr, &len)) {
- rc = -ENODEV;
- goto free_addr;
- }
-
- switch (addr->ss_family) {
- case AF_INET:
- sin = (struct sockaddr_in *)addr;
- spin_lock_bh(&conn->session->lock);
- sprintf(buf, "%pI4", &sin->sin_addr.s_addr);
- *port = be16_to_cpu(sin->sin_port);
- spin_unlock_bh(&conn->session->lock);
- break;
- case AF_INET6:
- sin6 = (struct sockaddr_in6 *)addr;
- spin_lock_bh(&conn->session->lock);
- sprintf(buf, "%pI6", &sin6->sin6_addr);
- *port = be16_to_cpu(sin6->sin6_port);
- spin_unlock_bh(&conn->session->lock);
- break;
- }
-free_addr:
- kfree(addr);
- return rc;
-}
-
static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -670,27 +628,15 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
"sockfd_lookup failed %d\n", err);
return -EEXIST;
}
- /*
- * copy these values now because if we drop the session
- * userspace may still want to query the values since we will
- * be using them for the reconnect
- */
- err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
- &conn->portal_port, kernel_getpeername);
- if (err)
- goto free_socket;
-
- err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
- &ihost->local_port, kernel_getsockname);
- if (err)
- goto free_socket;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
goto free_socket;
+ spin_lock_bh(&session->lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
+ spin_unlock_bh(&session->lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -752,24 +698,74 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- int len;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sockaddr_in6 addr;
+ int rc, len;
switch(param) {
case ISCSI_PARAM_CONN_PORT:
- spin_lock_bh(&conn->session->lock);
- len = sprintf(buf, "%hu\n", conn->portal_port);
- spin_unlock_bh(&conn->session->lock);
- break;
case ISCSI_PARAM_CONN_ADDRESS:
spin_lock_bh(&conn->session->lock);
- len = sprintf(buf, "%s\n", conn->portal_address);
+ if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ spin_unlock_bh(&conn->session->lock);
+ return -ENOTCONN;
+ }
+ rc = kernel_getpeername(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
spin_unlock_bh(&conn->session->lock);
- break;
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
default:
return iscsi_conn_get_param(cls_conn, param, buf);
}
- return len;
+ return 0;
+}
+
+static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
+ struct iscsi_session *session = tcp_sw_host->session;
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct sockaddr_in6 addr;
+ int rc, len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ spin_lock_bh(&session->lock);
+ conn = session->leadconn;
+ if (!conn) {
+ spin_unlock_bh(&session->lock);
+ return -ENOTCONN;
+ }
+ tcp_conn = conn->dd_data;
+
+ tcp_sw_conn = tcp_conn->dd_data;
+ if (!tcp_sw_conn->sock) {
+ spin_unlock_bh(&session->lock);
+ return -ENOTCONN;
+ }
+
+ rc = kernel_getsockname(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
+ spin_unlock_bh(&session->lock);
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return 0;
}
static void
@@ -797,6 +793,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
+ struct iscsi_sw_tcp_host *tcp_sw_host;
struct Scsi_Host *shost;
if (ep) {
@@ -804,7 +801,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
return NULL;
}
- shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1);
+ shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
+ sizeof(struct iscsi_sw_tcp_host), 1);
if (!shost)
return NULL;
shost->transportt = iscsi_sw_tcp_scsi_transport;
@@ -825,6 +823,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
if (!cls_session)
goto remove_host;
session = cls_session->dd_data;
+ tcp_sw_host = iscsi_host_priv(shost);
+ tcp_sw_host->session = session;
shost->can_queue = session->scsi_cmds_max;
if (iscsi_tcp_r2tpool_alloc(session))
@@ -929,7 +929,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_sw_tcp_conn_stop,
/* iscsi host params */
- .get_host_param = iscsi_host_get_param,
+ .get_host_param = iscsi_sw_tcp_host_get_param,
.set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 94644bad0ed7..666fe09378fa 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -55,6 +55,10 @@ struct iscsi_sw_tcp_conn {
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
};
+struct iscsi_sw_tcp_host {
+ struct iscsi_session *session;
+};
+
struct iscsi_sw_tcp_hdrbuf {
struct iscsi_hdr hdrbuf;
char hdrextbuf[ISCSI_MAX_AHS_SIZE +
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index d21367d3305f..28231badd9e6 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -38,7 +38,7 @@ u16 fc_cpu_mask; /* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order; /* 2's power to represent total possible cpus */
static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
-struct workqueue_struct *fc_exch_workqueue;
+static struct workqueue_struct *fc_exch_workqueue;
/*
* Structure and function definitions for managing Fibre Channel Exchanges
@@ -558,6 +558,22 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
return sp;
}
+/*
+ * Set the response handler for the exchange associated with a sequence.
+ */
+static void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *,
+ void *),
+ void *arg)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ spin_lock_bh(&ep->ex_lock);
+ ep->resp = resp;
+ ep->arg = arg;
+ spin_unlock_bh(&ep->ex_lock);
+}
+
/**
* fc_seq_exch_abort() - Abort an exchange and sequence
* @req_sp: The sequence to be aborted
@@ -650,13 +666,10 @@ static void fc_exch_timeout(struct work_struct *work)
if (e_stat & ESB_ST_ABNORMAL)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_delete(ep);
if (resp)
resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
- if (!rc) {
- /* delete the exchange if it's already being aborted */
- fc_exch_delete(ep);
- return;
- }
fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
goto done;
}
@@ -1266,6 +1279,8 @@ free:
* @fp: The request frame
*
* On success, the sequence pointer will be returned and also in fr_seq(@fp).
+ * A reference will be held on the exchange/sequence for the caller, which
+ * must call fc_seq_release().
*/
static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
@@ -1283,6 +1298,15 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
}
/**
+ * fc_seq_release() - Release the hold
+ * @sp: The sequence.
+ */
+static void fc_seq_release(struct fc_seq *sp)
+{
+ fc_exch_release(fc_seq_exch(sp));
+}
+
+/**
* fc_exch_recv_req() - Handler for an incoming request
* @lport: The local port that received the request
* @mp: The EM that the exchange is on
@@ -2151,6 +2175,7 @@ err:
fc_exch_mgr_del(ema);
return -ENOMEM;
}
+EXPORT_SYMBOL(fc_exch_mgr_list_clone);
/**
* fc_exch_mgr_alloc() - Allocate an exchange manager
@@ -2254,16 +2279,45 @@ void fc_exch_mgr_free(struct fc_lport *lport)
EXPORT_SYMBOL(fc_exch_mgr_free);
/**
+ * fc_find_ema() - Look up and return the appropriate Exchange Manager Anchor
+ * for the frame's exchange ID (xid)
+ * @f_ctl: The F_CTL field from the received frame header
+ * @lport: The local port the frame was received on
+ * @fh: The received frame header
+ */
+static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
+ struct fc_lport *lport,
+ struct fc_frame_header *fh)
+{
+ struct fc_exch_mgr_anchor *ema;
+ u16 xid;
+
+ if (f_ctl & FC_FC_EX_CTX)
+ xid = ntohs(fh->fh_ox_id);
+ else {
+ xid = ntohs(fh->fh_rx_id);
+ if (xid == FC_XID_UNKNOWN)
+ return list_entry(lport->ema_list.prev,
+ typeof(*ema), ema_list);
+ }
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if ((xid >= ema->mp->min_xid) &&
+ (xid <= ema->mp->max_xid))
+ return ema;
+ }
+ return NULL;
+}
+/**
* fc_exch_recv() - Handler for received frames
* @lport: The local port the frame was received on
- * @fp: The received frame
+ * @fp: The received frame
*/
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_exch_mgr_anchor *ema;
- u32 f_ctl, found = 0;
- u16 oxid;
+ u32 f_ctl;
/* lport lock ? */
if (!lport || lport->state == LPORT_ST_DISABLED) {
@@ -2274,24 +2328,17 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
}
f_ctl = ntoh24(fh->fh_f_ctl);
- oxid = ntohs(fh->fh_ox_id);
- if (f_ctl & FC_FC_EX_CTX) {
- list_for_each_entry(ema, &lport->ema_list, ema_list) {
- if ((oxid >= ema->mp->min_xid) &&
- (oxid <= ema->mp->max_xid)) {
- found = 1;
- break;
- }
- }
-
- if (!found) {
- FC_LPORT_DBG(lport, "Received response for out "
- "of range oxid:%hx\n", oxid);
- fc_frame_free(fp);
- return;
- }
- } else
- ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list);
+ ema = fc_find_ema(f_ctl, lport, fh);
+ if (!ema) {
+ FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
+ "fc_ctl <0x%x>, xid <0x%x>\n",
+ f_ctl,
+ (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) :
+ ntohs(fh->fh_rx_id));
+ fc_frame_free(fp);
+ return;
+ }
/*
* If frame is marked invalid, just drop it.
@@ -2329,6 +2376,9 @@ int fc_exch_init(struct fc_lport *lport)
if (!lport->tt.seq_start_next)
lport->tt.seq_start_next = fc_seq_start_next;
+ if (!lport->tt.seq_set_resp)
+ lport->tt.seq_set_resp = fc_seq_set_resp;
+
if (!lport->tt.exch_seq_send)
lport->tt.exch_seq_send = fc_exch_seq_send;
@@ -2350,6 +2400,9 @@ int fc_exch_init(struct fc_lport *lport)
if (!lport->tt.seq_assign)
lport->tt.seq_assign = fc_seq_assign;
+ if (!lport->tt.seq_release)
+ lport->tt.seq_release = fc_seq_release;
+
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
@@ -2357,7 +2410,7 @@ EXPORT_SYMBOL(fc_exch_init);
/**
* fc_setup_exch_mgr() - Setup an exchange manager
*/
-int fc_setup_exch_mgr()
+int fc_setup_exch_mgr(void)
{
fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
0, SLAB_HWCACHE_ALIGN, NULL);
@@ -2395,7 +2448,7 @@ int fc_setup_exch_mgr()
/**
* fc_destroy_exch_mgr() - Destroy an exchange manager
*/
-void fc_destroy_exch_mgr()
+void fc_destroy_exch_mgr(void)
{
destroy_workqueue(fc_exch_workqueue);
kmem_cache_destroy(fc_em_cachep);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5962d1a5a674..b1b03af158bf 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -42,7 +42,7 @@
#include "fc_libfc.h"
-struct kmem_cache *scsi_pkt_cachep;
+static struct kmem_cache *scsi_pkt_cachep;
/* SRB state definitions */
#define FC_SRB_FREE 0 /* cmd is free */
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
if (fsp) {
memset(fsp, 0, sizeof(*fsp));
fsp->lp = lport;
+ fsp->xfer_ddp = FC_XID_UNKNOWN;
atomic_set(&fsp->ref_cnt, 1);
init_timer(&fsp->timer);
INIT_LIST_HEAD(&fsp->list);
@@ -1201,6 +1202,7 @@ unlock:
static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
{
int rc = FAILED;
+ unsigned long ticks_left;
if (fc_fcp_send_abort(fsp))
return FAILED;
@@ -1209,13 +1211,13 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
fsp->wait_for_comp = 1;
spin_unlock_bh(&fsp->scsi_pkt_lock);
- rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+ ticks_left = wait_for_completion_timeout(&fsp->tm_done,
+ FC_SCSI_TM_TOV);
spin_lock_bh(&fsp->scsi_pkt_lock);
fsp->wait_for_comp = 0;
- if (!rc) {
+ if (!ticks_left) {
FC_FCP_DBG(fsp, "target abort cmd failed\n");
- rc = FAILED;
} else if (fsp->state & FC_SRB_ABORTED) {
FC_FCP_DBG(fsp, "target abort cmd passed\n");
rc = SUCCESS;
@@ -1321,7 +1323,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
*
* scsi-eh will escalate for when either happens.
*/
- goto out;
+ return;
}
if (fc_fcp_lock_pkt(fsp))
@@ -1787,15 +1789,14 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
/**
* fc_queuecommand() - The queuecommand function of the SCSI template
+ * @shost: The Scsi_Host that the command was issued to
* @cmd: The scsi_cmnd to be executed
- * @done: The callback function to be called when the scsi_cmnd is complete
*
- * This is the i/o strategy routine, called by the SCSI layer. This routine
- * is called with the host_lock held.
+ * This is the i/o strategy routine, called by the SCSI layer.
*/
-static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
{
- struct fc_lport *lport;
+ struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_fcp_pkt *fsp;
struct fc_rport_libfc_priv *rpriv;
@@ -1803,15 +1804,12 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
int rc = 0;
struct fcoe_dev_stats *stats;
- lport = shost_priv(sc_cmd->device->host);
-
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
- done(sc_cmd);
+ sc_cmd->scsi_done(sc_cmd);
return 0;
}
- spin_unlock_irq(lport->host->host_lock);
if (!*(struct fc_remote_port **)rport->dd_data) {
/*
@@ -1819,7 +1817,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
* online
*/
sc_cmd->result = DID_IMM_RETRY << 16;
- done(sc_cmd);
+ sc_cmd->scsi_done(sc_cmd);
goto out;
}
@@ -1842,10 +1840,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
* build the libfc request pkt
*/
fsp->cmd = sc_cmd; /* save the cmd */
- fsp->lp = lport; /* save the softc ptr */
fsp->rport = rport; /* set the remote port ptr */
- fsp->xfer_ddp = FC_XID_UNKNOWN;
- sc_cmd->scsi_done = done;
/*
* set up the transfer length
@@ -1886,11 +1881,8 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
rc = SCSI_MLQUEUE_HOST_BUSY;
}
out:
- spin_lock_irq(lport->host->host_lock);
return rc;
}
-
-DEF_SCSI_QCMD(fc_queuecommand)
EXPORT_SYMBOL(fc_queuecommand);
/**
@@ -2112,7 +2104,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
* the sc passed in is not setup for execution like when sent
* through the queuecommand callout.
*/
- fsp->lp = lport; /* save the softc ptr */
fsp->rport = rport; /* set the remote port ptr */
/*
@@ -2245,7 +2236,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
}
EXPORT_SYMBOL(fc_fcp_destroy);
-int fc_setup_fcp()
+int fc_setup_fcp(void)
{
int rc = 0;
@@ -2261,7 +2252,7 @@ int fc_setup_fcp()
return rc;
}
-void fc_destroy_fcp()
+void fc_destroy_fcp(void)
{
if (scsi_pkt_cachep)
kmem_cache_destroy(scsi_pkt_cachep);
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 6a48c28e4420..b7735129f1f3 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -35,6 +35,27 @@ unsigned int fc_debug_logging;
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+DEFINE_MUTEX(fc_prov_mutex);
+static LIST_HEAD(fc_local_ports);
+struct blocking_notifier_head fc_lport_notifier_head =
+ BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head);
+EXPORT_SYMBOL(fc_lport_notifier_head);
+
+/*
+ * Providers which primarily send requests and PRLIs.
+ */
+struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = {
+ [0] = &fc_rport_t0_prov,
+ [FC_TYPE_FCP] = &fc_rport_fcp_init,
+};
+
+/*
+ * Providers which receive requests.
+ */
+struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = {
+ [FC_TYPE_ELS] = &fc_lport_els_prov,
+};
+
/**
* libfc_init() - Initialize libfc.ko
*/
@@ -210,3 +231,102 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
}
EXPORT_SYMBOL(fc_fill_reply_hdr);
+
+/**
+ * fc_fc4_conf_lport_params() - Modify "service_params" of specified lport
+ * if there is service provider (target provider) registered with libfc
+ * for specified "fc_ft_type"
+ * @lport: Local port which service_params needs to be modified
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ */
+void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type)
+{
+ struct fc4_prov *prov_entry;
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ BUG_ON(!lport);
+ prov_entry = fc_passive_prov[type];
+ if (type == FC_TYPE_FCP) {
+ if (prov_entry && prov_entry->recv)
+ lport->service_params |= FCP_SPPF_TARG_FCN;
+ }
+}
+
+void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg)
+{
+ struct fc_lport *lport;
+
+ mutex_lock(&fc_prov_mutex);
+ list_for_each_entry(lport, &fc_local_ports, lport_list)
+ notify(lport, arg);
+ mutex_unlock(&fc_prov_mutex);
+}
+EXPORT_SYMBOL(fc_lport_iterate);
+
+/**
+ * fc_fc4_register_provider() - register FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ *
+ * Returns 0 on success, negative error otherwise.
+ */
+int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ struct fc4_prov **prov_entry;
+ int ret = 0;
+
+ if (type >= FC_FC4_PROV_SIZE)
+ return -EINVAL;
+ mutex_lock(&fc_prov_mutex);
+ prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type;
+ if (*prov_entry)
+ ret = -EBUSY;
+ else
+ *prov_entry = prov;
+ mutex_unlock(&fc_prov_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(fc_fc4_register_provider);
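
A hypothetical sketch (not part of the patch) of how an FC-4 module could plug into the provider interface registered here; the module name, callback body, and header path are assumptions of the example:

	#include <linux/module.h>
	#include <scsi/libfc.h>		/* assumed location of struct fc4_prov */

	static void my_prov_recv(struct fc_lport *lport, struct fc_frame *fp)
	{
		fc_frame_free(fp);	/* placeholder: just drop the frame */
	}

	static struct fc4_prov my_fcp_targ_prov = {
		.module	= THIS_MODULE,
		.recv	= my_prov_recv,	/* non-NULL recv => passive provider table */
	};

	static int __init my_prov_init(void)
	{
		return fc_fc4_register_provider(FC_TYPE_FCP, &my_fcp_targ_prov);
	}

	static void __exit my_prov_exit(void)
	{
		fc_fc4_deregister_provider(FC_TYPE_FCP, &my_fcp_targ_prov);
	}

	module_init(my_prov_init);
	module_exit(my_prov_exit);
	MODULE_LICENSE("GPL");
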
+
+/**
+ * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ */
+void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ mutex_lock(&fc_prov_mutex);
+ if (prov->recv)
+ rcu_assign_pointer(fc_passive_prov[type], NULL);
+ else
+ rcu_assign_pointer(fc_active_prov[type], NULL);
+ mutex_unlock(&fc_prov_mutex);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(fc_fc4_deregister_provider);
+
+/**
+ * fc_fc4_add_lport() - add new local port to list and run notifiers.
+ * @lport: The new local port.
+ */
+void fc_fc4_add_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_add_tail(&lport->lport_list, &fc_local_ports);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_ADD, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
+
+/**
+ * fc_fc4_del_lport() - remove local port from list and run notifiers.
+ * @lport: The local port being removed.
+ */
+void fc_fc4_del_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_del(&lport->lport_list);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_DEL, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index eea0c3541b71..fedc819d70c0 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -94,6 +94,17 @@ extern unsigned int fc_debug_logging;
(lport)->host->host_no, ##args))
/*
+ * FC-4 Providers.
+ */
+extern struct fc4_prov *fc_active_prov[]; /* providers without recv */
+extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */
+extern struct mutex fc_prov_mutex; /* lock over table changes */
+
+extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */
+extern struct fc4_prov fc_lport_els_prov; /* ELS provider */
+extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
+
+/*
* Set up direct-data placement for this I/O request
*/
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
@@ -112,6 +123,9 @@ void fc_destroy_fcp(void);
* Internal libfc functions
*/
const char *fc_els_resp_type(struct fc_frame *);
+extern void fc_fc4_add_lport(struct fc_lport *);
+extern void fc_fc4_del_lport(struct fc_lport *);
+extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
/*
* Copies a buffer into an sg list
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index c5a10f94f845..8c08b210001d 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -633,6 +633,7 @@ int fc_lport_destroy(struct fc_lport *lport)
lport->tt.fcp_abort_io(lport);
lport->tt.disc_stop_final(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
+ fc_fc4_del_lport(lport);
return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);
@@ -849,7 +850,7 @@ out:
}
/**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv_els_req() - The generic lport ELS request handler
* @lport: The local port that received the request
* @fp: The request frame
*
@@ -859,9 +860,9 @@ out:
* Locking Note: This function should not be called with the lport
 * lock held because it will grab the lock.
*/
-static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_lport_recv_els_req(struct fc_lport *lport,
+ struct fc_frame *fp)
{
- struct fc_frame_header *fh = fc_frame_header_get(fp);
void (*recv)(struct fc_lport *, struct fc_frame *);
mutex_lock(&lport->lp_mutex);
@@ -873,8 +874,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
*/
if (!lport->link_up)
fc_frame_free(fp);
- else if (fh->fh_type == FC_TYPE_ELS &&
- fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+ else {
/*
* Check opcode.
*/
@@ -903,14 +903,62 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
}
recv(lport, fp);
- } else {
- FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
- fr_eof(fp));
- fc_frame_free(fp);
}
mutex_unlock(&lport->lp_mutex);
}
+static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *spp_in,
+ struct fc_els_spp *spp_out)
+{
+ return FC_SPP_RESP_INVL;
+}
+
+struct fc4_prov fc_lport_els_prov = {
+ .prli = fc_lport_els_prli,
+ .recv = fc_lport_recv_els_req,
+};
+
+/**
+ * fc_lport_recv_req() - The generic lport request handler
+ * @lport: The lport that received the request
+ * @fp: The frame the request is in
+ *
+ * Locking Note: This function should not be called with the lport
+ * lock held because it may grab the lock.
+ */
+static void fc_lport_recv_req(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = fr_seq(fp);
+ struct fc4_prov *prov;
+
+ /*
+ * Use RCU read lock and module_lock to be sure module doesn't
+ * deregister and get unloaded while we're calling it.
+ * try_module_get() is inlined and accepts a NULL parameter.
+ * Only ELSes and FCP target ops should come through here.
+ * The locking is unfortunate, and a better scheme is being sought.
+ */
+
+ rcu_read_lock();
+ if (fh->fh_type >= FC_FC4_PROV_SIZE)
+ goto drop;
+ prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
+ if (!prov || !try_module_get(prov->module))
+ goto drop;
+ rcu_read_unlock();
+ prov->recv(lport, fp);
+ module_put(prov->module);
+ return;
+drop:
+ rcu_read_unlock();
+ FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
+ fc_frame_free(fp);
+ lport->tt.exch_done(sp);
+}
+
/**
* fc_lport_reset() - Reset a local port
* @lport: The local port which should be reset
@@ -1542,6 +1590,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
*/
int fc_lport_config(struct fc_lport *lport)
{
+ INIT_LIST_HEAD(&lport->ema_list);
INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
mutex_init(&lport->lp_mutex);
@@ -1549,6 +1598,7 @@ int fc_lport_config(struct fc_lport *lport)
fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
fc_lport_add_fc4_type(lport, FC_TYPE_CT);
+ fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
return 0;
}
@@ -1586,6 +1636,7 @@ int fc_lport_init(struct fc_lport *lport)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+ fc_fc4_add_lport(lport);
return 0;
}
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index dd2b43bb1c70..f33b897e4784 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -37,9 +37,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
vn_port = libfc_host_alloc(shost->hostt, privsize);
if (!vn_port)
- goto err_out;
- if (fc_exch_mgr_list_clone(n_port, vn_port))
- goto err_put;
+ return vn_port;
vn_port->vport = vport;
vport->dd_data = vn_port;
@@ -49,11 +47,6 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
mutex_unlock(&n_port->lp_mutex);
return vn_port;
-
-err_put:
- scsi_host_put(vn_port->host);
-err_out:
- return NULL;
}
EXPORT_SYMBOL(libfc_vport_create);
@@ -86,6 +79,7 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
return lport;
}
+EXPORT_SYMBOL(fc_vport_id_lookup);
/*
* When setting the link state of vports during an lport state change, it's
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index a7175adab32d..49e1ccca09d5 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -58,7 +58,7 @@
#include "fc_libfc.h"
-struct workqueue_struct *rport_event_queue;
+static struct workqueue_struct *rport_event_queue;
static void fc_rport_enter_flogi(struct fc_rport_priv *);
static void fc_rport_enter_plogi(struct fc_rport_priv *);
@@ -145,8 +145,10 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
INIT_WORK(&rdata->event_work, fc_rport_work);
- if (port_id != FC_FID_DIR_SERV)
+ if (port_id != FC_FID_DIR_SERV) {
+ rdata->lld_event_callback = lport->tt.rport_event_callback;
list_add_rcu(&rdata->peers, &lport->disc.rports);
+ }
return rdata;
}
@@ -257,6 +259,8 @@ static void fc_rport_work(struct work_struct *work)
struct fc_rport_operations *rport_ops;
struct fc_rport_identifiers ids;
struct fc_rport *rport;
+ struct fc4_prov *prov;
+ u8 type;
mutex_lock(&rdata->rp_mutex);
event = rdata->event;
@@ -300,12 +304,25 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
kref_put(&rdata->kref, lport->tt.rport_destroy);
break;
case RPORT_EV_FAILED:
case RPORT_EV_LOGO:
case RPORT_EV_STOP:
+ if (rdata->prli_count) {
+ mutex_lock(&fc_prov_mutex);
+ for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
+ prov = fc_passive_prov[type];
+ if (prov && prov->prlo)
+ prov->prlo(rdata);
+ }
+ mutex_unlock(&fc_prov_mutex);
+ }
port_id = rdata->ids.port_id;
mutex_unlock(&rdata->rp_mutex);
@@ -313,6 +330,10 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
cancel_delayed_work_sync(&rdata->retry_work);
/*
@@ -336,6 +357,7 @@ static void fc_rport_work(struct work_struct *work)
if (port_id == FC_FID_DIR_SERV) {
rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
} else if ((rdata->flags & FC_RP_STARTED) &&
rdata->major_retries <
lport->max_rport_retry_count) {
@@ -575,7 +597,7 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (PTR_ERR(fp) == -FC_EX_CLOSED)
- return fc_rport_error(rdata, fp);
+ goto out;
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
@@ -588,7 +610,8 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
return;
}
- return fc_rport_error(rdata, fp);
+out:
+ fc_rport_error(rdata, fp);
}
/**
@@ -878,6 +901,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
+ /* save plogi response sp_features for further reference */
+ rdata->sp_features = ntohs(plp->fl_csp.sp_features);
+
if (lport->point_to_multipoint)
fc_rport_login_complete(rdata, fp);
csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
@@ -949,6 +975,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_prli prli;
struct fc_els_spp spp;
} *pp;
+ struct fc_els_spp temp_spp;
+ struct fc4_prov *prov;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
u32 fcp_parm = 0;
u8 op;
@@ -983,6 +1011,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
pp->spp.spp_flags);
+ rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
fc_rport_error(rdata, fp);
@@ -996,6 +1025,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fcp_parm = ntohl(pp->spp.spp_params);
if (fcp_parm & FCP_SPPF_RETRY)
rdata->flags |= FC_RP_FLAGS_RETRY;
+ if (fcp_parm & FCP_SPPF_CONF_COMPL)
+ rdata->flags |= FC_RP_FLAGS_CONF_REQ;
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ memset(&temp_spp, 0, sizeof(temp_spp));
+ prov->prli(rdata, pp->prli.prli_spp_len,
+ &pp->spp, &temp_spp);
+ }
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
@@ -1033,6 +1071,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
struct fc_els_spp spp;
} *pp;
struct fc_frame *fp;
+ struct fc4_prov *prov;
/*
* If the rport is one of the well known addresses
@@ -1054,9 +1093,20 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
return;
}
- if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
- fc_rport_prli_resp, rdata,
- 2 * lport->r_a_tov))
+ fc_prli_fill(lport, fp);
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
+ }
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
+ fc_host_port_id(lport->host), FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov))
fc_rport_error_retry(rdata, NULL);
else
kref_get(&rdata->kref);
@@ -1642,9 +1692,9 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
unsigned int len;
unsigned int plen;
enum fc_els_spp_resp resp;
+ enum fc_els_spp_resp passive;
struct fc_seq_els_data rjt_data;
- u32 fcp_parm;
- u32 roles = FC_RPORT_ROLE_UNKNOWN;
+ struct fc4_prov *prov;
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
@@ -1678,46 +1728,42 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
pp->prli.prli_len = htons(len);
len -= sizeof(struct fc_els_prli);
- /* reinitialize remote port roles */
- rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
/*
* Go through all the service parameter pages and build
* response. If plen indicates longer SPP than standard,
* use that. The entire response has been pre-cleared above.
*/
spp = &pp->spp;
+ mutex_lock(&fc_prov_mutex);
while (len >= plen) {
+ rdata->spp_type = rspp->spp_type;
spp->spp_type = rspp->spp_type;
spp->spp_type_ext = rspp->spp_type_ext;
- spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
- resp = FC_SPP_RESP_ACK;
-
- switch (rspp->spp_type) {
- case 0: /* common to all FC-4 types */
- break;
- case FC_TYPE_FCP:
- fcp_parm = ntohl(rspp->spp_params);
- if (fcp_parm & FCP_SPPF_RETRY)
- rdata->flags |= FC_RP_FLAGS_RETRY;
- rdata->supported_classes = FC_COS_CLASS3;
- if (fcp_parm & FCP_SPPF_INIT_FCN)
- roles |= FC_RPORT_ROLE_FCP_INITIATOR;
- if (fcp_parm & FCP_SPPF_TARG_FCN)
- roles |= FC_RPORT_ROLE_FCP_TARGET;
- rdata->ids.roles = roles;
-
- spp->spp_params = htonl(lport->service_params);
- break;
- default:
- resp = FC_SPP_RESP_INVL;
- break;
+ resp = 0;
+
+ if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ prov = fc_active_prov[rspp->spp_type];
+ if (prov)
+ resp = prov->prli(rdata, plen, rspp, spp);
+ prov = fc_passive_prov[rspp->spp_type];
+ if (prov) {
+ passive = prov->prli(rdata, plen, rspp, spp);
+ if (!resp || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ }
+ }
+ if (!resp) {
+ if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ resp |= FC_SPP_RESP_CONF;
+ else
+ resp |= FC_SPP_RESP_INVL;
}
spp->spp_flags |= resp;
len -= plen;
rspp = (struct fc_els_spp *)((char *)rspp + plen);
spp = (struct fc_els_spp *)((char *)spp + plen);
}
+ mutex_unlock(&fc_prov_mutex);
/*
* Send LS_ACC. If this fails, the originator should retry.
@@ -1887,9 +1933,82 @@ int fc_rport_init(struct fc_lport *lport)
EXPORT_SYMBOL(fc_rport_init);
/**
+ * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ *
+ * Returns the value for the response code to be placed in spp_flags.
+ * Returns 0 if not an initiator.
+ */
+static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ u32 fcp_parm;
+
+ fcp_parm = ntohl(rspp->spp_params);
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rdata->supported_classes = FC_COS_CLASS3;
+
+ if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
+ return 0;
+
+ spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+
+ /*
+ * OR in our service parameters with other providers (target), if any.
+ */
+ fcp_parm = ntohl(spp->spp_params);
+ spp->spp_params = htonl(fcp_parm | lport->service_params);
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for FCP initiator.
+ */
+struct fc4_prov fc_rport_fcp_init = {
+ .prli = fc_rport_fcp_prli,
+};
+
+/**
+ * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ */
+static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ return FC_SPP_RESP_INVL;
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for type 0 service parameters.
+ *
+ * This handles the special case of type 0 which is always successful
+ * but doesn't do anything otherwise.
+ */
+struct fc4_prov fc_rport_t0_prov = {
+ .prli = fc_rport_t0_prli,
+};
+
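+/*
+ * Illustrative sketch only (identifiers hypothetical): an FC-4 target-mode
+ * provider would plug into the same fc4_prov interface by supplying its own
+ * PRLI/PRLO handlers:
+ *
+ *	static struct fc4_prov my_tgt_prov = {
+ *		.prli = my_tgt_prli,
+ *		.prlo = my_tgt_prlo,
+ *	};
+ */
+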
+/**
* fc_setup_rport() - Initialize the rport_event_queue
*/
-int fc_setup_rport()
+int fc_setup_rport(void)
{
rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
if (!rport_event_queue)
@@ -1900,7 +2019,7 @@ int fc_setup_rport()
/**
* fc_destroy_rport() - Destroy the rport_event_queue
*/
-void fc_destroy_rport()
+void fc_destroy_rport(void)
{
destroy_workqueue(rport_event_queue);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index da8b61543ee4..0c550d5b9133 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3352,6 +3352,47 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
}
EXPORT_SYMBOL_GPL(iscsi_session_get_param);
+int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
+ enum iscsi_param param, char *buf)
+{
+ struct sockaddr_in6 *sin6 = NULL;
+ struct sockaddr_in *sin = NULL;
+ int len;
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)addr;
+ break;
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (sin)
+ len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
+ else
+ len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
+ break;
+ case ISCSI_PARAM_CONN_PORT:
+ if (sin)
+ len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
+ else
+ len = sprintf(buf, "%hu\n",
+ be16_to_cpu(sin6->sin6_port));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
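+/*
+ * Illustrative usage sketch (identifiers hypothetical): a transport that
+ * keeps the connection's peer address in a struct sockaddr_storage could
+ * answer connection address and port queries with this helper:
+ *
+ *	case ISCSI_PARAM_CONN_ADDRESS:
+ *	case ISCSI_PARAM_CONN_PORT:
+ *		return iscsi_conn_get_addr_param(&my_conn->peer_sockaddr,
+ *						 param, buf);
+ */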
+
int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
@@ -3416,9 +3457,6 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
case ISCSI_HOST_PARAM_INITIATOR_NAME:
len = sprintf(buf, "%s\n", ihost->initiatorname);
break;
- case ISCSI_HOST_PARAM_IPADDRESS:
- len = sprintf(buf, "%s\n", ihost->local_address);
- break;
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 18f33cd54411..9dafe64e7c7a 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -46,11 +46,3 @@ config SCSI_SAS_HOST_SMP
Allows sas hosts to receive SMP frames. Selecting this
option builds an SMP interpreter into libsas. Say
N here if you want to save the few kb this consumes.
-
-config SCSI_SAS_LIBSAS_DEBUG
- bool "Compile the SAS Domain Transport Attributes in debug mode"
- default y
- depends on SCSI_SAS_LIBSAS
- help
- Compiles the SAS Layer in debug mode. In debug mode, the
- SAS Layer prints diagnostic and debug messages.
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index 1ad1323c60fa..2e70140f70c3 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -21,10 +21,6 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
-ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y)
- EXTRA_CFLAGS += -DSAS_DEBUG
-endif
-
obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
libsas-y += sas_init.o \
sas_phy.o \
@@ -36,4 +32,4 @@ libsas-y += sas_init.o \
sas_scsi_host.o \
sas_task.o
libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
-libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o \ No newline at end of file
+libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4d3b704ede1c..31fc21f4d831 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -71,13 +71,13 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
case SAS_SG_ERR:
return AC_ERR_INVALID;
- case SAM_STAT_CHECK_CONDITION:
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
SAS_DPRINTK("%s: Saw error %d. What to do?\n",
__func__, ts->stat);
return AC_ERR_OTHER;
+ case SAM_STAT_CHECK_CONDITION:
case SAS_ABORTED_TASK:
return AC_ERR_DEV;
@@ -107,13 +107,15 @@ static void sas_ata_task_done(struct sas_task *task)
sas_ha = dev->port->ha;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
- if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) {
+ if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
+ ((stat->stat == SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
dev->sata_dev.sstatus = resp->sstatus;
dev->sata_dev.serror = resp->serror;
dev->sata_dev.scontrol = resp->scontrol;
- } else if (stat->stat != SAM_STAT_GOOD) {
+ } else {
ac = sas_to_ata_err(stat);
if (ac) {
SAS_DPRINTK("%s: SAS error %x\n", __func__,
@@ -305,55 +307,6 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
}
}
-static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
- u32 val)
-{
- struct domain_device *dev = link->ap->private_data;
-
- SAS_DPRINTK("STUB %s\n", __func__);
- switch (sc_reg_in) {
- case SCR_STATUS:
- dev->sata_dev.sstatus = val;
- break;
- case SCR_CONTROL:
- dev->sata_dev.scontrol = val;
- break;
- case SCR_ERROR:
- dev->sata_dev.serror = val;
- break;
- case SCR_ACTIVE:
- dev->sata_dev.ap->link.sactive = val;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
- u32 *val)
-{
- struct domain_device *dev = link->ap->private_data;
-
- SAS_DPRINTK("STUB %s\n", __func__);
- switch (sc_reg_in) {
- case SCR_STATUS:
- *val = dev->sata_dev.sstatus;
- return 0;
- case SCR_CONTROL:
- *val = dev->sata_dev.scontrol;
- return 0;
- case SCR_ERROR:
- *val = dev->sata_dev.serror;
- return 0;
- case SCR_ACTIVE:
- *val = dev->sata_dev.ap->link.sactive;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static struct ata_port_operations sas_sata_ops = {
.prereset = ata_std_prereset,
.softreset = NULL,
@@ -367,8 +320,6 @@ static struct ata_port_operations sas_sata_ops = {
.qc_fill_rtf = sas_ata_qc_fill_rtf,
.port_start = ata_sas_port_start,
.port_stop = ata_sas_port_stop,
- .scr_read = sas_ata_scr_read,
- .scr_write = sas_ata_scr_write
};
static struct ata_port_info sata_port_info = {
@@ -801,7 +752,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
if (!dev_is_sata(ddev))
continue;
-
+
ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
ata_scsi_port_error_handler(shost, ap);
}
@@ -834,13 +785,13 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
LIST_HEAD(sata_q);
ap = NULL;
-
+
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct domain_device *ddev = cmd_to_domain_dev(cmd);
if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
continue;
- if(ap && ap != ddev->sata_dev.ap)
+ if (ap && ap != ddev->sata_dev.ap)
continue;
ap = ddev->sata_dev.ap;
rtn = 1;
@@ -848,8 +799,21 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
}
if (!list_empty(&sata_q)) {
- ata_port_printk(ap, KERN_DEBUG,"sas eh calling libata cmd error handler\n");
+ ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
ata_scsi_cmd_error_handler(shost, ap, &sata_q);
+ /*
+ * ata's error handler may leave the cmd on the list
+ * so make sure they don't remain on a stack list
+ * about to go out of scope.
+ *
+ * This looks strange, since the commands are
+ * now part of no list, but the next error
+ * action will be ata_port_error_handler()
+ * which takes no list and sweeps them up
+ * anyway from the ata tag array.
+ */
+ while (!list_empty(&sata_q))
+ list_del_init(sata_q.next);
}
} while (ap);
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index c17c25030f1c..fc460933575c 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -24,8 +24,6 @@
#include "sas_dump.h"
-#ifdef SAS_DEBUG
-
static const char *sas_hae_str[] = {
[0] = "HAE_RESET",
};
@@ -72,5 +70,3 @@ void sas_dump_port(struct asd_sas_port *port)
SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
}
-
-#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
index 47b45d4f5258..800e4c69093f 100644
--- a/drivers/scsi/libsas/sas_dump.h
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -24,19 +24,7 @@
#include "sas_internal.h"
-#ifdef SAS_DEBUG
-
void sas_dprint_porte(int phyid, enum port_event pe);
void sas_dprint_phye(int phyid, enum phy_event pe);
void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
void sas_dump_port(struct asd_sas_port *port);
-
-#else /* SAS_DEBUG */
-
-static inline void sas_dprint_porte(int phyid, enum port_event pe) { }
-static inline void sas_dprint_phye(int phyid, enum phy_event pe) { }
-static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha,
- enum ha_event he) { }
-static inline void sas_dump_port(struct asd_sas_port *port) { }
-
-#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 505ffe358293..f3f693b772ac 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -244,6 +244,11 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
* dev to host FIS as described in section G.5 of
* sas-2 r 04b */
dr = &((struct smp_resp *)disc_resp)->disc;
+ if (memcmp(dev->sas_addr, dr->attached_sas_addr,
+ SAS_ADDR_SIZE) == 0) {
+ sas_printk("Found loopback topology, just ignore it!\n");
+ return 0;
+ }
if (!(dr->attached_dev_type == 0 &&
dr->attached_sata_dev))
break;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 0001374bd6b2..8b538bd1ff2b 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -33,11 +33,7 @@
#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
-#ifdef SAS_DEBUG
-#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
-#else
-#define SAS_DPRINTK(fmt, ...)
-#endif
+#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__)
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 67758ea8eb7f..f6e189f40917 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -681,11 +681,10 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
unsigned long flags;
- enum blk_eh_timer_return rtn;
+ enum blk_eh_timer_return rtn;
if (sas_ata_timed_out(cmd, task, &rtn))
return rtn;
-
if (!task) {
cmd->request->timeout /= 2;
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index ad05d6edb8f6..14de249917f8 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -19,10 +19,8 @@
# *******************************************************************/
######################################################################
-ifneq ($(GCOV),)
- EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage
- EXTRA_CFLAGS += -O0
-endif
+ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
+ccflags-$(GCOV) += -O0
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 746dd3d7a092..b64c6da870d3 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -325,6 +325,7 @@ struct lpfc_vport {
#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
+#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -348,6 +349,8 @@ struct lpfc_vport {
uint32_t fc_myDID; /* fibre channel S_ID */
uint32_t fc_prevDID; /* previous fibre channel S_ID */
+ struct lpfc_name fabric_portname;
+ struct lpfc_name fabric_nodename;
int32_t stopped; /* HBA has not been restarted since last ERATT */
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
@@ -372,6 +375,7 @@ struct lpfc_vport {
#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
+#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */
#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
@@ -382,6 +386,7 @@ struct lpfc_vport {
struct timer_list fc_fdmitmo;
struct timer_list els_tmofunc;
+ struct timer_list delayed_disc_tmo;
int unreg_vpi_cmpl;
@@ -548,6 +553,8 @@ struct lpfc_hba {
#define LPFC_SLI3_CRP_ENABLED 0x08
#define LPFC_SLI3_BG_ENABLED 0x20
#define LPFC_SLI3_DSS_ENABLED 0x40
+#define LPFC_SLI4_PERFH_ENABLED 0x80
+#define LPFC_SLI4_PHWQ_ENABLED 0x100
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@@ -655,7 +662,7 @@ struct lpfc_hba {
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
-
+ uint32_t cfg_enable_dss;
lpfc_vpd_t vpd; /* vital product data */
struct pci_dev *pcidev;
@@ -792,6 +799,10 @@ struct lpfc_hba {
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
+ /* iDiag debugfs sub-directory */
+ struct dentry *idiag_root;
+ struct dentry *idiag_pci_cfg;
+ struct dentry *idiag_que_info;
#endif
/* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3512abb8a587..e7c020df12fa 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -623,10 +623,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
int status = 0;
int cnt = 0;
int i;
+ int rc;
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE_PREP);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -652,7 +656,10 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
}
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl, type);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -671,6 +678,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
*
* Notes:
* Assumes any error from lpfc_do_offline() will be negative.
+ * Do not make this function static.
*
* Returns:
* lpfc_do_offline() return code if not zero
@@ -682,6 +690,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
{
struct completion online_compl;
int status = 0;
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EIO;
@@ -692,8 +701,11 @@ lpfc_selective_reset(struct lpfc_hba *phba)
return status;
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -812,14 +824,17 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
int status=0;
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
wait_for_completion(&online_compl);
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -1279,6 +1294,28 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_dss_show - Return the current state of dss and the configured state
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_dss_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+ (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
+ (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
+ "" : "Not ");
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1597,13 +1634,13 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_init(name, defval, minval, maxval)
#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1611,7 +1648,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1622,7 +1659,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1630,7 +1667,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1641,13 +1678,13 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_init(name, defval, minval, maxval)
#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1655,7 +1692,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1666,7 +1703,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1674,7 +1711,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1718,7 +1755,7 @@ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
-
+static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -1813,6 +1850,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
int stat1=0, stat2=0;
unsigned int i, j, cnt=count;
u8 wwpn[8];
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
@@ -1863,7 +1901,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
"0463 lpfc_soft_wwpn attribute set failed to "
"reinit adapter - %d\n", stat1);
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
+ rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (stat2)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1954,7 +1996,7 @@ static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
static int lpfc_poll = 0;
-module_param(lpfc_poll, int, 0);
+module_param(lpfc_poll, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
" 0 - none,"
" 1 - poll with interrupts enabled"
@@ -1964,21 +2006,21 @@ static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
int lpfc_sli_mode = 0;
-module_param(lpfc_sli_mode, int, 0);
+module_param(lpfc_sli_mode, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
int lpfc_enable_npiv = 1;
-module_param(lpfc_enable_npiv, int, 0);
+module_param(lpfc_enable_npiv, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
lpfc_param_show(enable_npiv);
lpfc_param_init(enable_npiv, 1, 0, 1);
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
int lpfc_enable_rrq;
-module_param(lpfc_enable_rrq, int, 0);
+module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
lpfc_param_show(enable_rrq);
lpfc_param_init(enable_rrq, 0, 0, 1);
@@ -2040,7 +2082,7 @@ static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, 1);
+module_param(lpfc_iocb_cnt, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_iocb_cnt,
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
lpfc_param_show(iocb_cnt);
@@ -2192,7 +2234,7 @@ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
# disappear until the timer expires. Value range is [0,255]. Default
# value is 30.
*/
-module_param(lpfc_devloss_tmo, int, 0);
+module_param(lpfc_devloss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_devloss_tmo,
"Seconds driver will hold I/O waiting "
"for a device to come back");
@@ -2302,7 +2344,7 @@ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
# Default value of this parameter is 1.
*/
static int lpfc_restrict_login = 1;
-module_param(lpfc_restrict_login, int, 0);
+module_param(lpfc_restrict_login, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_restrict_login,
"Restrict virtual ports login to remote initiators.");
lpfc_vport_param_show(restrict_login);
@@ -2473,7 +2515,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
static int lpfc_topology = 0;
-module_param(lpfc_topology, int, 0);
+module_param(lpfc_topology, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
lpfc_param_show(topology)
lpfc_param_init(topology, 0, 0, 6)
@@ -2915,7 +2957,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
}
static int lpfc_link_speed = 0;
-module_param(lpfc_link_speed, int, 0);
+module_param(lpfc_link_speed, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
lpfc_param_show(link_speed)
@@ -3043,7 +3085,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
}
static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, 1);
+module_param(lpfc_aer_support, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
lpfc_param_show(aer_support)
@@ -3155,7 +3197,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
# The value is set in milliseconds.
*/
static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, 0);
+module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
"Use command completion time to control queue depth");
lpfc_vport_param_show(max_scsicmpl_time);
@@ -3331,7 +3373,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
*/
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
-module_param(lpfc_prot_mask, uint, 0);
+module_param(lpfc_prot_mask, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
/*
@@ -3343,9 +3385,28 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
#
*/
unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, 0);
+module_param(lpfc_prot_guard, byte, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+/*
+ * Delay initial NPort discovery when Clean Address bit is cleared in
+ * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
+ * This parameter can have value 0 or 1.
+ * When this parameter is set to 0, no delay is added to the initial
+ * discovery.
+ * When this parameter is set to a non-zero value, initial NPort discovery is
+ * delayed by ra_tov seconds when the Clean Address bit is cleared in the
+ * FLOGI/FDISC accept and the FCID/Fabric name/Fabric portname is changed.
+ * The driver always delays NPort discovery for subsequent FLOGI/FDISC
+ * completions when the Clean Address bit is cleared in the FLOGI/FDISC
+ * accept and the FCID/Fabric name/Fabric portname is changed.
+ * Default value is 0.
+ */
+int lpfc_delay_discovery;
+module_param(lpfc_delay_discovery, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_delay_discovery,
+ "Delay NPort discovery when Clean Address bit is cleared. "
+ "Allowed values: 0,1.");
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
@@ -3437,6 +3498,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
+ &dev_attr_lpfc_dss,
NULL,
};
@@ -4639,6 +4701,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
+ phba->cfg_enable_dss = 1;
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 17fde522c84a..3d40023f4804 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -53,9 +53,9 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
-void lpfc_sli4_params(struct lpfcMboxq *);
+void lpfc_pc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
-
+int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
@@ -167,6 +167,8 @@ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
+void lpfc_delayed_disc_tmo(unsigned long);
+void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@@ -341,6 +343,7 @@ extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
extern int lpfc_sli_mode;
extern int lpfc_enable_npiv;
+extern int lpfc_delay_discovery;
int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
@@ -423,6 +426,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
uint16_t, uint16_t, uint16_t);
void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
-void lpfc_cleanup_vports_rrqs(struct lpfc_vport *);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c004fa9a681e..d9edfd90d7ff 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1738,6 +1738,55 @@ fdmi_cmd_exit:
return 1;
}
+/**
+ * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
+ * @ptr: Context object of the timer.
+ *
+ * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
+ * the worker thread.
+ **/
+void
+lpfc_delayed_disc_tmo(unsigned long ptr)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
+ unsigned long iflag;
+
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
+ if (!tmo_posted)
+ vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
+ * handle delayed discovery.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This function starts NPort discovery for the vport.
+ **/
+void
+lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
+ if (!(vport->fc_flag & FC_DISC_DELAYED)) {
+ spin_unlock_irq(shost->host_lock);
+ return;
+ }
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_do_scr_ns_plogi(vport->phba, vport);
+}
+
void
lpfc_fdmi_tmo(unsigned long ptr)
{
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a80d938fafc9..a753581509d6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -57,8 +57,8 @@
* # mount -t debugfs none /sys/kernel/debug
*
* The lpfc debugfs directory hierarchy is:
- * lpfc/lpfcX/vportY
- * where X is the lpfc hba unique_id
+ * /sys/kernel/debug/lpfc/fnX/vportY
+ * where X is the lpfc hba function unique_id
* where Y is the vport VPI on that hba
*
* Debugging services available per vport:
@@ -82,52 +82,34 @@
* the HBA. X MUST also be a power of 2.
*/
static int lpfc_debugfs_enable = 1;
-module_param(lpfc_debugfs_enable, int, 0);
+module_param(lpfc_debugfs_enable, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_disc_trc;
-module_param(lpfc_debugfs_max_disc_trc, int, 0);
+module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
"Set debugfs discovery trace depth");
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_slow_ring_trc;
-module_param(lpfc_debugfs_max_slow_ring_trc, int, 0);
+module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
"Set debugfs slow ring trace depth");
static int lpfc_debugfs_mask_disc_trc;
-module_param(lpfc_debugfs_mask_disc_trc, int, 0);
+module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
"Set debugfs discovery trace mask");
#include <linux/debugfs.h>
-/* size of output line, for discovery_trace and slow_ring_trace */
-#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
-
-/* nodelist output buffer size */
-#define LPFC_NODELIST_SIZE 8192
-#define LPFC_NODELIST_ENTRY_SIZE 120
-
-/* dumpHBASlim output buffer size */
-#define LPFC_DUMPHBASLIM_SIZE 4096
-
-/* dumpHostSlim output buffer size */
-#define LPFC_DUMPHOSTSLIM_SIZE 4096
-
-/* hbqinfo output buffer size */
-#define LPFC_HBQINFO_SIZE 8192
-
-struct lpfc_debug {
- char *buffer;
- int len;
-};
-
static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
static unsigned long lpfc_debugfs_start_time = 0L;
+/* iDiag */
+static struct lpfc_idiag idiag;
+
/**
* lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
* @vport: The vport to gather the log info from.
@@ -996,8 +978,6 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
return nbytes;
}
-
-
/**
* lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
* @inode: The inode pointer that contains a vport pointer.
@@ -1099,6 +1079,7 @@ lpfc_debugfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
+
return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
debug->len);
}
@@ -1137,6 +1118,776 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
return 0;
}
+/*
+ * iDiag debugfs file access methods
+ */
+
+/*
+ * iDiag PCI config space register access methods:
+ *
+ * Read, write, read-modify-write (set bits), and read-modify-write (clear
+ * bits) accesses to the PCI config space registers of SLI4 PCI functions
+ * are provided. In the proper SLI4 PCI function's debugfs iDiag directory,
+ *
+ * /sys/kernel/debug/lpfc/fn<#>/iDiag
+ *
+ * the access is through the debugfs entry pciCfg:
+ *
+ * 1. For PCI config space register read access, there are two read methods:
+ * A) read a single PCI config space register in the size of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through
+ * the 4K extended PCI config space.
+ *
+ * A) Read a single PCI config space register consists of two steps:
+ *
+ * Step-1: Set up PCI config space register read command, the command
+ * syntax is,
+ *
+ * echo 1 <where> <count> > pciCfg
+ *
+ * where, 1 is the iDiag command for PCI config space read, <where> is the
+ * offset from the beginning of the device's PCI config space to read from,
+ * and <count> is the size of PCI config space register data to read back,
+ * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits
+ * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes).
+ *
+ * Step-2: Perform the debugfs read operation to execute the idiag command
+ * set up in Step-1,
+ *
+ * cat pciCfg
+ *
+ * Examples:
+ * To read PCI device's vendor-id and device-id from PCI config space,
+ *
+ * echo 1 0 4 > pciCfg
+ * cat pciCfg
+ *
+ * To read the PCI device's current command register from config space,
+ *
+ * echo 1 4 2 > pciCfg
+ * cat pciCfg
+ *
+ * B) Browse through the entire 4K extended PCI config space also consists
+ * of two steps:
+ *
+ * Step-1: Set up PCI config space register browsing command, the command
+ * syntax is,
+ *
+ * echo 1 0 4096 > pciCfg
+ *
+ * where, 1 is the iDiag command for PCI config space read, 0 must be used
+ * as the offset for PCI config space register browse, and 4096 must be
+ * used as the count for PCI config space register browse.
+ *
+ * Step-2: Repeatedly issue the debugfs read operation to browse through
+ * the entire set of PCI config space registers:
+ *
+ * cat pciCfg
+ * cat pciCfg
+ * cat pciCfg
+ * ...
+ *
+ * When browsing reaches the end of the 4K PCI config space, the browse
+ * method wraps around and starts reading from the beginning again.
+ *
+ * 2. For PCI config space register write access, it supports a single PCI
+ * config space register write in the size of a byte (8 bits), a word
+ * (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 2 <where> <count> <value> > pciCfg
+ *
+ * where, 2 is the iDiag command for PCI config space write, <where> is
+ * the offset from the beginning of the device's PCI config space to write
+ * into, <count> is the size of data to write into the PCI config space,
+ * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits
+ * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and <value>
+ * is the data to be written into the PCI config space register at the
+ * offset.
+ *
+ * Examples:
+ * To disable PCI device's interrupt assertion,
+ *
+ * 1) Read in device's PCI config space register command field <cmd>:
+ *
+ * echo 1 4 2 > pciCfg
+ * cat pciCfg
+ *
+ * 2) Set bit 10 (Interrupt Disable bit) in the <cmd>:
+ *
+ * <cmd> = <cmd> | (1 << 10)
+ *
+ * 3) Write the modified command back:
+ *
+ * echo 2 4 2 <cmd> > pciCfg
+ *
+ * 3. For PCI config space register set bits access, it supports a single PCI
+ * config space register set bits in the size of a byte (8 bits), a word
+ * (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 3 <where> <count> <bitmask> > pciCfg
+ *
+ * where, 3 is the iDiag command for PCI config space set bits, <where> is
+ * the offset from the beginning of the device's PCI config space to set
+ * bits into, <count> is the size of the bitmask to set into the PCI config
+ * space, it will be 1 for setting a byte (8 bits), 2 for setting a word
+ * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and
+ * <bitmask> is the bitmask, indicating the bits to be set into the PCI
+ * config space register at the offset. The logic performed to the content
+ * of the PCI config space register, regval, is,
+ *
+ * regval |= <bitmask>
+ *
+ * 4. For PCI config space register clear bits access, it supports a single
+ * PCI config space register clear bits in the size of a byte (8 bits),
+ * a word (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 4 <where> <count> <bitmask> > pciCfg
+ *
+ * where, 4 is the iDiag command for PCI config space clear bits, <where>
+ * is the offset from the beginning of the device's PCI config space to
+ * clear bits from, <count> is the size of the bitmask to set into the PCI
+ * config space, it will be 1 for setting a byte (8 bits), 2 for setting
+ * a word(16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4
+ * bytes), and <bitmask> is the bitmask, indicating the bits to be cleared
+ * from the PCI config space register at the offset. the logic performed
+ * to the content of the PCI config space register, regval, is,
+ *
+ * regval &= ~<bitmask>
+ *
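+ * For example (illustrative), to first set and then clear bit 10
+ * (Interrupt Disable) in the 16-bit command register at offset 4:
+ *
+ * echo 3 4 2 0x400 > pciCfg
+ * echo 4 4 2 0x400 > pciCfg
+ *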
+ * Note, for all single register read, write, set bits, or clear bits access,
+ * the offset (<where>) must be aligned with the size of the data:
+ *
+ * For data size of byte (8 bits), the offset must be aligned to the byte
+ * boundary; for data size of word (16 bits), the offset must be aligned
+ * to the word boundary; while for data size of dword (32 bits), the offset
+ * must be aligned to the dword boundary. Otherwise, the interface will
+ * return the error:
+ *
+ * "-bash: echo: write error: Invalid argument".
+ *
+ * For example:
+ *
+ * echo 1 2 4 > pciCfg
+ * -bash: echo: write error: Invalid argument
+ *
+ * Note also, all of the numbers in the command fields for the read, write,
+ * set bits, and clear bits PCI config space register commands can be
+ * either decimal or hex.
+ *
+ * For example,
+ * echo 1 0 4096 > pciCfg
+ *
+ * will be the same as
+ * echo 1 0 0x1000 > pciCfg
+ *
+ * And,
+ * echo 2 155 1 10 > pciCfg
+ *
+ * will be
+ * echo 2 0x9b 1 0xa > pciCfg
+ */
+
+/**
+ * lpfc_idiag_cmd_get - Get and parse idiag debugfs commands from user space
+ * @buf: The pointer to the user space buffer.
+ * @nbytes: The number of bytes in the user space buffer.
+ * @idiag_cmd: pointer to the idiag command struct.
+ *
+ * This routine reads data from the debugfs user space buffer and parses it
+ * to extract the idiag command and arguments. White space between the
+ * fields is used as the parsing separator.
+ *
+ * This routine returns 0 when successful; in error conditions it returns a
+ * proper error code back to the user space.
+ */
+static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
+ struct lpfc_idiag_cmd *idiag_cmd)
+{
+ char mybuf[64];
+ char *pbuf, *step_str;
+ int bsize, i;
+
+ /* Protect copy from user */
+ if (!access_ok(VERIFY_READ, buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+ memset(idiag_cmd, 0, sizeof(*idiag_cmd));
+ bsize = min(nbytes, (sizeof(mybuf)-1));
+
+ if (copy_from_user(mybuf, buf, bsize))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+ step_str = strsep(&pbuf, "\t ");
+
+ /* The opcode must be present */
+ if (!step_str)
+ return -EINVAL;
+
+ idiag_cmd->opcode = simple_strtol(step_str, NULL, 0);
+ if (idiag_cmd->opcode == 0)
+ return -EINVAL;
+
+ for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) {
+ step_str = strsep(&pbuf, "\t ");
+ if (!step_str)
+ return 0;
+ idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0);
+ }
+ return 0;
+}
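+
+/*
+ * Illustrative note (no new functionality): a user command such as
+ *
+ * echo 2 0x9b 1 0xa > pciCfg
+ *
+ * is parsed by lpfc_idiag_cmd_get() into opcode = 2 (PCI config space
+ * write), data[0] = 0x9b (offset), data[1] = 1 (byte access), and
+ * data[2] = 0xa (value to write).
+ */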
+
+/**
+ * lpfc_idiag_open - idiag open debugfs
+ * @inode: The inode pointer that contains a pointer to phba.
+ * @file: The file pointer to attach the file operation.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It
+ * gets the reference to phba from the i_private field in @inode, it then
+ * allocates buffer for the file operation, performs the necessary PCI config
+ * space read into the allocated buffer according to the idiag user command
+ * setup, and then returns a pointer to buffer in the private_data field in
+ * @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a
+ * negative error value.
+ **/
+static int
+lpfc_idiag_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->i_private = inode->i_private;
+ debug->buffer = NULL;
+ file->private_data = debug;
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_release - Release idiag access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine is the generic release routine for the idiag access file
+ * operation, it frees the buffer that was allocated when the debugfs file
+ * was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_cmd_release - Release idiag cmd access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file
+ * was opened. It also resets the fields in the idiag command struct when
+ * the command is not a continuous browse of the data structure.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_cmd_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ /* Read PCI config register, if not read all, clear command fields */
+ if ((debug->op == LPFC_IDIAG_OP_RD) &&
+ (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD))
+ if ((idiag.cmd.data[1] == sizeof(uint8_t)) ||
+ (idiag.cmd.data[1] == sizeof(uint16_t)) ||
+ (idiag.cmd.data[1] == sizeof(uint32_t)))
+ memset(&idiag, 0, sizeof(idiag));
+
+ /* Write PCI config register, clear command fields */
+ if ((debug->op == LPFC_IDIAG_OP_WR) &&
+ (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR))
+ memset(&idiag, 0, sizeof(idiag));
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci config space according to the
+ * idiag command, and copies to user @buf. Depending on the PCI config space
+ * read command setup, it does either a single register read of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all
+ * registers from the 4K extended PCI config space.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE;
+ int where, count;
+ char *pbuffer;
+ struct pci_dev *pdev;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ } else
+ return 0;
+
+ /* Read single PCI config space register */
+ switch (count) {
+ case SIZE_U8: /* byte (8 bits) */
+ pci_read_config_byte(pdev, where, &u8val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %02x\n", where, u8val);
+ break;
+ case SIZE_U16: /* word (16 bits) */
+ pci_read_config_word(pdev, where, &u16val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %04x\n", where, u16val);
+ break;
+ case SIZE_U32: /* double word (32 bits) */
+ pci_read_config_dword(pdev, where, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %08x\n", where, u32val);
+ break;
+ case LPFC_PCI_CFG_SIZE: /* browse all */
+ goto pcicfg_browse;
+ break;
+ default:
+ /* illegal count */
+ len = 0;
+ break;
+ }
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+pcicfg_browse:
+
+ /* Browse all PCI config space registers */
+ offset_label = idiag.offset.last_rd;
+ offset = offset_label;
+
+ /* Read PCI config space */
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: ", offset_label);
+ while (index > 0) {
+ pci_read_config_dword(pdev, offset, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%08x ", u32val);
+ offset += sizeof(uint32_t);
+ index -= sizeof(uint32_t);
+ if (!index)
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n");
+ else if (!(index % (8 * sizeof(uint32_t)))) {
+ offset_label += (8 * sizeof(uint32_t));
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n%03x: ", offset_label);
+ }
+ }
+
+ /* Set up the offset for next portion of pci cfg read */
+ idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
+ if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+ idiag.offset.last_rd = 0;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for the PCI config space read or write
+ * command accordingly. In the case of a PCI config space read command, it
+ * sets up the command in the idiag command struct for the debugfs read
+ * operation. In the case of a PCI config space write operation, it executes
+ * the write operation into the PCI config space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In error conditions, it returns a proper error code back to the user
+ * space.
+ */
+static ssize_t
+lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t where, value, count;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+ struct pci_dev *pdev;
+ int rc;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return -EFAULT;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc)
+ return rc;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ /* Read command from PCI config space, set up command fields */
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ if (count == LPFC_PCI_CFG_SIZE) {
+ if (where != 0)
+ goto error_out;
+ } else if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ }
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ /* Write command to PCI config space, read-modify-write */
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ value = idiag.cmd.data[2];
+ /* Sanity checks */
+ if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_byte(pdev, where,
+ (uint8_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val |= (uint8_t)value;
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val &= (uint8_t)(~value);
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_word(pdev, where,
+ (uint16_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val |= (uint16_t)value;
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val &= (uint16_t)(~value);
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_dword(pdev, where, value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val |= value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val &= ~value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ }
+ } else
+ /* All other opcodes are illegal for now */
+ goto error_out;
+
+ return nbytes;
+error_out:
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
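
The ST and CL opcodes above implement a read-modify-write on the selected PCI config register: ST ORs the supplied value in, CL ANDs in its complement. A minimal stand-alone sketch of that arithmetic (sample values only, not part of the driver):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t reg = 0x41;        /* pretend current PCI config byte */
		uint8_t set_mask = 0x06;   /* value supplied with the ST opcode */
		uint8_t clr_mask = 0x40;   /* value supplied with the CL opcode */

		reg |= set_mask;           /* ST: 0x41 -> 0x47 */
		reg &= (uint8_t)~clr_mask; /* CL: 0x47 -> 0x07 */
		printf("0x%02x\n", reg);
		return 0;
	}
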
+
+/**
+ * lpfc_idiag_queinfo_read - idiag debugfs read queue information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the @phba SLI4 PCI function queue information and
+ * copies it to the user buffer @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int len = 0, fcp_qidx;
+ char *pbuffer;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ /* Get slow-path event queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path EQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.sp_eq->queue_id,
+ phba->sli4_hba.sp_eq->entry_count,
+ phba->sli4_hba.sp_eq->host_index,
+ phba->sli4_hba.sp_eq->hba_index);
+
+ /* Get fast-path event queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path EQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
+ phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get mailbox complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Mailbox CQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.mbx_cq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], CQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.mbx_cq->queue_id,
+ phba->sli4_hba.mbx_cq->entry_count,
+ phba->sli4_hba.mbx_cq->host_index,
+ phba->sli4_hba.mbx_cq->hba_index);
+
+ /* Get slow-path complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path CQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.els_cq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], CQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.els_cq->queue_id,
+ phba->sli4_hba.els_cq->entry_count,
+ phba->sli4_hba.els_cq->host_index,
+ phba->sli4_hba.els_cq->hba_index);
+
+ /* Get fast-path complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path CQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get mailbox queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Mailbox MQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.mbx_wq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], MQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.mbx_wq->entry_count,
+ phba->sli4_hba.mbx_wq->host_index,
+ phba->sli4_hba.mbx_wq->hba_index);
+
+ /* Get slow-path work queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path WQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.els_wq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], WQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.els_wq->queue_id,
+ phba->sli4_hba.els_wq->entry_count,
+ phba->sli4_hba.els_wq->host_index,
+ phba->sli4_hba.els_wq->hba_index);
+
+ /* Get fast-path work queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path WQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], WQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get receive queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path RQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.hdr_rq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], RHQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.hdr_rq->queue_id,
+ phba->sli4_hba.hdr_rq->entry_count,
+ phba->sli4_hba.hdr_rq->host_index,
+ phba->sli4_hba.hdr_rq->hba_index);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], RDQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.dat_rq->queue_id,
+ phba->sli4_hba.dat_rq->entry_count,
+ phba->sli4_hba.dat_rq->host_index,
+ phba->sli4_hba.dat_rq->hba_index);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
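
Each snprintf() call above is handed the remaining space in the 2 KB report buffer (LPFC_QUE_INFO_GET_BUF_SIZE - len). Because snprintf() returns the length it would have written rather than the length actually stored, a hardened variant of this append idiom clamps the running offset. A small hypothetical helper (not part of the driver) illustrating that pattern:

	/*
	 * Hypothetical bounded-append helper (not in the driver); keeps the
	 * running offset 'len' within 'size' even when output is truncated.
	 * Assumes <stdarg.h> and the kernel's vsnprintf() from <linux/kernel.h>.
	 */
	static int idiag_append(char *buf, int len, int size, const char *fmt, ...)
	{
		va_list args;
		int n;

		if (len < 0 || len >= size)
			return size ? size - 1 : 0;
		va_start(args, fmt);
		n = vsnprintf(buf + len, size - len, fmt, args);
		va_end(args);
		if (n < 0)
			return len;
		/* vsnprintf() reports the would-be length; clamp on truncation */
		return (n < size - len) ? len + n : size - 1;
	}
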
+
#undef lpfc_debugfs_op_disc_trc
static const struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@@ -1213,6 +1964,28 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;
+
+/*
+ * File operations for the iDiag debugfs
+ */
+#undef lpfc_idiag_op_pciCfg
+static const struct file_operations lpfc_idiag_op_pciCfg = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_pcicfg_read,
+ .write = lpfc_idiag_pcicfg_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_queInfo
+static const struct file_operations lpfc_idiag_op_queInfo = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .read = lpfc_idiag_queinfo_read,
+ .release = lpfc_idiag_release,
+};
+
#endif
/**
@@ -1249,8 +2022,8 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
- /* Setup lpfcX directory for specific HBA */
- snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
+ /* Setup funcX directory for specific HBA PCI function */
+ snprintf(name, sizeof(name), "fn%d", phba->brd_no);
if (!phba->hba_debugfs_root) {
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
@@ -1275,28 +2048,38 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
/* Setup dumpHBASlim */
- snprintf(name, sizeof(name), "dumpHBASlim");
- phba->debug_dumpHBASlim =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHBASlim);
- if (!phba->debug_dumpHBASlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0413 Cannot create debugfs dumpHBASlim\n");
- goto debug_failed;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHBASlim");
+ phba->debug_dumpHBASlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHBASlim);
+ if (!phba->debug_dumpHBASlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0413 Cannot create debugfs "
+ "dumpHBASlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHBASlim = NULL;
/* Setup dumpHostSlim */
- snprintf(name, sizeof(name), "dumpHostSlim");
- phba->debug_dumpHostSlim =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHostSlim);
- if (!phba->debug_dumpHostSlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0414 Cannot create debugfs dumpHostSlim\n");
- goto debug_failed;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHostSlim");
+ phba->debug_dumpHostSlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHostSlim);
+ if (!phba->debug_dumpHostSlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0414 Cannot create debugfs "
+ "dumpHostSlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHostSlim = NULL;
/* Setup dumpData */
snprintf(name, sizeof(name), "dumpData");
@@ -1322,8 +2105,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
-
-
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -1342,7 +2123,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
}
-
snprintf(name, sizeof(name), "slow_ring_trace");
phba->debug_slow_ring_trc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -1434,6 +2214,53 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
"0409 Cant create debugfs nodelist\n");
goto debug_failed;
}
+
+ /*
+ * iDiag debugfs root entry points for SLI4 device only
+ */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ goto debug_failed;
+
+ snprintf(name, sizeof(name), "iDiag");
+ if (!phba->idiag_root) {
+ phba->idiag_root =
+ debugfs_create_dir(name, phba->hba_debugfs_root);
+ if (!phba->idiag_root) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2922 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ /* Initialize iDiag data structure */
+ memset(&idiag, 0, sizeof(idiag));
+ }
+
+ /* iDiag read PCI config space */
+ snprintf(name, sizeof(name), "pciCfg");
+ if (!phba->idiag_pci_cfg) {
+ phba->idiag_pci_cfg =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
+ if (!phba->idiag_pci_cfg) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2923 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ idiag.offset.last_rd = 0;
+ }
+
+ /* iDiag get PCI function queue information */
+ snprintf(name, sizeof(name), "queInfo");
+ if (!phba->idiag_que_info) {
+ phba->idiag_que_info =
+ debugfs_create_file(name, S_IFREG|S_IRUGO,
+ phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
+ if (!phba->idiag_que_info) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2924 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
debug_failed:
return;
#endif
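
The iDiag entries created above land under the per-PCI-function debugfs directory ("fn%d"/iDiag/pciCfg and "fn%d"/iDiag/queInfo). A userspace sketch that dumps the queInfo report for function 0; the debugfs mount point and the driver's root directory name are assumptions, only the fn0/iDiag/queInfo components come from this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[2048];   /* matches LPFC_QUE_INFO_GET_BUF_SIZE */
		ssize_t n;
		int fd = open("/sys/kernel/debug/lpfc/fn0/iDiag/queInfo", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}
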
@@ -1508,8 +2335,31 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
phba->debug_slow_ring_trc = NULL;
}
+ /*
+ * iDiag release
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->idiag_que_info) {
+ /* iDiag queInfo */
+ debugfs_remove(phba->idiag_que_info);
+ phba->idiag_que_info = NULL;
+ }
+ if (phba->idiag_pci_cfg) {
+ /* iDiag pciCfg */
+ debugfs_remove(phba->idiag_pci_cfg);
+ phba->idiag_pci_cfg = NULL;
+ }
+
+ /* Finally remove the iDiag debugfs root */
+ if (phba->idiag_root) {
+ /* iDiag root */
+ debugfs_remove(phba->idiag_root);
+ phba->idiag_root = NULL;
+ }
+ }
+
if (phba->hba_debugfs_root) {
- debugfs_remove(phba->hba_debugfs_root); /* lpfcX */
+ debugfs_remove(phba->hba_debugfs_root); /* fnX */
phba->hba_debugfs_root = NULL;
atomic_dec(&lpfc_debugfs_hba_count);
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 03c7313a1012..91b9a9427cda 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -22,6 +22,44 @@
#define _H_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* size of output line, for discovery_trace and slow_ring_trace */
+#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+/* dumpHBASlim output buffer size */
+#define LPFC_DUMPHBASLIM_SIZE 4096
+
+/* dumpHostSlim output buffer size */
+#define LPFC_DUMPHOSTSLIM_SIZE 4096
+
+/* hbqinfo output buffer size */
+#define LPFC_HBQINFO_SIZE 8192
+
+/* rdPciConf output buffer size */
+#define LPFC_PCI_CFG_SIZE 4096
+#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
+#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+
+/* queue info output buffer size */
+#define LPFC_QUE_INFO_GET_BUF_SIZE 2048
+
+#define SIZE_U8 sizeof(uint8_t)
+#define SIZE_U16 sizeof(uint16_t)
+#define SIZE_U32 sizeof(uint32_t)
+
+struct lpfc_debug {
+ char *i_private;
+ char op;
+#define LPFC_IDIAG_OP_RD 1
+#define LPFC_IDIAG_OP_WR 2
+ char *buffer;
+ int len;
+};
+
struct lpfc_debugfs_trc {
char *fmt;
uint32_t data1;
@@ -30,6 +68,26 @@ struct lpfc_debugfs_trc {
uint32_t seq_cnt;
unsigned long jif;
};
+
+struct lpfc_idiag_offset {
+ uint32_t last_rd;
+};
+
+#define LPFC_IDIAG_CMD_DATA_SIZE 4
+struct lpfc_idiag_cmd {
+ uint32_t opcode;
+#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
+#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
+#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
+#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+ uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
+};
+
+struct lpfc_idiag {
+ uint32_t active;
+ struct lpfc_idiag_cmd cmd;
+ struct lpfc_idiag_offset offset;
+};
#endif
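
The pciCfg write handler repeats the same bounds and alignment check once per access width; the SIZE_U8/U16/U32 macros defined above could back a single helper. A hypothetical sketch of such a helper (not part of the driver), assuming <linux/types.h> for bool and the definitions in this header:

	static inline bool
	lpfc_idiag_pcicfg_acc_ok(uint32_t where, uint32_t count)
	{
		/* Only byte, word, and dword accesses are meaningful */
		if (count != SIZE_U8 && count != SIZE_U16 && count != SIZE_U32)
			return false;
		/* Access must stay inside the 4 KB config space ... */
		if (where > LPFC_PCI_CFG_SIZE - count)
			return false;
		/* ... and be naturally aligned to its width */
		return (where % count) == 0;
	}
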
/* Mask for discovery_trace */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c62d567cc845..8e28edf9801e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -485,6 +485,59 @@ fail:
}
/**
+ * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is called from the FLOGI/FDISC completion handlers. It
+ * returns 1 when the FCID, Fabric portname, or Fabric nodename has changed
+ * in the completion service parameters, else it returns 0. It also sets a
+ * flag in the vport data structure to delay N_Port discovery after the
+ * FLOGI/FDISC completion if the Clean Address bit in the FLOGI/FDISC
+ * response is cleared and the FCID, Fabric portname, or Fabric nodename
+ * has changed.
+ *
+ * Return code
+ * 0 - FCID, Fabric Nodename, and Fabric portname are unchanged.
+ * 1 - FCID, Fabric Nodename, or Fabric portname has changed.
+ *
+ **/
+static uint8_t
+lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
+ struct serv_parm *sp)
+{
+ uint8_t fabric_param_changed = 0;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if ((vport->fc_prevDID != vport->fc_myDID) ||
+ memcmp(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name)) ||
+ memcmp(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name)))
+ fabric_param_changed = 1;
+
+ /*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ *
+ * If fabric parameter is changed and clean address bit is
+ * cleared delay nport discovery if
+ * - vport->fc_prevDID != 0 (not initial discovery) OR
+ * - lpfc_delay_discovery module parameter is set.
+ */
+ if (fabric_param_changed && !sp->cmn.clean_address_bit &&
+ (vport->fc_prevDID || lpfc_delay_discovery)) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ return fabric_param_changed;
+}
+
+
+/**
* lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
@@ -512,6 +565,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
+ uint8_t fabric_param_changed;
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_FABRIC;
@@ -544,6 +598,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
sp->cmn.bbRcvSizeLsb;
+
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
@@ -565,7 +625,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
}
- if ((vport->fc_prevDID != vport->fc_myDID) &&
+ if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/* If our NportID changed, we need to ensure all
@@ -2203,6 +2263,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_sli *psli;
+ struct lpfcMboxq *mbox;
psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2260,6 +2321,21 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
NLP_EVT_CMPL_LOGO);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
+ if ((vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+ phba->pport->fc_myDID = 0;
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+ MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+ }
return;
}
@@ -2745,7 +2821,8 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
}
break;
case ELS_CMD_FDISC:
- lpfc_issue_els_fdisc(vport, ndlp, retry);
+ if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+ lpfc_issue_els_fdisc(vport, ndlp, retry);
break;
}
return;
@@ -2815,9 +2892,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
switch (irsp->ulpStatus) {
case IOSTAT_FCP_RSP_ERROR:
+ break;
case IOSTAT_REMOTE_STOP:
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* This IO was aborted by the target; we don't
+ * know the rxid and, because we did not send
+ * the ABTS, we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, ndlp,
+ cmdiocb->sli4_xritag, 0, 0);
+ }
break;
-
case IOSTAT_LOCAL_REJECT:
switch ((irsp->un.ulpWord[4] & 0xff)) {
case IOERR_LOOP_OPEN_FAILURE:
@@ -4013,28 +4098,34 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
uint8_t *pcmd;
struct RRQ *rrq;
uint16_t rxid;
+ uint16_t xri;
struct lpfc_node_rrq *prrq;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
pcmd += sizeof(uint32_t);
rrq = (struct RRQ *)pcmd;
- rxid = bf_get(rrq_oxid, rrq);
+ rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
+ rxid = be16_to_cpu(bf_get(rrq_rxid, rrq));
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
" x%x x%x\n",
- bf_get(rrq_did, rrq),
- bf_get(rrq_oxid, rrq),
+ be32_to_cpu(bf_get(rrq_did, rrq)),
+ be16_to_cpu(bf_get(rrq_oxid, rrq)),
rxid,
iocb->iotag, iocb->iocb.ulpContext);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
- prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
+ if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
+ xri = be16_to_cpu(bf_get(rrq_oxid, rrq));
+ else
+ xri = rxid;
+ prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
if (prrq)
- lpfc_clr_rrq_active(phba, rxid, prrq);
+ lpfc_clr_rrq_active(phba, xri, prrq);
return;
}
@@ -6166,6 +6257,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (vport->load_flag & FC_UNLOADING)
goto dropit;
+ /* If NPort discovery is delayed drop incoming ELS */
+ if ((vport->fc_flag & FC_DISC_DELAYED) &&
+ (cmd != ELS_CMD_PLOGI))
+ goto dropit;
+
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
@@ -6218,6 +6314,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
lpfc_send_els_event(vport, ndlp, payload);
+
+ /* If Nport discovery is delayed, reject PLOGIs */
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ break;
+ }
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6596,6 +6698,21 @@ void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *ndlp_fdmi;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /*
+ * If the lpfc_delay_discovery parameter is set, the clean address
+ * bit is cleared, and the FC fabric parameters have changed, delay
+ * FC NPort discovery.
+ */
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ spin_unlock_irq(shost->host_lock);
+ mod_timer(&vport->delayed_disc_tmo,
+ jiffies + HZ * phba->fc_ratov);
+ return;
+ }
+ spin_unlock_irq(shost->host_lock);
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp) {
@@ -6938,6 +7055,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *next_np;
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_iocbq *piocb;
+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct serv_parm *sp;
+ uint8_t fabric_param_changed;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0123 FDISC completes. x%x/x%x prevDID: x%x\n",
@@ -6981,7 +7101,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
- if ((vport->fc_prevDID != vport->fc_myDID) &&
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ sp = prsp->virt + sizeof(uint32_t);
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
+ if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed so we can
@@ -7582,6 +7709,32 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ list_for_each_entry_safe(sglq_entry, sglq_next,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+ if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+ sglq_entry->ndlp = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+}
+
+/**
* lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the els xri abort wcqe structure.
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bb015960dbc9..154c715fb3af 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -658,6 +658,8 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_ramp_down_queue_handler(phba);
if (work_port_events & WORKER_RAMP_UP_QUEUE)
lpfc_ramp_up_queue_handler(phba);
+ if (work_port_events & WORKER_DELAYED_DISC_TMO)
+ lpfc_delayed_disc_timeout_handler(vport);
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -838,6 +840,11 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
+ /* Stop delayed Nport discovery */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ del_timer_sync(&vport->delayed_disc_tmo);
}
int
@@ -3160,7 +3167,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_cleanup_vports_rrqs(vport);
+ lpfc_cleanup_vports_rrqs(vport, NULL);
/*
* This shost reference might have been taken at the beginning of
* lpfc_vport_delete()
@@ -3900,6 +3907,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_nlp_put(ndlp);
return;
}
@@ -4289,7 +4298,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
-
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
return 0;
@@ -4426,10 +4435,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp = __lpfc_findnode_did(vport, did);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
return ndlp;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 96ed3ba6ba95..94ae37c5111a 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -341,6 +341,12 @@ struct csp {
uint8_t bbCreditMsb;
uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
+/*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ */
+#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
@@ -3198,7 +3204,10 @@ typedef struct {
#define IOERR_SLER_RRQ_RJT_ERR 0x4C
#define IOERR_SLER_RRQ_RETRY_ERR 0x4D
#define IOERR_SLER_ABTS_ERR 0x4E
-
+#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0
+#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1
+#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2
+#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3
#define IOERR_DRVR_MASK 0x100
#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
#define IOERR_SLI_BRESET 0x102
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 94c1aa1136de..c7178d60c7bf 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -778,6 +778,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -1852,6 +1853,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
+#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
@@ -1877,6 +1881,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3
};
struct lpfc_mbx_supp_pages {
@@ -1935,7 +1942,7 @@ struct lpfc_mbx_supp_pages {
#define LPFC_SLI4_PARAMETERS 2
};
-struct lpfc_mbx_sli4_params {
+struct lpfc_mbx_pc_sli4_params {
uint32_t word1;
#define qs_SHIFT 0
#define qs_MASK 0x00000001
@@ -2051,6 +2058,88 @@ struct lpfc_mbx_sli4_params {
uint32_t rsvd_13_63[51];
};
+struct lpfc_sli4_parameters {
+ uint32_t word0;
+#define cfg_prot_type_SHIFT 0
+#define cfg_prot_type_MASK 0x000000FF
+#define cfg_prot_type_WORD word0
+ uint32_t word1;
+#define cfg_ft_SHIFT 0
+#define cfg_ft_MASK 0x00000001
+#define cfg_ft_WORD word1
+#define cfg_sli_rev_SHIFT 4
+#define cfg_sli_rev_MASK 0x0000000f
+#define cfg_sli_rev_WORD word1
+#define cfg_sli_family_SHIFT 8
+#define cfg_sli_family_MASK 0x0000000f
+#define cfg_sli_family_WORD word1
+#define cfg_if_type_SHIFT 12
+#define cfg_if_type_MASK 0x0000000f
+#define cfg_if_type_WORD word1
+#define cfg_sli_hint_1_SHIFT 16
+#define cfg_sli_hint_1_MASK 0x000000ff
+#define cfg_sli_hint_1_WORD word1
+#define cfg_sli_hint_2_SHIFT 24
+#define cfg_sli_hint_2_MASK 0x0000001f
+#define cfg_sli_hint_2_WORD word1
+ uint32_t word2;
+ uint32_t word3;
+ uint32_t word4;
+#define cfg_cqv_SHIFT 14
+#define cfg_cqv_MASK 0x00000003
+#define cfg_cqv_WORD word4
+ uint32_t word5;
+ uint32_t word6;
+#define cfg_mqv_SHIFT 14
+#define cfg_mqv_MASK 0x00000003
+#define cfg_mqv_WORD word6
+ uint32_t word7;
+ uint32_t word8;
+#define cfg_wqv_SHIFT 14
+#define cfg_wqv_MASK 0x00000003
+#define cfg_wqv_WORD word8
+ uint32_t word9;
+ uint32_t word10;
+#define cfg_rqv_SHIFT 14
+#define cfg_rqv_MASK 0x00000003
+#define cfg_rqv_WORD word10
+ uint32_t word11;
+#define cfg_rq_db_window_SHIFT 28
+#define cfg_rq_db_window_MASK 0x0000000f
+#define cfg_rq_db_window_WORD word11
+ uint32_t word12;
+#define cfg_fcoe_SHIFT 0
+#define cfg_fcoe_MASK 0x00000001
+#define cfg_fcoe_WORD word12
+#define cfg_phwq_SHIFT 15
+#define cfg_phwq_MASK 0x00000001
+#define cfg_phwq_WORD word12
+#define cfg_loopbk_scope_SHIFT 28
+#define cfg_loopbk_scope_MASK 0x0000000f
+#define cfg_loopbk_scope_WORD word12
+ uint32_t sge_supp_len;
+ uint32_t word14;
+#define cfg_sgl_page_cnt_SHIFT 0
+#define cfg_sgl_page_cnt_MASK 0x0000000f
+#define cfg_sgl_page_cnt_WORD word14
+#define cfg_sgl_page_size_SHIFT 8
+#define cfg_sgl_page_size_MASK 0x000000ff
+#define cfg_sgl_page_size_WORD word14
+#define cfg_sgl_pp_align_SHIFT 16
+#define cfg_sgl_pp_align_MASK 0x000000ff
+#define cfg_sgl_pp_align_WORD word14
+ uint32_t word15;
+ uint32_t word16;
+ uint32_t word17;
+ uint32_t word18;
+ uint32_t word19;
+};
+
+struct lpfc_mbx_get_sli4_parameters {
+ struct mbox_header header;
+ struct lpfc_sli4_parameters sli4_parameters;
+};
+
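Each field in struct lpfc_sli4_parameters above is described by a SHIFT/MASK/WORD triple, which the driver consumes through its bf_get()/bf_set() accessor macros defined elsewhere in lpfc_hw4.h. A minimal sketch of that accessor pattern; the names below use an sk_ prefix because the driver's own definitions may differ in detail:

	#define sk_bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
	#define sk_bf_set(name, ptr, value) \
		((ptr)->name##_WORD = (((ptr)->name##_WORD & \
			~(name##_MASK << name##_SHIFT)) | \
			(((value) & name##_MASK) << name##_SHIFT)))

	/* e.g. sk_bf_get(cfg_sli_rev, params) extracts bits 7:4 of word1 */
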
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2103,7 +2192,8 @@ struct lpfc_mqe {
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_cfg query_fw_cfg;
struct lpfc_mbx_supp_pages supp_pages;
- struct lpfc_mbx_sli4_params sli4_params;
+ struct lpfc_mbx_pc_sli4_params sli4_params;
+ struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
struct lpfc_mbx_nop nop;
} un;
};
@@ -2381,6 +2471,10 @@ struct wqe_common {
#define wqe_wqes_SHIFT 15
#define wqe_wqes_MASK 0x00000001
#define wqe_wqes_WORD word10
+/* Note that this field overlaps above fields */
+#define wqe_wqid_SHIFT 1
+#define wqe_wqid_MASK 0x0000007f
+#define wqe_wqid_WORD word10
#define wqe_pri_SHIFT 16
#define wqe_pri_MASK 0x00000007
#define wqe_pri_WORD word10
@@ -2599,7 +2693,8 @@ struct fcp_iwrite64_wqe {
uint32_t total_xfer_len;
uint32_t initial_xfer_len;
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
};
struct fcp_iread64_wqe {
@@ -2608,7 +2703,8 @@ struct fcp_iread64_wqe {
uint32_t total_xfer_len; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
};
struct fcp_icmnd64_wqe {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6d0b36aa3389..35665cfb5689 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
|| ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
&& !(phba->lmt & LMT_16Gb))) {
/* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1302 Invalid speed for this board: "
"Reset link speed to auto: x%x\n",
phba->cfg_link_speed);
@@ -945,17 +945,13 @@ static void
lpfc_rrq_timeout(unsigned long ptr)
{
struct lpfc_hba *phba;
- uint32_t tmo_posted;
unsigned long iflag;
phba = (struct lpfc_hba *)ptr;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
- if (!tmo_posted)
- phba->hba_flag |= HBA_RRQ_ACTIVE;
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- if (!tmo_posted)
- lpfc_worker_wake_up(phba);
+ lpfc_worker_wake_up(phba);
}
/**
@@ -2280,6 +2276,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
/* Wait for any activity on ndlps to settle */
msleep(10);
}
+ lpfc_cleanup_vports_rrqs(vport, NULL);
}
/**
@@ -2295,6 +2292,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
del_timer_sync(&vport->els_tmofunc);
del_timer_sync(&vport->fc_fdmitmo);
+ del_timer_sync(&vport->delayed_disc_tmo);
lpfc_can_disctmo(vport);
return;
}
@@ -2355,6 +2353,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->fabric_block_timer);
del_timer_sync(&phba->eratt_poll);
del_timer_sync(&phba->hb_tmofunc);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ del_timer_sync(&phba->rrq_tmr);
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ }
phba->hb_outstanding = 0;
switch (phba->pci_dev_grp) {
@@ -2732,6 +2734,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
init_timer(&vport->els_tmofunc);
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
+
+ init_timer(&vport->delayed_disc_tmo);
+ vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
+ vport->delayed_disc_tmo.data = (unsigned long)vport;
+
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
goto out_put_shost;
@@ -4283,36 +4290,37 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_bsmbx;
}
- /* Get the Supported Pages. It is always available. */
+ /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
lpfc_supported_pages(mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (unlikely(rc)) {
- rc = -EIO;
- mempool_free(mboxq, phba->mbox_mem_pool);
- goto out_free_bsmbx;
- }
-
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
+ if (!rc) {
+ mqe = &mboxq->u.mqe;
+ memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+ LPFC_MAX_SUPPORTED_PAGES);
+ for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+ switch (pn_page[i]) {
+ case LPFC_SLI4_PARAMETERS:
+ phba->sli4_hba.pc_sli4_params.supported = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* Read the port's SLI4 Parameters capabilities if supported. */
+ if (phba->sli4_hba.pc_sli4_params.supported)
+ rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ if (rc) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto out_free_bsmbx;
}
}
-
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ /*
+ * Get sli4 parameters that override parameters from Port capabilities.
+ * If this call fails, it is not a critical error, so continue loading.
+ */
+ lpfc_get_sli4_parameters(phba, mboxq);
mempool_free(mboxq, phba->mbox_mem_pool);
- if (rc) {
- rc = -EIO;
- goto out_free_bsmbx;
- }
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
if (rc)
@@ -7810,7 +7818,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mqe = &mboxq->u.mqe;
/* Read the port's SLI4 Parameters port capabilities */
- lpfc_sli4_params(mboxq);
+ lpfc_pc_sli4_params(mboxq);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
else {
@@ -7854,6 +7862,66 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
/**
+ * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion. The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ int rc;
+ struct lpfc_mqe *mqe = &mboxq->u.mqe;
+ struct lpfc_pc_sli4_params *sli4_params;
+ int length;
+ struct lpfc_sli4_parameters *mbx_sli4_parameters;
+
+ /* Read the port's SLI4 Config Parameters */
+ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+ length, LPFC_SLI4_MBX_EMBED);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+ lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+ if (unlikely(rc))
+ return rc;
+ sli4_params = &phba->sli4_hba.pc_sli4_params;
+ mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+ sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
+ sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
+ sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
+ sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
+ mbx_sli4_parameters);
+ sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
+ mbx_sli4_parameters);
+ if (bf_get(cfg_phwq, mbx_sli4_parameters))
+ phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+ sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+ sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+ sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+ sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
+ sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+ sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
+ mbx_sli4_parameters);
+ sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
+ mbx_sli4_parameters);
+ return 0;
+}
+
+/**
* lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 23403c650207..dba32dfdb59b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1263,7 +1263,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
- mb->un.varCfgPort.cdss = 1; /* Configure Security */
+ if (phba->cfg_enable_dss)
+ mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
@@ -1692,7 +1693,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
* @mbox: pointer to lpfc mbox command.
* @subsystem: The sli4 config sub mailbox subsystem.
* @opcode: The sli4 config sub mailbox command opcode.
- * @length: Length of the sli4 config mailbox command.
+ * @length: Length of the sli4 config mailbox command (including sub-header).
*
* This routine sets up the header fields of SLI4 specific mailbox command
* for sending IOCTL command.
@@ -1723,14 +1724,14 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
if (emb) {
/* Set up main header fields */
bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
- sli4_config->header.cfg_mhdr.payload_length =
- LPFC_MBX_CMD_HDR_LENGTH + length;
+ sli4_config->header.cfg_mhdr.payload_length = length;
/* Set up sub-header fields following main header */
bf_set(lpfc_mbox_hdr_opcode,
&sli4_config->header.cfg_shdr.request, opcode);
bf_set(lpfc_mbox_hdr_subsystem,
&sli4_config->header.cfg_shdr.request, subsystem);
- sli4_config->header.cfg_shdr.request.request_length = length;
+ sli4_config->header.cfg_shdr.request.request_length =
+ length - LPFC_MBX_CMD_HDR_LENGTH;
return length;
}
@@ -1902,6 +1903,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
/* Set up host requested features. */
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+ bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
/* Enable DIF (block guard) only if configured to do so. */
if (phba->cfg_enable_bg)
@@ -2159,17 +2161,16 @@ lpfc_supported_pages(struct lpfcMboxq *mbox)
}
/**
- * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
- * mailbox command.
+ * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
* @mbox: pointer to lpfc mbox command to initialize.
*
* The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
* retrieve the particular SLI4 features supported by the port.
**/
void
-lpfc_sli4_params(struct lpfcMboxq *mbox)
+lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
{
- struct lpfc_mbx_sli4_params *sli4_params;
+ struct lpfc_mbx_pc_sli4_params *sli4_params;
memset(mbox, 0, sizeof(*mbox));
sli4_params = &mbox->u.mqe.un.sli4_params;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d85a7423a694..52b35159fc35 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,7 +350,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /* no need to reg_login if we are already in one of these states */
+ /*
+ * Need to unreg_login if we are already in one of these states and
+ * change to NPR state. This will block the port until after the ACC
+ * completes and the reg_login is issued and completed.
+ */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -359,8 +363,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
- return 1;
+ lpfc_unreg_rpi(vport, ndlp);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
if ((vport->fc_flag & FC_PT2PT) &&
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c97751c95d77..bf34178b80bf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -609,6 +609,32 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
}
/**
+ * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *psb, *next_psb;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_for_each_entry_safe(psb, next_psb,
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+ if (psb->rdata && psb->rdata->pnode
+ && psb->rdata->pnode->vport == vport)
+ psb->rdata = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
* lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
@@ -640,7 +666,11 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
psb->status = IOSTAT_SUCCESS;
spin_unlock(
&phba->sli4_hba.abts_scsi_buf_list_lock);
- ndlp = psb->rdata->pnode;
+ if (psb->rdata && psb->rdata->pnode)
+ ndlp = psb->rdata->pnode;
+ else
+ ndlp = NULL;
+
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (ndlp)
@@ -964,36 +994,29 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
- struct lpfc_scsi_buf *lpfc_cmd = NULL;
- struct lpfc_scsi_buf *start_lpfc_cmd = NULL;
- struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+ struct lpfc_scsi_buf *lpfc_cmd;
unsigned long iflag = 0;
int found = 0;
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
- list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
- while (!found && lpfc_cmd) {
+ list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
+ list) {
if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_cmd->cur_iocbq.sli4_xritag)) {
- lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
- spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
- list_remove_head(scsi_buf_list, lpfc_cmd,
- struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
- iflag);
- if (lpfc_cmd == start_lpfc_cmd) {
- lpfc_cmd = NULL;
- break;
- } else
- continue;
- }
+ lpfc_cmd->cur_iocbq.sli4_xritag))
+ continue;
+ list_del(&lpfc_cmd->list);
found = 1;
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->nonsg_phys = 0;
lpfc_cmd->prot_seg_cnt = 0;
+ break;
}
- return lpfc_cmd;
+ spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
+ iflag);
+ if (!found)
+ return NULL;
+ else
+ return lpfc_cmd;
}
/**
* lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1981,12 +2004,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ struct sli4_sge *first_data_sgl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
dma_addr_t physaddr;
uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
int nseg;
+ struct ulp_bde64 *bde;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -2011,7 +2036,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl += 1;
-
+ first_data_sgl = sgl;
lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
@@ -2047,6 +2072,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dma_offset += dma_len;
sgl++;
}
+ /* setup the performance hint (first data BDE) if enabled */
+ if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
+ bde = (struct ulp_bde64 *)
+ &(iocb_cmd->unsli3.sli3Words[5]);
+ bde->addrLow = first_data_sgl->addr_lo;
+ bde->addrHigh = first_data_sgl->addr_hi;
+ bde->tus.f.bdeSize =
+ le32_to_cpu(first_data_sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ }
} else {
sgl += 1;
/* clear the last flag in the fcp_rsp map entry */
@@ -2471,6 +2507,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_worker_wake_up(phba);
break;
case IOSTAT_LOCAL_REJECT:
+ case IOSTAT_REMOTE_STOP:
+ if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+ lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+ cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+ break;
+ }
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
@@ -2478,7 +2524,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
}
-
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
@@ -2497,7 +2542,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
"on unprotected cmd\n");
}
}
-
+ if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
+ && (phba->sli_rev == LPFC_SLI_REV4)
+ && (pnode && NLP_CHK_NODE_ACT(pnode))) {
+ /* This IO was aborted by the target; we don't
+ * know the rxid and, because we did not send
+ * the ABTS, we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, pnode,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ 0, 0);
+ }
/* else: fall through */
default:
cmd->result = ScsiResult(DID_ERROR, 0);
@@ -2508,9 +2563,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
SAM_STAT_BUSY);
- } else {
+ } else
cmd->result = ScsiResult(DID_OK, 0);
- }
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
@@ -3004,11 +3058,11 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
* transport is still transitioning.
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
goto out_fail_command;
}
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
- goto out_host_busy;
+ goto out_tgt_busy;
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
@@ -3125,6 +3179,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
+ out_tgt_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
out_fail_command:
done(cmnd);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a359d2b873ce..2ee0374a9908 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -96,7 +96,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
/* set consumption flag every once in a while */
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
-
+ if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+ bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
/* Update the host index before invoking device */
@@ -534,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t adj_xri;
struct lpfc_node_rrq *rrq;
int empty;
+ uint32_t did = 0;
+
+
+ if (!ndlp)
+ return -EINVAL;
+
+ if (!phba->cfg_enable_rrq)
+ return -EINVAL;
+
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ goto out;
+ }
+ did = ndlp->nlp_DID;
/*
* set the active bit even if there is no mem available.
*/
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (!ndlp)
- return -EINVAL;
+
+ if (NLP_CHK_FREE_REQ(ndlp))
+ goto out;
+
+ if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+ goto out;
+
if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
- return -EINVAL;
+ goto out;
+
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
@@ -553,14 +574,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
empty = list_empty(&phba->active_rrq_list);
- if (phba->cfg_enable_rrq && send_rrq)
- /*
- * We need the xri before we can add this to the
- * phba active rrq list.
- */
- rrq->send_rrq = send_rrq;
- else
- rrq->send_rrq = 0;
+ rrq->send_rrq = send_rrq;
list_add_tail(&rrq->list, &phba->active_rrq_list);
if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
phba->hba_flag |= HBA_RRQ_ACTIVE;
@@ -569,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
return 0;
}
- return -ENOMEM;
+out:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2921 Can't set rrq active xri:0x%x rxid:0x%x"
+ " DID:0x%x Send:%d\n",
+ xritag, rxid, did, send_rrq);
+ return -EINVAL;
}
/**
- * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @xritag: xri used in this exchange.
* @rrq: The RRQ to be cleared.
*
- * This function is called with hbalock held. This function
**/
-static void
-__lpfc_clr_rrq_active(struct lpfc_hba *phba,
- uint16_t xritag,
- struct lpfc_node_rrq *rrq)
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+ uint16_t xritag,
+ struct lpfc_node_rrq *rrq)
{
uint16_t adj_xri;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
- ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+ if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+ ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
/* The target DID could have been swapped (cable swap)
* we should use the ndlp from the findnode if it is
* available.
*/
- if (!ndlp)
+ if ((!ndlp) && rrq->ndlp)
ndlp = rrq->ndlp;
+ if (!ndlp)
+ goto out;
+
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
}
+out:
mempool_free(rrq, phba->rrq_pool);
}
@@ -627,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
struct lpfc_node_rrq *nextrrq;
unsigned long next_time;
unsigned long iflags;
+ LIST_HEAD(send_rrq);
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov + 1);
list_for_each_entry_safe(rrq, nextrrq,
- &phba->active_rrq_list, list) {
- if (time_after(jiffies, rrq->rrq_stop_time)) {
- list_del(&rrq->list);
- if (!rrq->send_rrq)
- /* this call will free the rrq */
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- else {
- /* if we send the rrq then the completion handler
- * will clear the bit in the xribitmap.
- */
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (lpfc_send_rrq(phba, rrq)) {
- lpfc_clr_rrq_active(phba, rrq->xritag,
- rrq);
- }
- spin_lock_irqsave(&phba->hbalock, iflags);
- }
- } else if (time_before(rrq->rrq_stop_time, next_time))
+ &phba->active_rrq_list, list) {
+ if (time_after(jiffies, rrq->rrq_stop_time))
+ list_move(&rrq->list, &send_rrq);
+ else if (time_before(rrq->rrq_stop_time, next_time))
next_time = rrq->rrq_stop_time;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (!list_empty(&phba->active_rrq_list))
mod_timer(&phba->rrq_tmr, next_time);
+ list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
+ list_del(&rrq->list);
+ if (!rrq->send_rrq)
+ /* this call will free the rrq */
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ else if (lpfc_send_rrq(phba, rrq)) {
+ /* if we send the rrq then the completion handler
+ * will clear the bit in the xribitmap.
+ */
+ lpfc_clr_rrq_active(phba, rrq->xritag,
+ rrq);
+ }
+ }
}
/**
@@ -692,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
/**
* lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
* @vport: Pointer to vport context object.
- *
- * Remove all active RRQs for this vport from the phba->active_rrq_list and
- * clear the rrq.
+ * @ndlp: Pointer to the lpfc_nodelist structure.
+ * If ndlp is NULL, remove all active RRQs for this vport from the
+ * phba->active_rrq_list and clear each rrq.
+ * If ndlp is not NULL, remove only the RRQs for this vport and this ndlp.
**/
void
-lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_node_rrq *rrq;
struct lpfc_node_rrq *nextrrq;
unsigned long iflags;
+ LIST_HEAD(rrq_list);
if (phba->sli_rev != LPFC_SLI_REV4)
return;
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
- if (rrq->vport == vport) {
- list_del(&rrq->list);
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- }
+ if (!ndlp) {
+ lpfc_sli4_vport_delete_els_xri_aborted(vport);
+ lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
}
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
+ if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
+ list_move(&rrq->list, &rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+ list_del(&rrq->list);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ }
}
/**
@@ -732,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
struct lpfc_node_rrq *nextrrq;
unsigned long next_time;
unsigned long iflags;
+ LIST_HEAD(rrq_list);
if (phba->sli_rev != LPFC_SLI_REV4)
return;
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov * 2);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+ list_splice_init(&phba->active_rrq_list, &rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
list_del(&rrq->list);
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
if (!list_empty(&phba->active_rrq_list))
mod_timer(&phba->rrq_tmr, next_time);
}
/**
- * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @ndlp: Targets nodelist pointer for this exchange.
* @xritag the xri in the bitmap to test.
@@ -758,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
* returns 0 = rrq not active for this xri
* 1 = rrq is valid for this xri.
**/
-static int
-__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
uint16_t adj_xri;
@@ -802,52 +836,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
/**
- * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @xritag: xri used in this exchange.
- * @rrq: The RRQ to be cleared.
- *
- * This function is takes the hbalock.
- **/
-void
-lpfc_clr_rrq_active(struct lpfc_hba *phba,
- uint16_t xritag,
- struct lpfc_node_rrq *rrq)
-{
- unsigned long iflags;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- __lpfc_clr_rrq_active(phba, xritag, rrq);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return;
-}
-
-
-
-/**
- * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @ndlp: Targets nodelist pointer for this exchange.
- * @xritag the xri in the bitmap to test.
- *
- * This function takes the hbalock.
- * returns 0 = rrq not active for this xri
- * 1 = rrq is valid for this xri.
- **/
-int
-lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- uint16_t xritag)
-{
- int ret;
- unsigned long iflags;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return ret;
-}
-
-/**
* __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
* @phba: Pointer to HBA context object.
* @piocb: Pointer to the iocbq.
@@ -884,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
return NULL;
adj_xri = sglq->sli4_xritag -
phba->sli4_hba.max_cfg_param.xri_base;
- if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+ if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
*/
@@ -969,7 +957,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
} else {
sglq->state = SGL_FREED;
sglq->ndlp = NULL;
- list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+ list_add_tail(&sglq->list,
+ &phba->sli4_hba.lpfc_sgl_list);
/* Check if TXQ queue needs to be serviced */
if (pring->txq_cnt)
@@ -4817,7 +4806,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"0378 No support for fcpi mode.\n");
ftr_rsp++;
}
-
+ if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
+ phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
/*
* If the port cannot support the host's requested features
* then turn off the global config parameters to disable the
@@ -5004,7 +4996,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
out_unset_queue:
/* Unset all the queues set up in this routine when error out */
if (rc)
@@ -10478,6 +10471,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->type = type;
cq->subtype = subtype;
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+ cq->assoc_qid = eq->queue_id;
cq->host_index = 0;
cq->hba_index = 0;
@@ -10672,6 +10666,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
goto out;
}
mq->type = LPFC_MQ;
+ mq->assoc_qid = cq->queue_id;
mq->subtype = subtype;
mq->host_index = 0;
mq->hba_index = 0;
@@ -10759,6 +10754,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
goto out;
}
wq->type = LPFC_WQ;
+ wq->assoc_qid = cq->queue_id;
wq->subtype = subtype;
wq->host_index = 0;
wq->hba_index = 0;
@@ -10876,6 +10872,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
goto out;
}
hrq->type = LPFC_HRQ;
+ hrq->assoc_qid = cq->queue_id;
hrq->subtype = subtype;
hrq->host_index = 0;
hrq->hba_index = 0;
@@ -10936,6 +10933,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
goto out;
}
drq->type = LPFC_DRQ;
+ drq->assoc_qid = cq->queue_id;
drq->subtype = subtype;
drq->host_index = 0;
drq->hba_index = 0;
@@ -11189,7 +11187,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_rq_destroy) -
- sizeof(struct mbox_header));
+ sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
length, LPFC_SLI4_MBX_EMBED);
@@ -11279,7 +11277,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
sizeof(struct lpfc_mbx_post_sgl_pages) -
- sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
&mbox->u.mqe.un.post_sgl_pages;
@@ -12402,7 +12400,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
- sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+ sizeof(struct lpfc_sli4_cfg_mhdr),
+ LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
hdr_tmpl, rpi_page->page_count);
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
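
The reworked lpfc_handle_rrq_active(), lpfc_cleanup_vports_rrqs() and lpfc_cleanup_wt_rrqs() above share one locking pattern: candidate RRQs are moved from the shared active_rrq_list onto a private list while hbalock is held, and the clearing or RRQ send happens only after the lock is dropped. A self-contained sketch of that pattern with hypothetical item/process names (not lpfc code):

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {				/* hypothetical stand-in for lpfc_node_rrq */
	struct list_head list;
	unsigned long deadline;
};

static void process(struct item *it)	/* stands in for lpfc_clr_rrq_active() etc. */
{
	kfree(it);
}

static void drain_expired(struct list_head *active, spinlock_t *lock)
{
	struct item *it, *next;
	unsigned long flags;
	LIST_HEAD(local);

	/* Phase 1: detach candidates while the lock is held -- no sleeping here. */
	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(it, next, active, list)
		if (time_after(jiffies, it->deadline))
			list_move(&it->list, &local);
	spin_unlock_irqrestore(lock, flags);

	/* Phase 2: the slow work (mailbox I/O, frees) runs without the lock. */
	list_for_each_entry_safe(it, next, &local, list) {
		list_del(&it->list);
		process(it);
	}
}
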
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c7217d579e0f..595056b89608 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -125,9 +125,9 @@ struct lpfc_queue {
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
uint32_t queue_id; /* Queue ID assigned by the hardware */
+ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
struct list_head page_list;
uint32_t page_count; /* Number of pages allocated for this queue */
-
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
union sli4_qe qe[1]; /* array to index entries (must be last) */
@@ -359,6 +359,10 @@ struct lpfc_pc_sli4_params {
uint32_t hdr_pp_align;
uint32_t sgl_pages_max;
uint32_t sgl_pp_align;
+ uint8_t cqv;
+ uint8_t mqv;
+ uint8_t wqv;
+ uint8_t rqv;
};
/* SLI4 HBA data structure entries */
@@ -562,6 +566,8 @@ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
+void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
int lpfc_sli4_brdreset(struct lpfc_hba *);
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 386cf92de492..0a4d376dbca5 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.20"
+#define LPFC_DRIVER_VERSION "8.3.21"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 6b8d2952e32f..30ba5440c67a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -464,6 +464,7 @@ disable_vport(struct fc_vport *fc_vport)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
long timeout;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)
@@ -498,6 +499,9 @@ disable_vport(struct fc_vport *fc_vport)
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 9aa048525eb2..c212694a9714 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1412,7 +1412,7 @@ megaraid_isr_memmapped(int irq, void *devp)
* @nstatus - number of completed commands
* @status - status of the last command completed
*
- * Complete the comamnds and call the scsi mid-layer callback hooks.
+ * Complete the commands and call the scsi mid-layer callback hooks.
*/
static void
mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
@@ -4296,7 +4296,7 @@ mega_support_cluster(adapter_t *adapter)
* @adapter - pointer to our soft state
* @dma_handle - DMA address of the buffer
*
- * Issue internal comamnds while interrupts are available.
+ * Issue internal commands while interrupts are available.
* We only issue direct mailbox commands from within the driver. ioctl()
* interface using these routines can issue passthru commands.
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 1b5e375732c0..635b228c3ead 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.29-rc1"
-#define MEGASAS_RELDATE "Dec. 7, 2010"
-#define MEGASAS_EXT_VERSION "Tue. Dec. 7 17:00:00 PDT 2010"
+#define MEGASAS_VERSION "00.00.05.34-rc1"
+#define MEGASAS_RELDATE "Feb. 24, 2011"
+#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011"
/*
* Device IDs
@@ -723,6 +723,7 @@ struct megasas_ctrl_info {
MEGASAS_MAX_DEV_PER_CHANNEL)
#define MEGASAS_MAX_SECTORS (2*1024)
+#define MEGASAS_MAX_SECTORS_IEEE (2*128)
#define MEGASAS_DBG_LVL 1
#define MEGASAS_FW_BUSY 1
@@ -1477,4 +1478,7 @@ struct megasas_mgmt_info {
int max_index;
};
+#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
+#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
+
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5d6d07bd1cd0..bbd10c81fd9c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,12 +18,13 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.29-rc1
+ * Version : v00.00.05.34-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
* Sumant Patro
* Bo Yang
+ * Adam Radford <linuxraid@lsi.com>
*
* Send feedback to: <megaraidlinux@lsi.com>
*
@@ -134,7 +135,11 @@ spinlock_t poll_aen_lock;
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
u8 alt_status);
-
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
@@ -554,6 +559,8 @@ static int
megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
{
u32 status;
+ u32 mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
@@ -564,6 +571,15 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
}
/*
+ * Check if it is our interrupt
+ */
+ if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) ==
+ MFI_STATE_FAULT) {
+ mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ } else
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ /*
* Clear the interrupt by writing back the same value
*/
writel(status, &regs->outbound_intr_status);
@@ -573,7 +589,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
*/
readl(&regs->outbound_intr_status);
- return 1;
+ return mfiStatus;
}
/**
@@ -597,17 +613,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
}
/**
- * megasas_adp_reset_skinny - For controller reset
- * @regs: MFI register set
- */
-static int
-megasas_adp_reset_skinny(struct megasas_instance *instance,
- struct megasas_register_set __iomem *regs)
-{
- return 0;
-}
-
-/**
* megasas_check_reset_skinny - For controller reset check
* @regs: MFI register set
*/
@@ -625,7 +630,7 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
.disable_intr = megasas_disable_intr_skinny,
.clear_intr = megasas_clear_intr_skinny,
.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
- .adp_reset = megasas_adp_reset_skinny,
+ .adp_reset = megasas_adp_reset_gen2,
.check_reset = megasas_check_reset_skinny,
.service_isr = megasas_isr,
.tasklet = megasas_complete_cmd_dpc,
@@ -740,20 +745,28 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
{
u32 retry = 0 ;
u32 HostDiag;
+ u32 *seq_offset = &reg_set->seq_offset;
+ u32 *hostdiag_offset = &reg_set->host_diag;
+
+ if (instance->instancet == &megasas_instance_template_skinny) {
+ seq_offset = &reg_set->fusion_seq_offset;
+ hostdiag_offset = &reg_set->fusion_host_diag;
+ }
+
+ writel(0, seq_offset);
+ writel(4, seq_offset);
+ writel(0xb, seq_offset);
+ writel(2, seq_offset);
+ writel(7, seq_offset);
+ writel(0xd, seq_offset);
- writel(0, &reg_set->seq_offset);
- writel(4, &reg_set->seq_offset);
- writel(0xb, &reg_set->seq_offset);
- writel(2, &reg_set->seq_offset);
- writel(7, &reg_set->seq_offset);
- writel(0xd, &reg_set->seq_offset);
msleep(1000);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
msleep(100);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
@@ -764,14 +777,14 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
- writel((HostDiag | DIAG_RESET_ADAPTER), &reg_set->host_diag);
+ writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
ssleep(10);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
msleep(100);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
@@ -877,7 +890,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
* @instance: Adapter soft state
* @cmd_to_abort: Previously issued cmd to be aborted
*
- * MFI firmware can abort previously issued AEN comamnd (automatic event
+ * MFI firmware can abort previously issued AEN command (automatic event
* notification). The megasas_issue_blocked_abort_cmd() issues such abort
* cmd and waits for return status.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
@@ -2503,7 +2516,9 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
if ((mfiStatus = instance->instancet->clear_intr(
instance->reg_set)
) == 0) {
- return IRQ_NONE;
+ /* Hardware may not set outbound_intr_status in MSI-X mode */
+ if (!instance->msi_flag)
+ return IRQ_NONE;
}
instance->mfiStatus = mfiStatus;
@@ -2611,7 +2626,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
case MFI_STATE_FAULT:
printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
- return -ENODEV;
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FAULT;
+ break;
case MFI_STATE_WAIT_HANDSHAKE:
/*
@@ -3424,7 +3441,6 @@ fail_reply_queue:
megasas_free_cmds(instance);
fail_alloc_cmds:
- iounmap(instance->reg_set);
return 1;
}
@@ -3494,7 +3510,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
- return -ENODEV;
+ goto fail_init_adapter;
printk(KERN_ERR "megasas: INIT adapter done\n");
@@ -3543,7 +3559,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
* Setup tasklet for cmd completion
*/
- tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
/* Initialize the cmd completion timer */
@@ -3553,6 +3569,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
MEGASAS_COMPLETION_TIMER_INTERVAL);
return 0;
+fail_init_adapter:
fail_ready_state:
iounmap(instance->reg_set);
@@ -3820,6 +3837,10 @@ static int megasas_io_attach(struct megasas_instance *instance)
instance->max_fw_cmds - MEGASAS_INT_CMDS;
host->this_id = instance->init_id;
host->sg_tablesize = instance->max_num_sge;
+
+ if (instance->fw_support_ieee)
+ instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
+
/*
* Check if the module parameter value for max_sectors can be used
*/
@@ -3899,9 +3920,26 @@ fail_set_dma_mask:
static int __devinit
megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int rval;
+ int rval, pos;
struct Scsi_Host *host;
struct megasas_instance *instance;
+ u16 control = 0;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, msi_control_reg(pos),
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ msi_control_reg(pos),
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
/*
* Announce PCI information
@@ -4039,12 +4077,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- /*
- * Initialize MFI Firmware
- */
- if (megasas_init_fw(instance))
- goto fail_init_mfi;
-
/* Try to enable MSI-X */
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
@@ -4054,6 +4086,12 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->msi_flag = 1;
/*
+ * Initialize MFI Firmware
+ */
+ if (megasas_init_fw(instance))
+ goto fail_init_mfi;
+
+ /*
* Register IRQ
*/
if (request_irq(instance->msi_flag ? instance->msixentry.vector :
@@ -4105,24 +4143,23 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->instancet->disable_intr(instance->reg_set);
free_irq(instance->msi_flag ? instance->msixentry.vector :
instance->pdev->irq, instance);
+fail_irq:
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
+ megasas_release_fusion(instance);
+ else
+ megasas_release_mfi(instance);
+ fail_init_mfi:
if (instance->msi_flag)
pci_disable_msix(instance->pdev);
-
- fail_irq:
- fail_init_mfi:
fail_alloc_dma_buf:
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail,
instance->evt_detail_h);
- if (instance->producer) {
+ if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
- megasas_release_mfi(instance);
- } else {
- megasas_release_fusion(instance);
- }
if (instance->consumer)
pci_free_consistent(pdev, sizeof(u32), instance->consumer,
instance->consumer_h);
@@ -4242,9 +4279,8 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
/* cancel the delayed work if this work still in queue */
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work(
+ cancel_delayed_work_sync(
(struct delayed_work *)&ev->hotplug_work);
- flush_scheduled_work();
instance->ev = NULL;
}
@@ -4297,6 +4333,10 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_set_dma_mask(pdev))
goto fail_set_dma_mask;
+ /* Now re-enable MSI-X */
+ if (instance->msi_flag)
+ pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
/*
* Initialize MFI Firmware
*/
@@ -4333,10 +4373,6 @@ megasas_resume(struct pci_dev *pdev)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- /* Now re-enable MSI-X */
- if (instance->msi_flag)
- pci_enable_msix(instance->pdev, &instance->msixentry, 1);
-
/*
* Register IRQ
*/
@@ -4417,9 +4453,8 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
/* cancel the delayed work if this work still in queue*/
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work(
+ cancel_delayed_work_sync(
(struct delayed_work *)&ev->hotplug_work);
- flush_scheduled_work();
instance->ev = NULL;
}
@@ -4611,6 +4646,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* For each user buffer, create a mirror buffer and copy in
*/
for (i = 0; i < ioc->sge_count; i++) {
+ if (!ioc->sgl[i].iov_len)
+ continue;
+
kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
ioc->sgl[i].iov_len,
&buf_handle, GFP_KERNEL);
@@ -5177,6 +5215,7 @@ megasas_aen_polling(struct work_struct *work)
break;
case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
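
The reset_devices branch added to megasas_probe_one() above clears a stale MSI-X enable left behind by the crashed kernel before the firmware is re-initialized. Stripped of driver specifics, the quirk reduces to the sketch below; PCI_MSIX_FLAGS_ENABLE is (1 << 15) as the header hunk defines it (newer kernels provide the same constant in pci_regs.h), and the guard define covers either case:

#include <linux/pci.h>
#include <linux/pci_regs.h>

#ifndef PCI_MSIX_FLAGS_ENABLE
#define PCI_MSIX_FLAGS_ENABLE	(1 << 15)	/* MSI-X Enable bit in Message Control */
#endif

static void kdump_disable_msix(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	u16 control;

	if (!pos)
		return;

	/* Message Control sits 2 bytes into the MSI-X capability. */
	pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &control);
	if (control & PCI_MSIX_FLAGS_ENABLE)
		pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS,
				      control & ~PCI_MSIX_FLAGS_ENABLE);
}
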
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index d6e2a663b165..145a8cffb1fa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -81,6 +81,10 @@ u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
+
u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
struct LD_LOAD_BALANCE_INFO *lbInfo);
u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
@@ -983,13 +987,15 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
return 0;
-fail_alloc_cmds:
-fail_alloc_mfi_cmds:
fail_map_info:
if (i == 1)
dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
fusion->ld_map[0], fusion->ld_map_phys[0]);
fail_ioc_init:
+ megasas_free_cmds_fusion(instance);
+fail_alloc_cmds:
+ megasas_free_cmds(instance);
+fail_alloc_mfi_cmds:
return 1;
}
@@ -1431,8 +1437,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
/* Check if this is a system PD I/O */
- if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) &&
- (instance->pd_list[pd_index].driveType == TYPE_DISK)) {
+ if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
io_request->Function = 0;
io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
@@ -1455,7 +1460,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
io_request->RaidContext.VirtualDiskTgtId = device_id;
- io_request->LUN[0] = scmd->device->lun;
+ io_request->LUN[1] = scmd->device->lun;
io_request->DataLength = scsi_bufflen(scmd);
}
@@ -1479,7 +1484,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(instance, scp);
/* Zero out some fields so they don't get reused */
- io_request->LUN[0] = 0;
+ io_request->LUN[1] = 0;
io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
io_request->EEDPFlags = 0;
@@ -1743,7 +1748,7 @@ complete_cmd_fusion(struct megasas_instance *instance)
wmb();
writel(fusion->last_reply_idx,
&instance->reg_set->reply_post_host_index);
-
+ megasas_check_and_restore_queue_depth(instance);
return IRQ_HANDLED;
}
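
The LUN[0] to LUN[1] changes above match the SAM layout of an 8-byte LUN field: with peripheral device addressing, byte 0 carries the addressing method and bus, and a simple LUN below 256 lands in byte 1 (the generic kernel helper for this encoding is int_to_scsilun()). A hypothetical minimal version of that encoding:

#include <linux/string.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Hypothetical helper: encode a simple (< 256) LUN into an 8-byte SAM field. */
static void set_request_lun(u8 lun_field[8], struct scsi_cmnd *scmd)
{
	memset(lun_field, 0, 8);
	/* Peripheral device addressing: byte 0 = method/bus (0), byte 1 = LUN. */
	lun_field[1] = scmd->device->lun & 0xff;
}
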
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 8be75e65f763..a3e60385787f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.16
+ * mpi2.h Version: 02.00.17
*
* Version History
* ---------------
@@ -63,6 +63,7 @@
* function codes, 0xF0 to 0xFF.
* 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
* Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -88,7 +89,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_UNIT (0x11)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index d76a65847603..f5b9c766e28f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.15
+ * mpi2_cnfg.h Version: 02.00.16
*
* Version History
* ---------------
@@ -125,6 +125,8 @@
* define.
* Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
* Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
* --------------------------------------------------------------------------
*/
@@ -745,8 +747,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
-#define MPI2_IOUNITPAGE1_MULTI_PATHING (0x00000002)
-#define MPI2_IOUNITPAGE1_SINGLE_PATHING (0x00000000)
/* IO Unit Page 3 */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
deleted file mode 100644
index b1e88f26b748..000000000000
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ /dev/null
@@ -1,384 +0,0 @@
- ==============================
- Fusion-MPT MPI 2.0 Header File Change History
- ==============================
-
- Copyright (c) 2000-2010 LSI Corporation.
-
- ---------------------------------------
- Header Set Release Version: 02.00.14
- Header Set Release Date: 10-28-09
- ---------------------------------------
-
- Filename Current version Prior version
- ---------- --------------- -------------
- mpi2.h 02.00.14 02.00.13
- mpi2_cnfg.h 02.00.13 02.00.12
- mpi2_init.h 02.00.08 02.00.07
- mpi2_ioc.h 02.00.13 02.00.12
- mpi2_raid.h 02.00.04 02.00.04
- mpi2_sas.h 02.00.03 02.00.02
- mpi2_targ.h 02.00.03 02.00.03
- mpi2_tool.h 02.00.04 02.00.04
- mpi2_type.h 02.00.00 02.00.00
- mpi2_ra.h 02.00.00 02.00.00
- mpi2_hbd.h 02.00.00
- mpi2_history.txt 02.00.14 02.00.13
-
-
- * Date Version Description
- * -------- -------- ------------------------------------------------------
-
-mpi2.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
- * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
- * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
- * Moved ReplyPostHostIndex register to offset 0x6C of the
- * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
- * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
- * Added union of request descriptors.
- * Added union of reply descriptors.
- * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added define for MPI2_VERSION_02_00.
- * Fixed the size of the FunctionDependent5 field in the
- * MPI2_DEFAULT_REPLY structure.
- * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
- * Removed the MPI-defined Fault Codes and extended the
- * product specific codes up to 0xEFFF.
- * Added a sixth key value for the WriteSequence register
- * and changed the flush value to 0x0.
- * Added message function codes for Diagnostic Buffer Post
- * and Diagnsotic Release.
- * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
- * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
- * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
- * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
- * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added #defines for marking a reply descriptor as unused.
- * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
- * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
- * Moved LUN field defines from mpi2_init.h.
- * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
- * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
- * In all request and reply descriptors, replaced VF_ID
- * field with MSIxIndex field.
- * Removed DevHandle field from
- * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
- * bytes reserved.
- * Added RAID Accelerator functionality.
- * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
- * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added MSI-x index mask and shift for Reply Post Host
- * Index register.
- * Added function code for Host Based Discovery Action.
- * --------------------------------------------------------------------------
-
-mpi2_cnfg.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
- * Added Manufacturing Page 11.
- * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
- * define.
- * 06-26-07 02.00.02 Adding generic structure for product-specific
- * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
- * Rework of BIOS Page 2 configuration page.
- * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
- * forms.
- * Added configuration pages IOC Page 8 and Driver
- * Persistent Mapping Page 0.
- * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
- * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
- * RAID Physical Disk Pages 0 and 1, RAID Configuration
- * Page 0).
- * Added new value for AccessStatus field of SAS Device
- * Page 0 (_SATA_NEEDS_INITIALIZATION).
- * 10-31-07 02.00.04 Added missing SEPDevHandle field to
- * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
- * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
- * NVDATA.
- * Modified IOC Page 7 to use masks and added field for
- * SASBroadcastPrimitiveMasks.
- * Added MPI2_CONFIG_PAGE_BIOS_4.
- * Added MPI2_CONFIG_PAGE_LOG_0.
- * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
- * Added SAS Device IDs.
- * Updated Integrated RAID configuration pages including
- * Manufacturing Page 4, IOC Page 6, and RAID Configuration
- * Page 0.
- * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
- * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
- * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
- * Added missing MaxNumRoutedSasAddresses field to
- * MPI2_CONFIG_PAGE_EXPANDER_0.
- * Added SAS Port Page 0.
- * Modified structure layout for
- * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
- * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
- * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
- * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
- * to 0x000000FF.
- * Added two new values for the Physical Disk Coercion Size
- * bits in the Flags field of Manufacturing Page 4.
- * Added product-specific Manufacturing pages 16 to 31.
- * Modified Flags bits for controlling write cache on SATA
- * drives in IO Unit Page 1.
- * Added new bit to AdditionalControlFlags of SAS IO Unit
- * Page 1 to control Invalid Topology Correction.
- * Added SupportedPhysDisks field to RAID Volume Page 1 and
- * added related defines.
- * Added additional defines for RAID Volume Page 0
- * VolumeStatusFlags field.
- * Modified meaning of RAID Volume Page 0 VolumeSettings
- * define for auto-configure of hot-swap drives.
- * Added PhysDiskAttributes field (and related defines) to
- * RAID Physical Disk Page 0.
- * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
- * Added three new DiscoveryStatus bits for SAS IO Unit
- * Page 0 and SAS Expander Page 0.
- * Removed multiplexing information from SAS IO Unit pages.
- * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
- * Removed Zone Address Resolved bit from PhyInfo and from
- * Expander Page 0 Flags field.
- * Added two new AccessStatus values to SAS Device Page 0
- * for indicating routing problems. Added 3 reserved words
- * to this page.
- * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
- * Inserted missing reserved field into structure for IOC
- * Page 6.
- * Added more pending task bits to RAID Volume Page 0
- * VolumeStatusFlags defines.
- * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
- * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
- * and SAS Expander Page 0 to flag a downstream initiator
- * when in simplified routing mode.
- * Removed SATA Init Failure defines for DiscoveryStatus
- * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
- * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
- * Added PortGroups, DmaGroup, and ControlGroup fields to
- * SAS Device Page 0.
- * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
- * Unit Page 6.
- * Added expander reduced functionality data to SAS
- * Expander Page 0.
- * Added SAS PHY Page 2 and SAS PHY Page 3.
- * 07-30-09 02.00.12 Added IO Unit Page 7.
- * Added new device ids.
- * Added SAS IO Unit Page 5.
- * Added partial and slumber power management capable flags
- * to SAS Device Page 0 Flags field.
- * Added PhyInfo defines for power condition.
- * Added Ethernet configuration pages.
- * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
- * Added SAS PHY Page 4 structure and defines.
- * --------------------------------------------------------------------------
-
-mpi2_init.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
- * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
- * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
- * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
- * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
- * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
- * Control field Task Attribute flags.
- * Moved LUN field defines to mpi2.h becasue they are
- * common to many structures.
- * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
- * Query Asynchronous Event.
- * Defined two new bits in the SlotStatus field of the SCSI
- * Enclosure Processor Request and Reply.
- * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
- * both SCSI IO Error Reply and SCSI Task Management Reply.
- * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
- * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
- * --------------------------------------------------------------------------
-
-mpi2_ioc.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
- * MaxTargets.
- * Added TotalImageSize field to FWDownload Request.
- * Added reserved words to FWUpload Request.
- * 06-26-07 02.00.02 Added IR Configuration Change List Event.
- * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
- * request and replaced it with
- * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
- * Replaced the MinReplyQueueDepth field of the IOCFacts
- * reply with MaxReplyDescriptorPostQueueDepth.
- * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
- * depth for the Reply Descriptor Post Queue.
- * Added SASAddress field to Initiator Device Table
- * Overflow Event data.
- * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
- * for SAS Initiator Device Status Change Event data.
- * Modified Reason Code defines for SAS Topology Change
- * List Event data, including adding a bit for PHY Vacant
- * status, and adding a mask for the Reason Code.
- * Added define for
- * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
- * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
- * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
- * the IOCFacts Reply.
- * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Moved MPI2_VERSION_UNION to mpi2.h.
- * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
- * instead of enables, and added SASBroadcastPrimitiveMasks
- * field.
- * Added Log Entry Added Event and related structure.
- * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
- * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
- * Added MaxVolumes and MaxPersistentEntries fields to
- * IOCFacts reply.
- * Added ProtocalFlags and IOCCapabilities fields to
- * MPI2_FW_IMAGE_HEADER.
- * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
- * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
- * a U16 (from a U32).
- * Removed extra 's' from EventMasks name.
- * 06-27-08 02.00.08 Fixed an offset in a comment.
- * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
- * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
- * renamed MinReplyFrameSize to ReplyFrameSize.
- * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
- * Added two new RAIDOperation values for Integrated RAID
- * Operations Status Event data.
- * Added four new IR Configuration Change List Event data
- * ReasonCode values.
- * Added two new ReasonCode defines for SAS Device Status
- * Change Event data.
- * Added three new DiscoveryStatus bits for the SAS
- * Discovery event data.
- * Added Multiplexing Status Change bit to the PhyStatus
- * field of the SAS Topology Change List event data.
- * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
- * BootFlags are now product-specific.
- * Added defines for the indivdual signature bytes
- * for MPI2_INIT_IMAGE_FOOTER.
- * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
- * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
- * define.
- * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
- * define.
- * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
- * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
- * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
- * Added two new reason codes for SAS Device Status Change
- * Event.
- * Added new event: SAS PHY Counter.
- * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
- * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Added new product id family for 2208.
- * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
- * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
- * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
- * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
- * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
- * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
- * Added Host Based Discovery Phy Event data.
- * Added defines for ProductID Product field
- * (MPI2_FW_HEADER_PID_).
- * Modified values for SAS ProductID Family
- * (MPI2_FW_HEADER_PID_FAMILY_).
- * --------------------------------------------------------------------------
-
-mpi2_raid.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
- * including the Actions and ActionData.
- * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
- * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
- * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
- * can be sized by the build environment.
- * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
- * VolumeCreationFlags and marked the old one as obsolete.
- * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
- * --------------------------------------------------------------------------
-
-mpi2_sas.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
- * Control Request.
- * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
- * Request.
- * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
- * to MPI2_SGE_IO_UNION since it supports chained SGLs.
- * 05-12-10 02.00.04 Modified some comments.
- * --------------------------------------------------------------------------
-
-mpi2_targ.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to
- * BufferPostFlags field of CommandBufferPostBase Request.
- * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
- * 10-02-08 02.00.03 Removed NextCmdBufferOffset from
- * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST.
- * Target Status Send Request only takes a single SGE for
- * response data.
- * --------------------------------------------------------------------------
-
-mpi2_tool.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
- * structures and defines.
- * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
- * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
- * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
- * and reply messages.
- * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
- * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
- * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
- * --------------------------------------------------------------------------
-
-mpi2_type.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * --------------------------------------------------------------------------
-
-mpi2_ra.h
- * 05-06-09 02.00.00 Initial version.
- * --------------------------------------------------------------------------
-
-mpi2_hbd.h
- * 10-28-09 02.00.00 Initial version.
- * --------------------------------------------------------------------------
-
-
-mpi2_history.txt Parts list history
-
-Filename 02.00.14 02.00.13 02.00.12
----------- -------- -------- --------
-mpi2.h 02.00.14 02.00.13 02.00.12
-mpi2_cnfg.h 02.00.13 02.00.12 02.00.11
-mpi2_init.h 02.00.08 02.00.07 02.00.07
-mpi2_ioc.h 02.00.13 02.00.12 02.00.11
-mpi2_raid.h 02.00.04 02.00.04 02.00.03
-mpi2_sas.h 02.00.03 02.00.02 02.00.02
-mpi2_targ.h 02.00.03 02.00.03 02.00.03
-mpi2_tool.h 02.00.04 02.00.04 02.00.03
-mpi2_type.h 02.00.00 02.00.00 02.00.00
-mpi2_ra.h 02.00.00 02.00.00 02.00.00
-mpi2_hbd.h 02.00.00
-
-Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
----------- -------- -------- -------- -------- -------- --------
-mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
-mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06
-mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03
-mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06
-mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02
-mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01
-mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02
-mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
-mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-
-Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
----------- -------- -------- -------- -------- -------- --------
-mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00
-mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
-mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00
-mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
-mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 608f6d6e6fca..fdffde1ebc0f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -6,7 +6,7 @@
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
- * mpi2_sas.h Version: 02.00.04
+ * mpi2_sas.h Version: 02.00.05
*
* Version History
* ---------------
@@ -21,6 +21,7 @@
* 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
* to MPI2_SGE_IO_UNION since it supports chained SGLs.
* 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
* --------------------------------------------------------------------------
*/
@@ -163,7 +164,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
U32 Reserved4; /* 0x14 */
U32 DataLength; /* 0x18 */
U8 CommandFIS[20]; /* 0x1C */
- MPI2_SGE_IO_UNION SGL; /* 0x20 */
+ MPI2_SGE_IO_UNION SGL; /* 0x30 */
} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
@@ -246,6 +247,8 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST
#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
/* values for the PrimFlags field */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 5c6e3a67bb94..2a4bceda364b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.05
+ * mpi2_tool.h Version: 02.00.06
*
* Version History
* ---------------
@@ -23,6 +23,8 @@
* Added MPI2_DIAG_BUF_TYPE_EXTENDED.
* Incremented MPI2_DIAG_BUF_TYPE_COUNT.
* 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
* --------------------------------------------------------------------------
*/
@@ -354,6 +356,10 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
/* count of the number of buffer types */
#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+/* values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
+
/****************************************************************************
* Diagnostic Buffer Post reply
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 9ead0399808a..e8a6f1cf1e4b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -752,20 +752,19 @@ static u8
_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
int i;
- u8 cb_idx = 0xFF;
-
- if (smid >= ioc->hi_priority_smid) {
- if (smid < ioc->internal_smid) {
- i = smid - ioc->hi_priority_smid;
- cb_idx = ioc->hpr_lookup[i].cb_idx;
- } else if (smid <= ioc->hba_queue_depth) {
- i = smid - ioc->internal_smid;
- cb_idx = ioc->internal_lookup[i].cb_idx;
- }
- } else {
+ u8 cb_idx;
+
+ if (smid < ioc->hi_priority_smid) {
i = smid - 1;
cb_idx = ioc->scsi_lookup[i].cb_idx;
- }
+ } else if (smid < ioc->internal_smid) {
+ i = smid - ioc->hi_priority_smid;
+ cb_idx = ioc->hpr_lookup[i].cb_idx;
+ } else if (smid <= ioc->hba_queue_depth) {
+ i = smid - ioc->internal_smid;
+ cb_idx = ioc->internal_lookup[i].cb_idx;
+ } else
+ cb_idx = 0xFF;
return cb_idx;
}
@@ -1430,7 +1429,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
unsigned long flags;
- struct request_tracker *request;
+ struct scsiio_tracker *request;
u16 smid;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1442,7 +1441,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
}
request = list_entry(ioc->free_list.next,
- struct request_tracker, tracker_list);
+ struct scsiio_tracker, tracker_list);
request->scmd = scmd;
request->cb_idx = cb_idx;
smid = request->smid;
@@ -1496,48 +1495,47 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
struct chain_tracker *chain_req, *next;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (smid >= ioc->hi_priority_smid) {
- if (smid < ioc->internal_smid) {
- /* hi-priority */
- i = smid - ioc->hi_priority_smid;
- ioc->hpr_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->hpr_lookup[i].tracker_list,
- &ioc->hpr_free_list);
- } else {
- /* internal queue */
- i = smid - ioc->internal_smid;
- ioc->internal_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->internal_lookup[i].tracker_list,
- &ioc->internal_free_list);
+ if (smid < ioc->hi_priority_smid) {
+ /* scsiio queue */
+ i = smid - 1;
+ if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+ list_for_each_entry_safe(chain_req, next,
+ &ioc->scsi_lookup[i].chain_list, tracker_list) {
+ list_del_init(&chain_req->tracker_list);
+ list_add_tail(&chain_req->tracker_list,
+ &ioc->free_chain_list);
+ }
}
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return;
- }
- /* scsiio queue */
- i = smid - 1;
- if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
- list_for_each_entry_safe(chain_req, next,
- &ioc->scsi_lookup[i].chain_list, tracker_list) {
- list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
- &ioc->free_chain_list);
+ /*
+ * See _wait_for_commands_to_complete() call with regards
+ * to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ if (ioc->pending_io_count == 1)
+ wake_up(&ioc->reset_wq);
+ ioc->pending_io_count--;
}
+ return;
+ } else if (smid < ioc->internal_smid) {
+ /* hi-priority */
+ i = smid - ioc->hi_priority_smid;
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ } else if (smid <= ioc->hba_queue_depth) {
+ /* internal queue */
+ i = smid - ioc->internal_smid;
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
}
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].scmd = NULL;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
- &ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- /*
- * See _wait_for_commands_to_complete() call with regards to this code.
- */
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
- }
}
/**
@@ -1725,6 +1723,31 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
}
/**
+ * _base_display_intel_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL &&
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) {
+
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_INTEL_RMS2LL080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS2LL040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL040_BRANDING);
+ break;
+ }
+ }
+}
+
+/**
* _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
*
@@ -1754,6 +1777,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
ioc->bios_pg3.BiosVersion & 0x000000FF);
_base_display_dell_branding(ioc);
+ _base_display_intel_branding(ioc);
printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
@@ -2252,9 +2276,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- sz = ioc->scsiio_depth * sizeof(struct request_tracker);
+ sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
ioc->scsi_lookup_pages = get_order(sz);
- ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
+ ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
GFP_KERNEL, ioc->scsi_lookup_pages);
if (!ioc->scsi_lookup) {
printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 283568c6fb04..a3f8aa9baea4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "07.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 07
+#define MPT2SAS_DRIVER_VERSION "08.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 08
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -101,7 +101,8 @@
#define MPT_NAME_LENGTH 32 /* generic length of strings */
#define MPT_STRING_LENGTH 64
-#define MPT_MAX_CALLBACKS 16
+#define MPT_MAX_CALLBACKS 16
+
#define CAN_SLEEP 1
#define NO_SLEEP 0
@@ -154,6 +155,20 @@
#define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22
/*
+ * Intel HBA branding
+ */
+#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
+ "Intel Integrated RAID Module RMS2LL080"
+#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
+ "Intel Integrated RAID Module RMS2LL040"
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
+#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
+
+/*
* per target private data
*/
#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
@@ -431,14 +446,14 @@ struct chain_tracker {
};
/**
- * struct request_tracker - firmware request tracker
+ * struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
* @scmd: scsi request pointer
* @cb_idx: callback index
* @chain_list: list of chains associated to this IO
* @tracker_list: list of free request (ioc->free_list)
*/
-struct request_tracker {
+struct scsiio_tracker {
u16 smid;
struct scsi_cmnd *scmd;
u8 cb_idx;
@@ -447,6 +462,19 @@ struct request_tracker {
};
/**
+ * struct request_tracker - misc mf request tracker
+ * @smid: system message id
+ * @scmd: scsi request pointer
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
* struct _tr_list - target reset list
* @handle: device handle
* @state: state machine
@@ -709,7 +737,7 @@ struct MPT2SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
- struct request_tracker *scsi_lookup;
+ struct scsiio_tracker *scsi_lookup;
ulong scsi_lookup_pages;
spinlock_t scsi_lookup_lock;
struct list_head free_list;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 5ded3db6e316..6ceb7759bfe5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -6975,7 +6975,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
u32 device_state;
mpt2sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
device_state = pci_choose_state(pdev, state);
printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
index 52ac4264677d..ffbf759e46f1 100644
--- a/drivers/scsi/mvsas/Makefile
+++ b/drivers/scsi/mvsas/Makefile
@@ -21,9 +21,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
-ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
- EXTRA_CFLAGS += -DMV_DEBUG
-endif
+ccflags-$(CONFIG_SCSI_MVSAS_DEBUG) := -DMV_DEBUG
obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
mvsas-y += mv_init.o \
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index b37c8a3c1bb0..86afb13f1e79 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1005,11 +1005,23 @@ int osd_req_read_sg(struct osd_request *or,
const struct osd_sg_entry *sglist, unsigned numentries)
{
u64 len;
- int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+ u64 off;
+ int ret;
- if (ret)
- return ret;
- osd_req_read(or, obj, 0, bio, len);
+ if (numentries > 1) {
+ off = 0;
+ ret = _add_sg_continuation_descriptor(or, sglist, numentries,
+ &len);
+ if (ret)
+ return ret;
+ } else {
+ /* Optimize the case of single segment, read_sg is a
+ * bidi operation.
+ */
+ len = sglist->len;
+ off = sglist->offset;
+ }
+ osd_req_read(or, obj, off, bio, len);
return 0;
}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 54de1d1af1a7..521e2182d45b 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -1484,7 +1484,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct osst
int dbg = debugging;
#endif
- if ((buffer = (unsigned char *)vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
+ if ((buffer = vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
return (-EIO);
printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",
@@ -2296,7 +2296,7 @@ static int osst_write_header(struct osst_tape * STp, struct osst_request ** aSRp
if (STp->raw) return 0;
if (STp->header_cache == NULL) {
- if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) {
+ if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
return (-ENOMEM);
}
@@ -2484,7 +2484,7 @@ static int __osst_analyze_headers(struct osst_tape * STp, struct osst_request **
name, ppos, update_frame_cntr);
#endif
if (STp->header_cache == NULL) {
- if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) {
+ if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
return 0;
}
@@ -5851,9 +5851,7 @@ static int osst_probe(struct device *dev)
/* if this is the first attach, build the infrastructure */
write_lock(&os_scsi_tapes_lock);
if (os_scsi_tapes == NULL) {
- os_scsi_tapes =
- (struct osst_tape **)kmalloc(osst_max_dev * sizeof(struct osst_tape *),
- GFP_ATOMIC);
+ os_scsi_tapes = kmalloc(osst_max_dev * sizeof(struct osst_tape *), GFP_ATOMIC);
if (os_scsi_tapes == NULL) {
write_unlock(&os_scsi_tapes_lock);
printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n");
diff --git a/drivers/scsi/pcmcia/Makefile b/drivers/scsi/pcmcia/Makefile
index eca379059db6..683bf148b5b7 100644
--- a/drivers/scsi/pcmcia/Makefile
+++ b/drivers/scsi/pcmcia/Makefile
@@ -1,5 +1,5 @@
-EXTRA_CFLAGS += -Idrivers/scsi
+ccflags-y := -Idrivers/scsi
# 16-bit client drivers
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogic_cs.o
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index d8db0137c0c7..18b6c55cd08c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1382,53 +1382,50 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
return MPI_IO_STATUS_BUSY;
}
-static void pm8001_work_queue(struct work_struct *work)
+static void pm8001_work_fn(struct work_struct *work)
{
- struct delayed_work *dw = container_of(work, struct delayed_work, work);
- struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q);
+ struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
struct pm8001_device *pm8001_dev;
- struct domain_device *dev;
+ struct domain_device *dev;
- switch (wq->handler) {
+ switch (pw->handler) {
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_IN_ERROR:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_NON_OPERATIONAL:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
}
- list_del(&wq->entry);
- kfree(wq);
+ kfree(pw);
}
static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
int handler)
{
- struct pm8001_wq *wq;
+ struct pm8001_work *pw;
int ret = 0;
- wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC);
- if (wq) {
- wq->pm8001_ha = pm8001_ha;
- wq->data = data;
- wq->handler = handler;
- INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue);
- list_add_tail(&wq->entry, &pm8001_ha->wq_list);
- schedule_delayed_work(&wq->work_q, 0);
+ pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC);
+ if (pw) {
+ pw->pm8001_ha = pm8001_ha;
+ pw->data = data;
+ pw->handler = handler;
+ INIT_WORK(&pw->work, pm8001_work_fn);
+ queue_work(pm8001_wq, &pw->work);
} else
ret = -ENOMEM;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index b95285f3383f..002360da01e3 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -51,6 +51,8 @@ static int pm8001_id;
LIST_HEAD(hba_list);
+struct workqueue_struct *pm8001_wq;
+
/**
* The main structure which LLDD must register for scsi core.
*/
@@ -134,7 +136,6 @@ static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
int i;
- struct pm8001_wq *wq;
if (!pm8001_ha)
return;
@@ -150,8 +151,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
if (pm8001_ha->shost)
scsi_host_put(pm8001_ha->shost);
- list_for_each_entry(wq, &pm8001_ha->wq_list, entry)
- cancel_delayed_work(&wq->work_q);
+ flush_workqueue(pm8001_wq);
kfree(pm8001_ha->tags);
kfree(pm8001_ha);
}
@@ -381,7 +381,6 @@ pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
pm8001_ha->sas = sha;
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
- INIT_LIST_HEAD(&pm8001_ha->wq_list);
pm8001_ha->logging_level = 0x01;
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
#ifdef PM8001_USE_TASKLET
@@ -758,7 +757,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
int i , pos;
u32 device_state;
pm8001_ha = sha->lldd_ha;
- flush_scheduled_work();
+ flush_workqueue(pm8001_wq);
scsi_block_requests(pm8001_ha->shost);
pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pos == 0) {
@@ -870,17 +869,26 @@ static struct pci_driver pm8001_pci_driver = {
*/
static int __init pm8001_init(void)
{
- int rc;
+ int rc = -ENOMEM;
+
+ pm8001_wq = alloc_workqueue("pm8001", 0, 0);
+ if (!pm8001_wq)
+ goto err;
+
pm8001_id = 0;
pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
if (!pm8001_stt)
- return -ENOMEM;
+ goto err_wq;
rc = pci_register_driver(&pm8001_pci_driver);
if (rc)
- goto err_out;
+ goto err_tp;
return 0;
-err_out:
+
+err_tp:
sas_release_transport(pm8001_stt);
+err_wq:
+ destroy_workqueue(pm8001_wq);
+err:
return rc;
}
@@ -888,6 +896,7 @@ static void __exit pm8001_exit(void)
{
pci_unregister_driver(&pm8001_pci_driver);
sas_release_transport(pm8001_stt);
+ destroy_workqueue(pm8001_wq);
}
module_init(pm8001_init);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 7f064f9ca828..bdb6b27dedd6 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -50,6 +50,7 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
@@ -379,18 +380,16 @@ struct pm8001_hba_info {
#ifdef PM8001_USE_TASKLET
struct tasklet_struct tasklet;
#endif
- struct list_head wq_list;
u32 logging_level;
u32 fw_status;
const struct firmware *fw_image;
};
-struct pm8001_wq {
- struct delayed_work work_q;
+struct pm8001_work {
+ struct work_struct work;
struct pm8001_hba_info *pm8001_ha;
void *data;
int handler;
- struct list_head entry;
};
struct pm8001_fw_image_header {
@@ -460,6 +459,9 @@ struct fw_control_ex {
void *param3;
};
+/* pm8001 workqueue */
+extern struct workqueue_struct *pm8001_wq;
+
/******************** function prototype *********************/
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 321cf3ae8630..bcf858e88c64 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5454,7 +5454,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev)
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
- flush_scheduled_work();
+ flush_work_sync(&pinstance->worker_q);
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ccfc8e78be21..6c51c0a35b9e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2402,13 +2402,13 @@ struct qla_hw_data {
volatile struct {
uint32_t mbox_int :1;
uint32_t mbox_busy :1;
-
uint32_t disable_risc_code_load :1;
uint32_t enable_64bit_addressing :1;
uint32_t enable_lip_reset :1;
uint32_t enable_target_reset :1;
uint32_t enable_lip_full_login :1;
uint32_t enable_led_scheme :1;
+
uint32_t msi_enabled :1;
uint32_t msix_enabled :1;
uint32_t disable_serdes :1;
@@ -2417,6 +2417,7 @@ struct qla_hw_data {
uint32_t pci_channel_io_perm_failure :1;
uint32_t fce_enabled :1;
uint32_t fac_supported :1;
+
uint32_t chip_reset_done :1;
uint32_t port0 :1;
uint32_t running_gold_fw :1;
@@ -2424,9 +2425,11 @@ struct qla_hw_data {
uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
uint32_t fcp_prio_enabled :1;
- uint32_t fw_hung :1;
- uint32_t quiesce_owner:1;
+ uint32_t isp82xx_fw_hung:1;
+
+ uint32_t quiesce_owner:1;
uint32_t thermal_supported:1;
+ uint32_t isp82xx_reset_hdlr_active:1;
/* 26 bits */
} flags;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 89e900adb679..d48326ee3f61 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -565,6 +565,7 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
extern void qla82xx_start_iocbs(srb_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4c083928c2fb..74a91b6dfc68 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,8 +121,11 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
- DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
- vha->host_no, routine, ms_pkt->entry_status));
+ DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status "
+ "(%x) on port_id: %02x%02x%02x.\n",
+ vha->host_no, routine, ms_pkt->entry_status,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
} else {
if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
@@ -136,8 +139,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
DEBUG2_3(printk("scsi(%ld): %s failed, "
- "rejected request:\n", vha->host_no,
- routine));
+ "rejected request on port_id: %02x%02x%02x\n",
+ vha->host_no, routine,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
DEBUG2_3(qla2x00_dump_buffer(
(uint8_t *)&ct_rsp->header,
sizeof(struct ct_rsp_hdr)));
@@ -147,8 +152,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
break;
default:
DEBUG2_3(printk("scsi(%ld): %s failed, completion "
- "status (%x).\n", vha->host_no, routine,
- comp_status));
+ "status (%x) on port_id: %02x%02x%02x.\n",
+ vha->host_no, routine, comp_status,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
break;
}
}
@@ -1965,7 +1972,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
"scsi(%ld): GFF_ID issue IOCB failed "
"(%d).\n", vha->host_no, rval));
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
- "GPN_ID") != QLA_SUCCESS) {
+ "GFF_ID") != QLA_SUCCESS) {
DEBUG2_3(printk(KERN_INFO
"scsi(%ld): GFF_ID IOCB status had a "
"failure status code\n", vha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d9479c3fe5f8..8575808dbae0 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1967,7 +1967,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
} else {
/* Mailbox cmd failed. Timeout on min_wait. */
if (time_after_eq(jiffies, mtime) ||
- (IS_QLA82XX(ha) && ha->flags.fw_hung))
+ ha->flags.isp82xx_fw_hung)
break;
}
@@ -3945,8 +3945,13 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
unsigned long flags;
+ fc_port_t *fcport;
- vha->flags.online = 0;
+ /* For ISP82XX, driver waits for completion of the commands.
+ * online flag should be set.
+ */
+ if (!IS_QLA82XX(ha))
+ vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ha->qla_stats.total_isp_aborts++;
@@ -3954,7 +3959,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
qla_printk(KERN_INFO, ha,
"Performing ISP error recovery - ha= %p.\n", ha);
- /* Chip reset does not apply to 82XX */
+ /* For ISP82XX, reset_chip is just disabling interrupts.
+ * Driver waits for the completion of the commands, so
+ * the interrupts need to be enabled.
+ */
if (!IS_QLA82XX(ha))
ha->isp_ops->reset_chip(vha);
@@ -3980,14 +3988,31 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
LOOP_DOWN_TIME);
}
+ /* Clear all async request states across all VPs. */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ list_for_each_entry(fcport, &vp->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (!ha->flags.eeh_busy) {
/* Make sure for ISP 82XX IO DMA is complete */
if (IS_QLA82XX(ha)) {
- if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
- WAIT_HOST) == QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Done wait for pending commands\n"));
- }
+ qla82xx_chip_reset_cleanup(vha);
+
+ /* Done waiting for pending commands.
+ * Reset the online flag.
+ */
+ vha->flags.online = 0;
}
/* Requeue all commands in outstanding command list. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 4c1ba6263eb3..d78d5896fc33 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -328,6 +328,7 @@ qla2x00_start_scsi(srb_t *sp)
struct qla_hw_data *ha;
struct req_que *req;
struct rsp_que *rsp;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -406,7 +407,22 @@ qla2x00_start_scsi(srb_t *sp)
cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
/* Update tagged queuing modifier */
- cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_HEAD_TAG);
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_ORDERED_TAG);
+ break;
+ default:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ break;
+ }
+ }
/* Load SCSI command packet. */
memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
@@ -971,6 +987,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t fcp_cmnd_len;
struct fcp_cmnd *fcp_cmnd;
dma_addr_t crc_ctx_dma;
+ char tag[2];
cmd = sp->cmd;
@@ -1068,9 +1085,27 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
- fcp_cmnd->task_attribute = 0;
fcp_cmnd->task_management = 0;
+ /*
+ * Update tagged queuing modifier if using command tag queuing
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ fcp_cmnd->task_attribute = TSK_ORDERED;
+ break;
+ default:
+ fcp_cmnd->task_attribute = 0;
+ break;
+ }
+ } else {
+ fcp_cmnd->task_attribute = 0;
+ }
+
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
@@ -1177,6 +1212,7 @@ qla24xx_start_scsi(srb_t *sp)
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -1260,6 +1296,18 @@ qla24xx_start_scsi(srb_t *sp)
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
/* Load SCSI command packet. */
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e473e9fb363c..7a7c0ecfe7dd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -71,6 +71,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ if (ha->flags.isp82xx_fw_hung) {
+ /* Setting Link-Down error */
+ mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ rval = QLA_FUNCTION_FAILED;
+ goto premature_exit;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -83,13 +90,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
- /* Setting Link-Down error */
- mcp->mb[0] = MBS_LINK_DOWN_ERROR;
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
- }
-
ha->flags.mbox_busy = 1;
/* Save mailbox command for debug */
ha->mcp = mcp;
@@ -223,7 +223,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
+ if (ha->flags.isp82xx_fw_hung) {
ha->flags.mbox_busy = 0;
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -2462,22 +2462,19 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
"-- completion status (%x).\n", __func__,
vha->host_no, le16_to_cpu(sts->comp_status)));
rval = QLA_FUNCTION_FAILED;
- } else if (!(le16_to_cpu(sts->scsi_status) &
- SS_RESPONSE_INFO_LEN_VALID)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- no response info (%x).\n", __func__, vha->host_no,
- le16_to_cpu(sts->scsi_status)));
- rval = QLA_FUNCTION_FAILED;
- } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- not enough response info (%d).\n", __func__,
- vha->host_no, le32_to_cpu(sts->rsp_data_len)));
- rval = QLA_FUNCTION_FAILED;
- } else if (sts->data[3]) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- response (%x).\n", __func__,
- vha->host_no, sts->data[3]));
- rval = QLA_FUNCTION_FAILED;
+ } else if (le16_to_cpu(sts->scsi_status) &
+ SS_RESPONSE_INFO_LEN_VALID) {
+ if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent "
+ "data length -- not enough response info (%d).\n",
+ __func__, vha->host_no,
+ le32_to_cpu(sts->rsp_data_len)));
+ } else if (sts->data[3]) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- response (%x).\n", __func__,
+ vha->host_no, sts->data[3]));
+ rval = QLA_FUNCTION_FAILED;
+ }
}
/* Issue marker IOCB. */
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index fdb96a3584a5..76ec876e6b21 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -7,6 +7,7 @@
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>
+#include <scsi/scsi_tcq.h>
#define MASK(n) ((1ULL<<(n))-1)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
@@ -2547,7 +2548,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *dsd_seg++ = dsd_list_len;
+ cmd_pkt->fcp_data_dseg_len = dsd_list_len;
} else {
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
@@ -2620,6 +2621,7 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -2770,6 +2772,22 @@ sufficient_dsds:
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_ORDERED;
+ break;
+ }
+ }
+
/* build FCP_CMND IU */
memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
@@ -2835,6 +2853,20 @@ sufficient_dsds:
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
sizeof(cmd_pkt->lun));
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
/* Load SCSI command packet. */
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
@@ -3457,46 +3489,28 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
}
}
-static void
+int
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
- uint32_t fw_heartbeat_counter, halt_status;
- struct qla_hw_data *ha = vha->hw;
+ uint32_t fw_heartbeat_counter;
+ int status = 0;
- fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
+ QLA82XX_PEG_ALIVE_COUNTER);
/* all 0xff, assume AER/EEH in progress, ignore */
if (fw_heartbeat_counter == 0xffffffff)
- return;
+ return status;
if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
vha->seconds_since_last_heartbeat++;
/* FW not alive after 2 seconds */
if (vha->seconds_since_last_heartbeat == 2) {
vha->seconds_since_last_heartbeat = 0;
- halt_status = qla82xx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
- if (halt_status & HALT_STATUS_UNRECOVERABLE) {
- set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
- } else {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): %s - detect abort needed\n",
- vha->host_no, __func__);
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- }
- qla2xxx_wake_dpc(vha);
- ha->flags.fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Due to fw hung, doing premature "
- "completion of mbx command\n"));
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ status = 1;
}
} else
vha->seconds_since_last_heartbeat = 0;
vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
}
/*
@@ -3557,6 +3571,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
break;
case QLA82XX_DEV_NEED_RESET:
qla82xx_need_reset_handler(vha);
+ dev_init_timeout = jiffies +
+ (ha->nx_dev_init_timeout * HZ);
break;
case QLA82XX_DEV_NEED_QUIESCENT:
qla82xx_need_qsnt_handler(vha);
@@ -3596,30 +3612,18 @@ exit:
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
- uint32_t dev_state;
+ uint32_t dev_state, halt_status;
struct qla_hw_data *ha = vha->hw;
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-
/* don't poll if reset is going on */
- if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
- if (dev_state == QLA82XX_DEV_NEED_RESET) {
+ if (!ha->flags.isp82xx_reset_hdlr_active) {
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
qla_printk(KERN_WARNING, ha,
- "%s(): Adapter reset needed!\n", __func__);
+ "%s(): Adapter reset needed!\n", __func__);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
- ha->flags.fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Need reset, doing premature "
- "completion of mbx command\n"));
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
DEBUG(qla_printk(KERN_INFO, ha,
@@ -3629,6 +3633,31 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
qla82xx_check_fw_alive(vha);
+ if (qla82xx_check_fw_alive(vha)) {
+ halt_status = qla82xx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+ if (halt_status & HALT_STATUS_UNRECOVERABLE) {
+ set_bit(ISP_UNRECOVERABLE,
+ &vha->dpc_flags);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): %s - detect abort needed\n",
+ vha->host_no, __func__);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ qla2xxx_wake_dpc(vha);
+ ha->flags.isp82xx_fw_hung = 1;
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ DEBUG2(qla_printk(KERN_ERR, ha,
+ "Due to fw hung, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+ }
}
}
}
@@ -3663,6 +3692,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
"Exiting.\n", __func__, vha->host_no);
return QLA_SUCCESS;
}
+ ha->flags.isp82xx_reset_hdlr_active = 1;
qla82xx_idc_lock(ha);
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
@@ -3683,7 +3713,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
qla82xx_idc_unlock(ha);
if (rval == QLA_SUCCESS) {
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
+ ha->flags.isp82xx_reset_hdlr_active = 0;
qla82xx_restart_isp(vha);
}
@@ -3791,3 +3822,71 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
return status;
}
+
+void
+qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
+{
+ int i;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Check if 82XX firmware is alive or not
+ * We may have arrived here from NEED_RESET
+ * detection only
+ */
+ if (!ha->flags.isp82xx_fw_hung) {
+ for (i = 0; i < 2; i++) {
+ msleep(1000);
+ if (qla82xx_check_fw_alive(vha)) {
+ ha->flags.isp82xx_fw_hung = 1;
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ complete(&ha->mbx_intr_comp);
+ }
+ break;
+ }
+ }
+ }
+
+ /* Abort all commands gracefully if fw NOT hung */
+ if (!ha->flags.isp82xx_fw_hung) {
+ int cnt, que;
+ srb_t *sp;
+ struct req_que *req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ if (!sp->ctx ||
+ (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort command failed in %s\n",
+ vha->host_no, __func__);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort command success in %s\n",
+ vha->host_no, __func__);
+ }
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Wait for pending cmds (physical and virtual) to complete */
+ if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ WAIT_HOST) == QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Done wait for pending commands\n"));
+ }
+ }
+}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e90f7c16b956..75a966c94860 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -506,7 +506,7 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
static inline srb_t *
qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
- struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+ struct scsi_cmnd *cmd)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -520,14 +520,13 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->cmd = cmd;
sp->flags = 0;
CMD_SP(cmd) = (void *)sp;
- cmd->scsi_done = done;
sp->ctx = NULL;
return sp;
}
static int
-qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -537,7 +536,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
srb_t *sp;
int rval;
- spin_unlock_irq(vha->host->host_lock);
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure)
cmd->result = DID_NO_CONNECT << 16;
@@ -569,40 +567,32 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
goto qc24_target_busy;
}
- sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
+ sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
if (!sp)
- goto qc24_host_busy_lock;
+ goto qc24_host_busy;
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS)
goto qc24_host_busy_free_sp;
- spin_lock_irq(vha->host->host_lock);
-
return 0;
qc24_host_busy_free_sp:
qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
-qc24_host_busy_lock:
- spin_lock_irq(vha->host->host_lock);
+qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc24_target_busy:
- spin_lock_irq(vha->host->host_lock);
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
- spin_lock_irq(vha->host->host_lock);
- done(cmd);
+ cmd->scsi_done(cmd);
return 0;
}
-static DEF_SCSI_QCMD(qla2xxx_queuecommand)
-
-
/*
* qla2x00_eh_wait_on_command
* Waits for the command to be returned by the Firmware for some
@@ -821,17 +811,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
srb_t *sp;
- int ret = SUCCESS;
+ int ret;
unsigned int id, lun;
unsigned long flags;
int wait = 0;
struct qla_hw_data *ha = vha->hw;
- fc_block_scsi_eh(cmd);
-
if (!CMD_SP(cmd))
return SUCCESS;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = SUCCESS;
+
id = cmd->device->id;
lun = cmd->device->lun;
@@ -940,11 +933,13 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int err;
- fc_block_scsi_eh(cmd);
-
if (!fcport)
return FAILED;
+ err = fc_block_scsi_eh(cmd);
+ if (err != 0)
+ return err;
+
qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
vha->host_no, cmd->device->id, cmd->device->lun, name);
@@ -1018,14 +1013,17 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
int ret = FAILED;
unsigned int id, lun;
- fc_block_scsi_eh(cmd);
-
id = cmd->device->id;
lun = cmd->device->lun;
if (!fcport)
return ret;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = FAILED;
+
qla_printk(KERN_INFO, vha->hw,
"scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
@@ -1078,14 +1076,17 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
unsigned int id, lun;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- fc_block_scsi_eh(cmd);
-
id = cmd->device->id;
lun = cmd->device->lun;
if (!fcport)
return ret;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = FAILED;
+
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
@@ -3805,7 +3806,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
ha->flags.eeh_busy = 1;
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
- ha->flags.fw_hung = 1;
+ ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
@@ -3945,7 +3946,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
qla82xx_idc_unlock(ha);
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
qla82xx_idc_lock(ha);
/* Clear driver state register */
@@ -3958,7 +3959,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
"This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
qla82xx_idc_lock(ha);
qla82xx_set_drv_active(base_vha);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index cf0075a2d0c2..3a260c3f055a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.05-k0"
+#define QLA2XXX_VERSION "8.03.07.00"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 5
+#define QLA_DRIVER_PATCH_VER 7
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 6ffbe9727dff..03e028e6e809 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1027,7 +1027,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
((ddb_entry->default_time2wait +
4) * HZ);
- DEBUG2(printk("scsi%ld: ddb [%d] initate"
+ DEBUG2(printk("scsi%ld: ddb [%d] initiate"
" RELOGIN after %d seconds\n",
ha->host_no,
ddb_entry->fw_ddb_index,
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 3fc1d256636f..967836ef5ab2 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -812,7 +812,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
);
start_dpc++;
DEBUG(printk("scsi%ld:%d:%d: ddb [%d] "
- "initate relogin after"
+ "initiate relogin after"
" %d seconds\n",
ha->host_no, ddb_entry->bus,
ddb_entry->target,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a6b2d72022fc..fa5758cbdedb 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -89,32 +89,34 @@ static const char * scsi_debug_version_date = "20100324";
/* With these defaults, this driver will make 1 host with 1 target
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
+#define DEF_ATO 1
#define DEF_DELAY 1
#define DEF_DEV_SIZE_MB 8
-#define DEF_EVERY_NTH 0
-#define DEF_NUM_PARTS 0
-#define DEF_OPTS 0
-#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
-#define DEF_PTYPE 0
+#define DEF_DIF 0
+#define DEF_DIX 0
#define DEF_D_SENSE 0
-#define DEF_NO_LUN_0 0
-#define DEF_VIRTUAL_GB 0
+#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
-#define DEF_VPD_USE_HOSTNO 1
-#define DEF_SECTOR_SIZE 512
-#define DEF_DIX 0
-#define DEF_DIF 0
#define DEF_GUARD 0
-#define DEF_ATO 1
-#define DEF_PHYSBLK_EXP 0
+#define DEF_LBPU 0
+#define DEF_LBPWS 0
+#define DEF_LBPWS10 0
#define DEF_LOWEST_ALIGNED 0
+#define DEF_NO_LUN_0 0
+#define DEF_NUM_PARTS 0
+#define DEF_OPTS 0
#define DEF_OPT_BLKS 64
+#define DEF_PHYSBLK_EXP 0
+#define DEF_PTYPE 0
+#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
+#define DEF_SECTOR_SIZE 512
+#define DEF_UNMAP_ALIGNMENT 0
+#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
-#define DEF_UNMAP_GRANULARITY 1
-#define DEF_UNMAP_ALIGNMENT 0
-#define DEF_TPWS 0
-#define DEF_TPU 0
+#define DEF_VIRTUAL_GB 0
+#define DEF_VPD_USE_HOSTNO 1
+#define DEF_WRITESAME_LENGTH 0xFFFF
/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE 1
@@ -144,6 +146,7 @@ static const char * scsi_debug_version_date = "20100324";
/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
+#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
* or "peripheral device" addressing (value 0) */
@@ -155,36 +158,38 @@ static const char * scsi_debug_version_date = "20100324";
#define SCSI_DEBUG_CANQUEUE 255
static int scsi_debug_add_host = DEF_NUM_HOST;
+static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int scsi_debug_dif = DEF_DIF;
+static int scsi_debug_dix = DEF_DIX;
+static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
+static int scsi_debug_fake_rw = DEF_FAKE_RW;
+static int scsi_debug_guard = DEF_GUARD;
+static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
-static int scsi_debug_num_parts = DEF_NUM_PARTS;
+static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
static int scsi_debug_no_uld = 0;
+static int scsi_debug_num_parts = DEF_NUM_PARTS;
static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
+static int scsi_debug_opt_blks = DEF_OPT_BLKS;
static int scsi_debug_opts = DEF_OPTS;
-static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
+static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
-static int scsi_debug_dsense = DEF_D_SENSE;
-static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
+static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
+static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
-static int scsi_debug_fake_rw = DEF_FAKE_RW;
static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
-static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
-static int scsi_debug_dix = DEF_DIX;
-static int scsi_debug_dif = DEF_DIF;
-static int scsi_debug_guard = DEF_GUARD;
-static int scsi_debug_ato = DEF_ATO;
-static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
-static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
-static int scsi_debug_opt_blks = DEF_OPT_BLKS;
-static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
-static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
-static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static unsigned int scsi_debug_lbpu = DEF_LBPU;
+static unsigned int scsi_debug_lbpws = DEF_LBPWS;
+static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
-static unsigned int scsi_debug_tpws = DEF_TPWS;
-static unsigned int scsi_debug_tpu = DEF_TPU;
+static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
+static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
+static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
static int scsi_debug_cmnd_count = 0;
@@ -206,6 +211,11 @@ static int sdebug_sectors_per; /* sectors per cylinder */
#define SCSI_DEBUG_MAX_CMD_LEN 32
+static unsigned int scsi_debug_lbp(void)
+{
+ return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
+}
+
struct sdebug_dev_info {
struct list_head dev_list;
unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
@@ -727,7 +737,7 @@ static int inquiry_evpd_b0(unsigned char * arr)
/* Optimal Transfer Length */
put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
- if (scsi_debug_tpu) {
+ if (scsi_debug_lbpu) {
/* Maximum Unmap LBA Count */
put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
@@ -744,7 +754,10 @@ static int inquiry_evpd_b0(unsigned char * arr)
/* Optimal Unmap Granularity */
put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
- return 0x3c; /* Mandatory page length for thin provisioning */
+ /* Maximum WRITE SAME Length */
+ put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
+
+ return 0x3c; /* Mandatory page length for Logical Block Provisioning */
return sizeof(vpdb0_data);
}
@@ -767,12 +780,15 @@ static int inquiry_evpd_b2(unsigned char *arr)
memset(arr, 0, 0x8);
arr[0] = 0; /* threshold exponent */
- if (scsi_debug_tpu)
+ if (scsi_debug_lbpu)
arr[1] = 1 << 7;
- if (scsi_debug_tpws)
+ if (scsi_debug_lbpws)
arr[1] |= 1 << 6;
+ if (scsi_debug_lbpws10)
+ arr[1] |= 1 << 5;
+
return 0x8;
}
@@ -831,7 +847,8 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
arr[n++] = 0x89; /* ATA information */
arr[n++] = 0xb0; /* Block limits (SBC) */
arr[n++] = 0xb1; /* Block characteristics (SBC) */
- arr[n++] = 0xb2; /* Thin provisioning (SBC) */
+ if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
+ arr[n++] = 0xb2;
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
arr[1] = cmd[2]; /*sanity */
@@ -879,7 +896,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_evpd_b1(&arr[4]);
- } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */
+ } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_evpd_b2(&arr[4]);
} else {
@@ -1053,8 +1070,8 @@ static int resp_readcap16(struct scsi_cmnd * scp,
arr[13] = scsi_debug_physblk_exp & 0xf;
arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
- if (scsi_debug_tpu || scsi_debug_tpws)
- arr[14] |= 0x80; /* TPE */
+ if (scsi_debug_lbp())
+ arr[14] |= 0x80; /* LBPME */
arr[15] = scsi_debug_lowest_aligned & 0xff;
@@ -1791,15 +1808,15 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
return ret;
if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
- (lba <= OPT_MEDIUM_ERR_ADDR) &&
+ (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
/* claim unrecoverable read error */
- mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR,
- 0);
+ mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
/* set info field and valid bit for fixed descriptor */
if (0x70 == (devip->sense_buff[0] & 0x7f)) {
devip->sense_buff[0] |= 0x80; /* Valid bit */
- ret = OPT_MEDIUM_ERR_ADDR;
+ ret = (lba < OPT_MEDIUM_ERR_ADDR)
+ ? OPT_MEDIUM_ERR_ADDR : (int)lba;
devip->sense_buff[3] = (ret >> 24) & 0xff;
devip->sense_buff[4] = (ret >> 16) & 0xff;
devip->sense_buff[5] = (ret >> 8) & 0xff;
@@ -2084,6 +2101,12 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
if (ret)
return ret;
+ if (num > scsi_debug_write_same_length) {
+ mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
+ 0);
+ return check_condition_result;
+ }
+
write_lock_irqsave(&atomic_rw, iflags);
if (unmap && scsi_debug_unmap_granularity) {
@@ -2695,37 +2718,40 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
/sys/bus/pseudo/drivers/scsi_debug directory is changed.
*/
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
+module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
+module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
+module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
+module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
+module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
+module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
+module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
+module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
+module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
-module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
-module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
- S_IRUGO | S_IWUSR);
module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
-module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
-module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
-module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
-module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
-module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
-module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
-module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
+module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
+module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
-module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
-module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
-module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO);
-module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO);
+module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
+module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
+ S_IRUGO | S_IWUSR);
+module_param_named(write_same_length, scsi_debug_write_same_length, int,
+ S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2733,36 +2759,38 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(SCSI_DEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
+MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
+MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
+MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
+MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
+MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
+MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
+MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
+MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
-MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
-MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
-MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
-MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
-MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
-MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
-MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
-MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
-MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
+MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
-MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
-MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
-MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)");
-MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
+MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
static char sdebug_info[256];
@@ -3150,7 +3178,7 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
{
ssize_t count;
- if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0)
+ if (!scsi_debug_lbp())
return scnprintf(buf, PAGE_SIZE, "0-%u\n",
sdebug_store_sectors);
@@ -3333,8 +3361,8 @@ static int __init scsi_debug_init(void)
memset(dif_storep, 0xff, dif_size);
}
- /* Thin Provisioning */
- if (scsi_debug_tpu || scsi_debug_tpws) {
+ /* Logical Block Provisioning */
+ if (scsi_debug_lbp()) {
unsigned int map_bytes;
scsi_debug_unmap_max_blocks =
@@ -3664,7 +3692,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
errsts = resp_readcap16(SCpnt, devip);
else if (cmd[1] == SAI_GET_LBA_STATUS) {
- if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) {
+ if (scsi_debug_lbp() == 0) {
mk_sense_buffer(devip, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
@@ -3775,8 +3803,10 @@ write:
}
break;
case WRITE_SAME_16:
+ case WRITE_SAME:
if (cmd[1] & 0x8) {
- if (scsi_debug_tpws == 0) {
+ if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
+ (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
mk_sense_buffer(devip, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
errsts = check_condition_result;
@@ -3785,8 +3815,6 @@ write:
}
if (errsts)
break;
- /* fall through */
- case WRITE_SAME:
errsts = check_readiness(SCpnt, 0, devip);
if (errsts)
break;
@@ -3798,7 +3826,7 @@ write:
if (errsts)
break;
- if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) {
+ if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
mk_sense_buffer(devip, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
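The hunks above replace the old scsi_debug_tpu/scsi_debug_tpws checks with a single scsi_debug_lbp() predicate, which is defined elsewhere in the patch. A minimal sketch consistent with the lbpu/lbpws/lbpws10 flags referenced here (the exact upstream definition may differ):

static inline int scsi_debug_lbp(void)
{
	/* non-zero when any logical block provisioning mode is enabled:
	 * UNMAP, WRITE SAME(16) with UNMAP, or WRITE SAME(10) with UNMAP */
	return scsi_debug_lbpu || scsi_debug_lbpws || scsi_debug_lbpws10;
}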
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 43fad4c09beb..82e9e5c0476e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -382,6 +382,91 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
+ * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @key: specify list to use
+ *
+ * Description:
+ * Remove and destroy one dev_info entry for @vendor, @model
+ * in list specified by @key.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+{
+ struct scsi_dev_info_list *devinfo, *found = NULL;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
+
+ list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
+ dev_info_list) {
+ if (devinfo->compatible) {
+ /*
+ * Behave like the older version of get_device_flags.
+ */
+ size_t max;
+ /*
+ * XXX why skip leading spaces? If an odd INQUIRY
+ * value, that should have been part of the
+ * scsi_static_device_list[] entry, such as " FOO"
+ * rather than "FOO". Since this code is already
+ * here, and we don't know what device it is
+ * trying to work with, leave it as-is.
+ */
+ max = 8; /* max length of vendor */
+ while ((max > 0) && *vendor == ' ') {
+ max--;
+ vendor++;
+ }
+ /*
+ * XXX removing the following strlen() would be
+ * good, using it means that for an entry not in
+ * the list, we scan every byte of every vendor
+ * listed in scsi_static_device_list[], and never match
+ * a single one (and still have to compare at
+ * least the first byte of each vendor).
+ */
+ if (memcmp(devinfo->vendor, vendor,
+ min(max, strlen(devinfo->vendor))))
+ continue;
+ /*
+ * Skip spaces again.
+ */
+ max = 16; /* max length of model */
+ while ((max > 0) && *model == ' ') {
+ max--;
+ model++;
+ }
+ if (memcmp(devinfo->model, model,
+ min(max, strlen(devinfo->model))))
+ continue;
+ found = devinfo;
+ } else {
+ if (!memcmp(devinfo->vendor, vendor,
+ sizeof(devinfo->vendor)) &&
+ !memcmp(devinfo->model, model,
+ sizeof(devinfo->model)))
+ found = devinfo;
+ }
+ if (found)
+ break;
+ }
+
+ if (found) {
+ list_del(&found->dev_info_list);
+ kfree(found);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
+
+/**
* scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
* @dev_list: string of device flags to add
*
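scsi_dev_info_list_del_keyed() is the counterpart of scsi_dev_info_list_add_keyed(), whose prototype appears in the scsi_priv.h hunk further down. A hedged usage sketch for a keyed list such as the SCSI_DEVINFO_DH key added later in this series; the "ACME"/"SuperDisk" strings and the zero flag values are hypothetical:

static int example_register(void)
{
	/* add an entry on the device-handler keyed list */
	return scsi_dev_info_list_add_keyed(0, "ACME", "SuperDisk",
					    NULL, 0, SCSI_DEVINFO_DH);
}

static void example_unregister(void)
{
	/* drop the same entry again; returns -ENOENT if it is missing */
	scsi_dev_info_list_del_keyed("ACME", "SuperDisk", SCSI_DEVINFO_DH);
}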
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 45c75649b9e0..633c2395a92a 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -3,14 +3,14 @@
*
* SCSI error/timeout handling
* Initial versions: Eric Youngdale. Based upon conversations with
- * Leonard Zubkoff and David Miller at Linux Expo,
+ * Leonard Zubkoff and David Miller at Linux Expo,
* ideas originating from all over the place.
*
* Restructured scsi_unjam_host and associated functions.
* September 04, 2002 Mike Anderson (andmike@us.ibm.com)
*
* Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
- * minor cleanups.
+ * minor cleanups.
* September 30, 2002 Mike Anderson (andmike@us.ibm.com)
*/
@@ -129,14 +129,15 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
{
struct scsi_cmnd *scmd = req->special;
enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
+ struct Scsi_Host *host = scmd->device->host;
trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
- if (scmd->device->host->transportt->eh_timed_out)
- rtn = scmd->device->host->transportt->eh_timed_out(scmd);
- else if (scmd->device->host->hostt->eh_timed_out)
- rtn = scmd->device->host->hostt->eh_timed_out(scmd);
+ if (host->transportt->eh_timed_out)
+ rtn = host->transportt->eh_timed_out(scmd);
+ else if (host->hostt->eh_timed_out)
+ rtn = host->hostt->eh_timed_out(scmd);
if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
@@ -195,7 +196,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
++total_failures;
if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
++cmd_cancel;
- else
+ else
++cmd_failed;
}
}
@@ -214,7 +215,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
" devices require eh work\n",
- total_failures, devices_failed));
+ total_failures, devices_failed));
}
#endif
@@ -223,7 +224,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
* @scmd: Cmd to have sense checked.
*
* Return value:
- * SUCCESS or FAILED or NEEDS_RETRY
+ * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR
*
* Notes:
* When a deferred error is detected the current command has
@@ -294,7 +295,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
return NEEDS_RETRY;
}
/*
- * if the device is in the process of becoming ready, we
+ * if the device is in the process of becoming ready, we
* should retry.
*/
if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
@@ -326,17 +327,19 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
*/
return SUCCESS;
- /* these three are not supported */
+ /* these are not supported */
case COPY_ABORTED:
case VOLUME_OVERFLOW:
case MISCOMPARE:
- return SUCCESS;
+ case BLANK_CHECK:
+ case DATA_PROTECT:
+ return TARGET_ERROR;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- return SUCCESS;
+ return TARGET_ERROR;
}
return NEEDS_RETRY;
@@ -344,11 +347,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- return SUCCESS;
+ return TARGET_ERROR;
case ILLEGAL_REQUEST:
- case BLANK_CHECK:
- case DATA_PROTECT:
default:
return SUCCESS;
}
@@ -488,7 +489,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
*/
static void scsi_eh_done(struct scsi_cmnd *scmd)
{
- struct completion *eh_action;
+ struct completion *eh_action;
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s scmd: %p result: %x\n",
@@ -507,22 +508,23 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
int rtn;
+ struct Scsi_Host *host = scmd->device->host;
+ struct scsi_host_template *hostt = host->hostt;
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
__func__));
- if (!scmd->device->host->hostt->eh_host_reset_handler)
+ if (!hostt->eh_host_reset_handler)
return FAILED;
- rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);
+ rtn = hostt->eh_host_reset_handler(scmd);
if (rtn == SUCCESS) {
- if (!scmd->device->host->hostt->skip_settle_delay)
+ if (!hostt->skip_settle_delay)
ssleep(HOST_RESET_SETTLE_TIME);
- spin_lock_irqsave(scmd->device->host->host_lock, flags);
- scsi_report_bus_reset(scmd->device->host,
- scmd_channel(scmd));
- spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
+ scsi_report_bus_reset(host, scmd_channel(scmd));
+ spin_unlock_irqrestore(host->host_lock, flags);
}
return rtn;
@@ -536,22 +538,23 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
int rtn;
+ struct Scsi_Host *host = scmd->device->host;
+ struct scsi_host_template *hostt = host->hostt;
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
__func__));
- if (!scmd->device->host->hostt->eh_bus_reset_handler)
+ if (!hostt->eh_bus_reset_handler)
return FAILED;
- rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);
+ rtn = hostt->eh_bus_reset_handler(scmd);
if (rtn == SUCCESS) {
- if (!scmd->device->host->hostt->skip_settle_delay)
+ if (!hostt->skip_settle_delay)
ssleep(BUS_RESET_SETTLE_TIME);
- spin_lock_irqsave(scmd->device->host->host_lock, flags);
- scsi_report_bus_reset(scmd->device->host,
- scmd_channel(scmd));
- spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
+ scsi_report_bus_reset(host, scmd_channel(scmd));
+ spin_unlock_irqrestore(host->host_lock, flags);
}
return rtn;
@@ -577,16 +580,18 @@ static int scsi_try_target_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
int rtn;
+ struct Scsi_Host *host = scmd->device->host;
+ struct scsi_host_template *hostt = host->hostt;
- if (!scmd->device->host->hostt->eh_target_reset_handler)
+ if (!hostt->eh_target_reset_handler)
return FAILED;
- rtn = scmd->device->host->hostt->eh_target_reset_handler(scmd);
+ rtn = hostt->eh_target_reset_handler(scmd);
if (rtn == SUCCESS) {
- spin_lock_irqsave(scmd->device->host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
__starget_for_each_device(scsi_target(scmd->device), NULL,
__scsi_report_device_reset);
- spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
return rtn;
@@ -605,27 +610,28 @@ static int scsi_try_target_reset(struct scsi_cmnd *scmd)
static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
int rtn;
+ struct scsi_host_template *hostt = scmd->device->host->hostt;
- if (!scmd->device->host->hostt->eh_device_reset_handler)
+ if (!hostt->eh_device_reset_handler)
return FAILED;
- rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
+ rtn = hostt->eh_device_reset_handler(scmd);
if (rtn == SUCCESS)
__scsi_report_device_reset(scmd->device, NULL);
return rtn;
}
-static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
+static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd)
{
- if (!scmd->device->host->hostt->eh_abort_handler)
+ if (!hostt->eh_abort_handler)
return FAILED;
- return scmd->device->host->hostt->eh_abort_handler(scmd);
+ return hostt->eh_abort_handler(scmd);
}
static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
{
- if (scsi_try_to_abort_cmd(scmd) != SUCCESS)
+ if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
if (scsi_try_bus_device_reset(scmd) != SUCCESS)
if (scsi_try_target_reset(scmd) != SUCCESS)
if (scsi_try_bus_reset(scmd) != SUCCESS)
@@ -787,6 +793,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
case SUCCESS:
case NEEDS_RETRY:
case FAILED:
+ case TARGET_ERROR:
break;
case ADD_TO_MLQUEUE:
rtn = NEEDS_RETRY;
@@ -845,7 +852,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
*
* Description:
* See if we need to request sense information. if so, then get it
- * now, so we have a better idea of what to do.
+ * now, so we have a better idea of what to do.
*
* Notes:
* This has the unfortunate side effect that if a shost adapter does
@@ -957,7 +964,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
"0x%p\n", current->comm,
scmd));
- rtn = scsi_try_to_abort_cmd(scmd);
+ rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
if (!scsi_device_online(scmd->device) ||
@@ -965,7 +972,6 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
!scsi_eh_tur(scmd)) {
scsi_eh_finish_cmd(scmd, done_q);
}
-
} else
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
" cmd failed:"
@@ -1009,7 +1015,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
*
* Notes:
* If commands are failing due to not ready, initializing command required,
- * try revalidating the device, which will end up sending a start unit.
+ * try revalidating the device, which will end up sending a start unit.
*/
static int scsi_eh_stu(struct Scsi_Host *shost,
struct list_head *work_q,
@@ -1063,7 +1069,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
* Try a bus device reset. Still, look to see whether we have multiple
* devices that are jammed or not - if we have multiple devices, it
* makes no sense to try bus_device_reset - we really would need to try
- * a bus_reset instead.
+ * a bus_reset instead.
*/
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
struct list_head *work_q,
@@ -1163,7 +1169,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
}
/**
- * scsi_eh_bus_reset - send a bus reset
+ * scsi_eh_bus_reset - send a bus reset
* @shost: &scsi host being recovered.
* @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
@@ -1180,7 +1186,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
* we really want to loop over the various channels, and do this on
* a channel by channel basis. we should also check to see if any
* of the failed commands are on soft_reset devices, and if so, skip
- * the reset.
+ * the reset.
*/
for (channel = 0; channel <= shost->max_channel; channel++) {
@@ -1222,7 +1228,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
}
/**
- * scsi_eh_host_reset - send a host reset
+ * scsi_eh_host_reset - send a host reset
* @work_q: list_head for processed commands.
* @done_q: list_head for processed commands.
*/
@@ -1375,7 +1381,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
return SUCCESS;
/*
* when the low level driver returns did_soft_error,
- * it is responsible for keeping an internal retry counter
+ * it is responsible for keeping an internal retry counter
* in order to avoid endless loops (db)
*
* actually this is a bug in this function here. we should
@@ -1413,7 +1419,6 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
*/
break;
/* fallthrough */
-
case DID_BUS_BUSY:
case DID_PARITY:
goto maybe_retry;
@@ -1469,6 +1474,14 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
goto maybe_retry;
+ else if (rtn == TARGET_ERROR) {
+ /*
+ * Need to modify host byte to signal a
+ * permanent target failure
+ */
+ scmd->result |= (DID_TARGET_FAILURE << 16);
+ rtn = SUCCESS;
+ }
/* if rtn == FAILED, we have no sense information;
* returning FAILED will wake the error handler thread
* to collect the sense and redo the decide
@@ -1486,6 +1499,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
case RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
+ scmd->result |= (DID_NEXUS_FAILURE << 16);
return SUCCESS; /* causes immediate i/o error */
default:
return FAILED;
@@ -1972,7 +1986,7 @@ int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
if (sb_len > 7)
sshdr->additional_length = sense_buffer[7];
} else {
- /*
+ /*
* fixed format
*/
if (sb_len > 2)
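The new TARGET_ERROR disposition is translated into a host byte by scsi_decide_disposition() via scmd->result |= (DID_TARGET_FAILURE << 16). As a reminder of the result-word layout that shift relies on (a sketch mirroring the usual host_byte() accessor):

/*
 * scmd->result packs, from most to least significant byte:
 *   driver byte | host byte | message byte | SCSI status
 * so the DID_* values live in bits 16-23.
 */
static inline int example_host_byte(int result)
{
	return (result >> 16) & 0xff;	/* what host_byte(result) evaluates to */
}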
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fb2bb35c62cb..2d63c8ad1442 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -667,6 +667,30 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
}
EXPORT_SYMBOL(scsi_release_buffers);
+static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+{
+ int error = 0;
+
+ switch(host_byte(result)) {
+ case DID_TRANSPORT_FAILFAST:
+ error = -ENOLINK;
+ break;
+ case DID_TARGET_FAILURE:
+ cmd->result |= (DID_OK << 16);
+ error = -EREMOTEIO;
+ break;
+ case DID_NEXUS_FAILURE:
+ cmd->result |= (DID_OK << 16);
+ error = -EBADE;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+
+ return error;
+}
+
/*
* Function: scsi_io_completion()
*
@@ -737,7 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
req->sense_len = len;
}
if (!sense_deferred)
- error = -EIO;
+ error = __scsi_error_from_host_byte(cmd, result);
}
req->resid_len = scsi_get_resid(cmd);
@@ -796,7 +820,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
return;
- error = -EIO;
+ error = __scsi_error_from_host_byte(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -843,6 +867,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
description = "Host Data Integrity Failure";
action = ACTION_FAIL;
error = -EILSEQ;
+ /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+ } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
+ (cmd->cmnd[0] == UNMAP ||
+ cmd->cmnd[0] == WRITE_SAME_16 ||
+ cmd->cmnd[0] == WRITE_SAME)) {
+ description = "Discard failure";
+ action = ACTION_FAIL;
} else
action = ACTION_FAIL;
break;
@@ -1038,6 +1069,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
cmd->request = req;
cmd->cmnd = req->cmd;
+ cmd->prot_op = SCSI_PROT_NORMAL;
return cmd;
}
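__scsi_error_from_host_byte() gives upper layers distinct errnos for target failures (-EREMOTEIO), nexus/reservation failures (-EBADE) and fast-failed transports (-ENOLINK) instead of a blanket -EIO. A sketch of how a hypothetical multipath-style consumer might use the distinction (illustrative only, not code from this series):

static bool example_retry_on_other_path(int error)
{
	switch (error) {
	case -EREMOTEIO:	/* DID_TARGET_FAILURE: the target itself failed */
	case -EBADE:		/* DID_NEXUS_FAILURE: reservation conflict */
		return false;	/* every path sees the same target state */
	case -ENOLINK:		/* DID_TRANSPORT_FAILFAST */
		return true;	/* a different transport path may still work */
	default:
		return true;	/* -EIO and friends: keep legacy behaviour */
	}
}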
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 342ee1a9c41d..2a588955423a 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,6 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
enum {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
+ SCSI_DEVINFO_DH,
};
extern int scsi_get_device_flags(struct scsi_device *sdev,
@@ -56,6 +57,7 @@ extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
int flags, int key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
extern int scsi_dev_info_add_list(int key, const char *name);
extern int scsi_dev_info_remove_list(int key);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f905ecb5704d..b4218390941e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -954,6 +954,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
if (dd_size)
conn->dd_data = &conn[1];
+ mutex_init(&conn->ep_mutex);
INIT_LIST_HEAD(&conn->conn_list);
conn->transport = transport;
conn->cid = cid;
@@ -975,7 +976,6 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
spin_lock_irqsave(&connlock, flags);
list_add(&conn->conn_list, &connlist);
- conn->active = 1;
spin_unlock_irqrestore(&connlock, flags);
ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
@@ -1001,7 +1001,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
unsigned long flags;
spin_lock_irqsave(&connlock, flags);
- conn->active = 0;
list_del(&conn->conn_list);
spin_unlock_irqrestore(&connlock, flags);
@@ -1430,6 +1429,29 @@ release_host:
return err;
}
+static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
+ u64 ep_handle)
+{
+ struct iscsi_cls_conn *conn;
+ struct iscsi_endpoint *ep;
+
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+ ep = iscsi_lookup_endpoint(ep_handle);
+ if (!ep)
+ return -EINVAL;
+ conn = ep->conn;
+ if (conn) {
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = NULL;
+ mutex_unlock(&conn->ep_mutex);
+ }
+
+ transport->ep_disconnect(ep);
+ return 0;
+}
+
static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
struct iscsi_uevent *ev, int msg_type)
@@ -1454,14 +1476,8 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
ev->u.ep_poll.timeout_ms);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
- if (!transport->ep_disconnect)
- return -EINVAL;
-
- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
- if (!ep)
- return -EINVAL;
-
- transport->ep_disconnect(ep);
+ rc = iscsi_if_ep_disconnect(transport,
+ ev->u.ep_disconnect.ep_handle);
break;
}
return rc;
@@ -1609,12 +1625,31 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
session = iscsi_session_lookup(ev->u.b_conn.sid);
conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
- if (session && conn)
- ev->r.retcode = transport->bind_conn(session, conn,
- ev->u.b_conn.transport_eph,
- ev->u.b_conn.is_leading);
- else
+ if (conn && conn->ep)
+ iscsi_if_ep_disconnect(transport, conn->ep->id);
+
+ if (!session || !conn) {
err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->bind_conn(session, conn,
+ ev->u.b_conn.transport_eph,
+ ev->u.b_conn.is_leading);
+ if (ev->r.retcode || !transport->ep_connect)
+ break;
+
+ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+ if (ep) {
+ ep->conn = conn;
+
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = ep;
+ mutex_unlock(&conn->ep_mutex);
+ } else
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "Could not set ep conn "
+ "binding\n");
break;
case ISCSI_UEVENT_SET_PARAM:
err = iscsi_set_param(transport, ev);
@@ -1747,13 +1782,48 @@ iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN);
iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN);
iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN);
iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT);
-iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
-iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+#define iscsi_conn_ep_attr_show(param) \
+static ssize_t show_conn_ep_param_##param(struct device *dev, \
+ struct device_attribute *attr,\
+ char *buf) \
+{ \
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
+ struct iscsi_transport *t = conn->transport; \
+ struct iscsi_endpoint *ep; \
+ ssize_t rc; \
+ \
+ /* \
+ * Need to make sure ep_disconnect does not free the LLD's \
+ * interconnect resources while we are trying to read them. \
+ */ \
+ mutex_lock(&conn->ep_mutex); \
+ ep = conn->ep; \
+ if (!ep && t->ep_connect) { \
+ mutex_unlock(&conn->ep_mutex); \
+ return -ENOTCONN; \
+ } \
+ \
+ if (ep) \
+ rc = t->get_ep_param(ep, param, buf); \
+ else \
+ rc = t->get_conn_param(conn, param, buf); \
+ mutex_unlock(&conn->ep_mutex); \
+ return rc; \
+}
+
+#define iscsi_conn_ep_attr(field, param) \
+ iscsi_conn_ep_attr_show(param) \
+static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \
+ show_conn_ep_param_##param, NULL);
+
+iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT);
+
/*
* iSCSI session attrs
*/
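Because address and port may now live on the endpoint rather than the connection, the iscsi_conn_ep_attr() wrapper generates a show routine that prefers the bound endpoint and holds ep_mutex so ep_disconnect cannot free it mid-read. For clarity, roughly what iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS) expands to (the ISCSI_CLASS_ATTR registration is omitted):

static ssize_t show_conn_ep_param_ISCSI_PARAM_CONN_ADDRESS(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
	struct iscsi_transport *t = conn->transport;
	struct iscsi_endpoint *ep;
	ssize_t rc;

	mutex_lock(&conn->ep_mutex);
	ep = conn->ep;
	if (!ep && t->ep_connect) {
		mutex_unlock(&conn->ep_mutex);
		return -ENOTCONN;
	}
	if (ep)
		rc = t->get_ep_param(ep, ISCSI_PARAM_CONN_ADDRESS, buf);
	else
		rc = t->get_conn_param(conn, ISCSI_PARAM_CONN_ADDRESS, buf);
	mutex_unlock(&conn->ep_mutex);
	return rc;
}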
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e56730214c05..7ff61d76b4c5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -96,6 +96,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
#define SD_MINORS 0
#endif
+static void sd_config_discard(struct scsi_disk *, unsigned int);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
@@ -294,7 +295,54 @@ sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+ return snprintf(buf, 20, "%u\n", sdkp->lbpme);
+}
+
+static const char *lbp_mode[] = {
+ [SD_LBP_FULL] = "full",
+ [SD_LBP_UNMAP] = "unmap",
+ [SD_LBP_WS16] = "writesame_16",
+ [SD_LBP_WS10] = "writesame_10",
+ [SD_LBP_ZERO] = "writesame_zero",
+ [SD_LBP_DISABLE] = "disabled",
+};
+
+static ssize_t
+sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]);
+}
+
+static ssize_t
+sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (sdp->type != TYPE_DISK)
+ return -EINVAL;
+
+ if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20))
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20))
+ sd_config_discard(sdkp, SD_LBP_WS16);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20))
+ sd_config_discard(sdkp, SD_LBP_WS10);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20))
+ sd_config_discard(sdkp, SD_LBP_ZERO);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20))
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ else
+ return -EINVAL;
+
+ return count;
}
static struct device_attribute sd_disk_attrs[] = {
@@ -309,6 +357,8 @@ static struct device_attribute sd_disk_attrs[] = {
__ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
+ __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
+ sd_store_provisioning_mode),
__ATTR_NULL,
};
@@ -433,6 +483,49 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
scsi_set_prot_type(scmd, dif);
}
+static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+{
+ struct request_queue *q = sdkp->disk->queue;
+ unsigned int logical_block_size = sdkp->device->sector_size;
+ unsigned int max_blocks = 0;
+
+ q->limits.discard_zeroes_data = sdkp->lbprz;
+ q->limits.discard_alignment = sdkp->unmap_alignment;
+ q->limits.discard_granularity =
+ max(sdkp->physical_block_size,
+ sdkp->unmap_granularity * logical_block_size);
+
+ switch (mode) {
+
+ case SD_LBP_DISABLE:
+ q->limits.max_discard_sectors = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ return;
+
+ case SD_LBP_UNMAP:
+ max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff);
+ break;
+
+ case SD_LBP_WS16:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff);
+ break;
+
+ case SD_LBP_WS10:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ break;
+
+ case SD_LBP_ZERO:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ q->limits.discard_zeroes_data = 1;
+ break;
+ }
+
+ q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
+ sdkp->provisioning_mode = mode;
+}
+
/**
* scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
* @sdp: scsi device to operate one
@@ -449,6 +542,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
unsigned int nr_sectors = bio_sectors(bio);
unsigned int len;
int ret;
+ char *buf;
struct page *page;
if (sdkp->device->sector_size == 4096) {
@@ -464,8 +558,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
if (!page)
return BLKPREP_DEFER;
- if (sdkp->unmap) {
- char *buf = page_address(page);
+ switch (sdkp->provisioning_mode) {
+ case SD_LBP_UNMAP:
+ buf = page_address(page);
rq->cmd_len = 10;
rq->cmd[0] = UNMAP;
@@ -477,7 +572,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
put_unaligned_be32(nr_sectors, &buf[16]);
len = 24;
- } else {
+ break;
+
+ case SD_LBP_WS16:
rq->cmd_len = 16;
rq->cmd[0] = WRITE_SAME_16;
rq->cmd[1] = 0x8; /* UNMAP */
@@ -485,11 +582,30 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
put_unaligned_be32(nr_sectors, &rq->cmd[10]);
len = sdkp->device->sector_size;
+ break;
+
+ case SD_LBP_WS10:
+ case SD_LBP_ZERO:
+ rq->cmd_len = 10;
+ rq->cmd[0] = WRITE_SAME;
+ if (sdkp->provisioning_mode == SD_LBP_WS10)
+ rq->cmd[1] = 0x8; /* UNMAP */
+ put_unaligned_be32(sector, &rq->cmd[2]);
+ put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+
+ len = sdkp->device->sector_size;
+ break;
+
+ default:
+ ret = BLKPREP_KILL;
+ goto out;
}
blk_add_request_payload(rq, page, len);
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
rq->buffer = page_address(page);
+
+out:
if (ret != BLKPREP_OK) {
__free_page(page);
rq->buffer = NULL;
@@ -1251,12 +1367,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
int sense_valid = 0;
int sense_deferred = 0;
+ unsigned char op = SCpnt->cmnd[0];
- if (SCpnt->request->cmd_flags & REQ_DISCARD) {
- if (!result)
- scsi_set_resid(SCpnt, 0);
- return good_bytes;
- }
+ if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result)
+ scsi_set_resid(SCpnt, 0);
if (result) {
sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -1295,10 +1409,17 @@ static int sd_done(struct scsi_cmnd *SCpnt)
SCpnt->result = 0;
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
break;
- case ABORTED_COMMAND: /* DIF: Target detected corruption */
- case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
- if (sshdr.asc == 0x10)
+ case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
+ good_bytes = sd_completed_bytes(SCpnt);
+ break;
+ case ILLEGAL_REQUEST:
+ if (sshdr.asc == 0x10) /* DIX: Host detected corruption */
good_bytes = sd_completed_bytes(SCpnt);
+ /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+ if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
+ (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME))
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
break;
default:
break;
@@ -1596,17 +1717,13 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
sd_printk(KERN_NOTICE, sdkp,
"physical block alignment offset: %u\n", alignment);
- if (buffer[14] & 0x80) { /* TPE */
- struct request_queue *q = sdp->request_queue;
+ if (buffer[14] & 0x80) { /* LBPME */
+ sdkp->lbpme = 1;
- sdkp->thin_provisioning = 1;
- q->limits.discard_granularity = sdkp->physical_block_size;
- q->limits.max_discard_sectors = 0xffffffff;
+ if (buffer[14] & 0x40) /* LBPRZ */
+ sdkp->lbprz = 1;
- if (buffer[14] & 0x40) /* TPRZ */
- q->limits.discard_zeroes_data = 1;
-
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ sd_config_discard(sdkp, SD_LBP_WS16);
}
sdkp->capacity = lba + 1;
@@ -2091,7 +2208,6 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
- struct request_queue *q = sdkp->disk->queue;
unsigned int sector_sz = sdkp->device->sector_size;
const int vpd_len = 64;
unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
@@ -2106,39 +2222,46 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
blk_queue_io_opt(sdkp->disk->queue,
get_unaligned_be32(&buffer[12]) * sector_sz);
- /* Thin provisioning enabled and page length indicates TP support */
- if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
- unsigned int lba_count, desc_count, granularity;
+ if (buffer[3] == 0x3c) {
+ unsigned int lba_count, desc_count;
- lba_count = get_unaligned_be32(&buffer[20]);
- desc_count = get_unaligned_be32(&buffer[24]);
-
- if (lba_count && desc_count) {
- if (sdkp->tpvpd && !sdkp->tpu)
- sdkp->unmap = 0;
- else
- sdkp->unmap = 1;
- }
+ sdkp->max_ws_blocks =
+ (u32) min_not_zero(get_unaligned_be64(&buffer[36]),
+ (u64)0xffffffff);
- if (sdkp->tpvpd && !sdkp->tpu && !sdkp->tpws) {
- sd_printk(KERN_ERR, sdkp, "Thin provisioning is " \
- "enabled but neither TPU, nor TPWS are " \
- "set. Disabling discard!\n");
+ if (!sdkp->lbpme)
goto out;
- }
- if (lba_count)
- q->limits.max_discard_sectors =
- lba_count * sector_sz >> 9;
+ lba_count = get_unaligned_be32(&buffer[20]);
+ desc_count = get_unaligned_be32(&buffer[24]);
- granularity = get_unaligned_be32(&buffer[28]);
+ if (lba_count && desc_count)
+ sdkp->max_unmap_blocks = lba_count;
- if (granularity)
- q->limits.discard_granularity = granularity * sector_sz;
+ sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
if (buffer[32] & 0x80)
- q->limits.discard_alignment =
+ sdkp->unmap_alignment =
get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+
+ if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
+
+ if (sdkp->max_unmap_blocks)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else
+ sd_config_discard(sdkp, SD_LBP_WS16);
+
+ } else { /* LBP VPD page tells us what to use */
+
+ if (sdkp->lbpu && sdkp->max_unmap_blocks)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (sdkp->lbpws)
+ sd_config_discard(sdkp, SD_LBP_WS16);
+ else if (sdkp->lbpws10)
+ sd_config_discard(sdkp, SD_LBP_WS10);
+ else
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ }
}
out:
@@ -2172,15 +2295,15 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
}
/**
- * sd_read_thin_provisioning - Query thin provisioning VPD page
+ * sd_read_block_provisioning - Query provisioning VPD page
* @disk: disk to query
*/
-static void sd_read_thin_provisioning(struct scsi_disk *sdkp)
+static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
unsigned char *buffer;
const int vpd_len = 8;
- if (sdkp->thin_provisioning == 0)
+ if (sdkp->lbpme == 0)
return;
buffer = kmalloc(vpd_len, GFP_KERNEL);
@@ -2188,9 +2311,10 @@ static void sd_read_thin_provisioning(struct scsi_disk *sdkp)
if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
goto out;
- sdkp->tpvpd = 1;
- sdkp->tpu = (buffer[5] >> 7) & 1; /* UNMAP */
- sdkp->tpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
+ sdkp->lbpvpd = 1;
+ sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
+ sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
+ sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
out:
kfree(buffer);
@@ -2247,7 +2371,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_capacity(sdkp, buffer);
if (sd_try_extended_inquiry(sdp)) {
- sd_read_thin_provisioning(sdkp);
+ sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
}
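sd_config_discard() folds the device-reported provisioning limits into the request queue's discard limits. A worked example for a hypothetical disk with 512-byte logical blocks, 4 KiB physical blocks, an unmap granularity of 8 blocks and max_unmap_blocks of 0x200000, taking the UNMAP path:

static unsigned int example_discard_limits(void)
{
	unsigned int logical_block_size = 512;		/* hypothetical values */
	unsigned int max_blocks = 0x200000;		/* SD_LBP_UNMAP path */

	/* discard_granularity = max(4096, 8 * 512) = 4096 bytes        */
	/* 512 >> 9 == 1, i.e. one 512-byte sector per logical block    */
	return max_blocks * (logical_block_size >> 9);	/* 0x200000 sectors = 1 GiB */
}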
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c9d8f6ca49e2..6ad798bfd52a 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -43,6 +43,15 @@ enum {
SD_MEMPOOL_SIZE = 2, /* CDB pool size */
};
+enum {
+ SD_LBP_FULL = 0, /* Full logical block provisioning */
+ SD_LBP_UNMAP, /* Use UNMAP command */
+ SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */
+ SD_LBP_WS10, /* Use WRITE SAME(10) with UNMAP bit */
+ SD_LBP_ZERO, /* Use WRITE SAME(10) with zero payload */
+ SD_LBP_DISABLE, /* Discard disabled due to failed cmd */
+};
+
struct scsi_disk {
struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
@@ -50,21 +59,27 @@ struct scsi_disk {
struct gendisk *disk;
atomic_t openers;
sector_t capacity; /* size in 512-byte sectors */
+ u32 max_ws_blocks;
+ u32 max_unmap_blocks;
+ u32 unmap_granularity;
+ u32 unmap_alignment;
u32 index;
unsigned int physical_block_size;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
+ u8 provisioning_mode;
unsigned ATO : 1; /* state of disk ATO bit */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
unsigned first_scan : 1;
- unsigned thin_provisioning : 1;
- unsigned unmap : 1;
- unsigned tpws : 1;
- unsigned tpu : 1;
- unsigned tpvpd : 1;
+ unsigned lbpme : 1;
+ unsigned lbprz : 1;
+ unsigned lbpu : 1;
+ unsigned lbpws : 1;
+ unsigned lbpws10 : 1;
+ unsigned lbpvpd : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index bb233a9cbad2..fc14b8dea0d7 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -53,6 +53,12 @@ if SPI_MASTER
comment "SPI Master Controller Drivers"
+config SPI_ALTERA
+ tristate "Altera SPI Controller"
+ select SPI_BITBANG
+ help
+ This is the driver for the Altera SPI Controller.
+
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
depends on ATH79 && GENERIC_GPIO
@@ -161,13 +167,13 @@ config SPI_IMX_VER_0_0
def_bool y if SOC_IMX21 || SOC_IMX27
config SPI_IMX_VER_0_4
- def_bool y if ARCH_MX31
+ def_bool y if SOC_IMX31
config SPI_IMX_VER_0_7
- def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53
+ def_bool y if ARCH_MX25 || SOC_IMX35 || SOC_IMX51 || SOC_IMX53
config SPI_IMX_VER_2_3
- def_bool y if ARCH_MX51 || ARCH_MX53
+ def_bool y if SOC_IMX51 || SOC_IMX53
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
@@ -231,6 +237,13 @@ config SPI_FSL_ESPI
From MPC8536, 85xx platform uses the controller, and all P10xx,
P20xx, P30xx,P40xx, P50xx uses this controller.
+config SPI_OC_TINY
+ tristate "OpenCores tiny SPI"
+ depends on GENERIC_GPIO
+ select SPI_BITBANG
+ help
+ This is the driver for the OpenCores tiny SPI master controller.
+
config SPI_OMAP_UWIRE
tristate "OMAP1 MicroWire"
depends on ARCH_OMAP1
@@ -330,6 +343,12 @@ config SPI_SH_MSIOF
help
SPI driver for SuperH MSIOF blocks.
+config SPI_SH
+ tristate "SuperH SPI controller"
+ depends on SUPERH
+ help
+ SPI driver for SuperH SPI blocks.
+
config SPI_SH_SCI
tristate "SuperH SCI SPI controller"
depends on SUPERH
@@ -350,6 +369,16 @@ config SPI_TEGRA
help
SPI driver for NVidia Tegra SoCs
+config SPI_TI_SSP
+ tristate "TI Sequencer Serial Port - SPI Support"
+ depends on MFD_TI_SSP
+ help
+ This selects an SPI master implementation using a TI sequencer
+ serial port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ti-ssp-spi.
+
config SPI_TOPCLIFF_PCH
tristate "Topcliff PCH SPI Controller"
depends on PCI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 86d1b5f9bbd9..fd2fc5f6505f 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -9,6 +9,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
obj-$(CONFIG_SPI_MASTER) += spi.o
# SPI master controller drivers (bus)
+obj-$(CONFIG_SPI_ALTERA) += spi_altera.o
obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_SPI_IMX) += spi_imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += pxa2xx_spi_pci.o
+obj-$(CONFIG_SPI_OC_TINY) += spi_oc_tiny.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o
@@ -43,9 +45,11 @@ obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o
+obj-$(CONFIG_SPI_TI_SSP) += ti-ssp-spi.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
+obj-$(CONFIG_SPI_SH) += spi_sh.o
obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 71a1219a995d..5c2b092a915e 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -329,15 +329,16 @@ struct vendor_data {
/**
* struct pl022 - This is the private SSP driver data structure
* @adev: AMBA device model hookup
- * @vendor: Vendor data for the IP block
- * @phybase: The physical memory where the SSP device resides
- * @virtbase: The virtual memory where the SSP is mapped
+ * @vendor: vendor data for the IP block
+ * @phybase: the physical memory where the SSP device resides
+ * @virtbase: the virtual memory where the SSP is mapped
+ * @clk: outgoing clock "SPICLK" for the SPI bus
* @master: SPI framework hookup
* @master_info: controller-specific data from machine setup
- * @regs: SSP controller register's virtual address
- * @pump_messages: Work struct for scheduling work to the workqueue
- * @lock: spinlock to syncronise access to driver data
* @workqueue: a workqueue on which any spi_message request is queued
+ * @pump_messages: work struct for scheduling work to the workqueue
+ * @queue_lock: spinlock to synchronise access to message queue
+ * @queue: message queue
* @busy: workqueue is busy
* @running: workqueue is running
* @pump_transfers: Tasklet used in Interrupt Transfer mode
@@ -348,8 +349,14 @@ struct vendor_data {
* @tx_end: end position in TX buffer to be read
* @rx: current position in RX buffer to be written
* @rx_end: end position in RX buffer to be written
- * @readingtype: the type of read currently going on
- * @writingtype: the type or write currently going on
+ * @read: the type of read currently going on
+ * @write: the type of write currently going on
+ * @exp_fifo_level: expected FIFO level
+ * @dma_rx_channel: optional channel for RX DMA
+ * @dma_tx_channel: optional channel for TX DMA
+ * @sgt_rx: scattertable for the RX transfer
+ * @sgt_tx: scattertable for the TX transfer
+ * @dummypage: a dummy page used for driving data on the bus with DMA
*/
struct pl022 {
struct amba_device *adev;
@@ -397,8 +404,8 @@ struct pl022 {
* @cpsr: Value of Clock prescale register
* @n_bytes: how many bytes(power of 2) reqd for a given data width of client
* @enable_dma: Whether to enable DMA or not
- * @write: function ptr to be used to write when doing xfer for this chip
* @read: function ptr to be used to read when doing xfer for this chip
+ * @write: function ptr to be used to write when doing xfer for this chip
* @cs_control: chip select callback provided by chip
* @xfer_type: polling/interrupt/DMA
*
@@ -508,9 +515,10 @@ static void giveback(struct pl022 *pl022)
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
- /* This message is completed, so let's turn off the clocks! */
+ /* This message is completed, so let's turn off the clocks & power */
clk_disable(pl022->clk);
amba_pclk_disable(pl022->adev);
+ amba_vcore_disable(pl022->adev);
}
/**
@@ -917,7 +925,6 @@ static int configure_dma(struct pl022 *pl022)
struct dma_chan *txchan = pl022->dma_tx_channel;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
- dma_cookie_t cookie;
/* Check that the channels are available */
if (!rxchan || !txchan)
@@ -962,10 +969,8 @@ static int configure_dma(struct pl022 *pl022)
tx_conf.dst_addr_width = rx_conf.src_addr_width;
BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
- rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
- (unsigned long) &rx_conf);
- txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
- (unsigned long) &tx_conf);
+ dmaengine_slave_config(rxchan, &rx_conf);
+ dmaengine_slave_config(txchan, &tx_conf);
/* Create sglists for the transfers */
pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1;
@@ -1018,23 +1023,17 @@ static int configure_dma(struct pl022 *pl022)
rxdesc->callback_param = pl022;
/* Submit and fire RX and TX with TX last so we're ready to read! */
- cookie = rxdesc->tx_submit(rxdesc);
- if (dma_submit_error(cookie))
- goto err_submit_rx;
- cookie = txdesc->tx_submit(txdesc);
- if (dma_submit_error(cookie))
- goto err_submit_tx;
- rxchan->device->device_issue_pending(rxchan);
- txchan->device->device_issue_pending(txchan);
+ dmaengine_submit(rxdesc);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(rxchan);
+ dma_async_issue_pending(txchan);
return 0;
-err_submit_tx:
-err_submit_rx:
err_txdesc:
- txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(txchan);
err_rxdesc:
- rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(rxchan);
dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
pl022->sgt_tx.nents, DMA_TO_DEVICE);
err_tx_sgmap:
@@ -1101,8 +1100,8 @@ static void terminate_dma(struct pl022 *pl022)
struct dma_chan *rxchan = pl022->dma_rx_channel;
struct dma_chan *txchan = pl022->dma_tx_channel;
- rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
- txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(rxchan);
+ dmaengine_terminate_all(txchan);
unmap_free_dma_scatter(pl022);
}
@@ -1482,9 +1481,11 @@ static void pump_messages(struct work_struct *work)
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
/*
- * We enable the clocks here, then the clocks will be disabled when
- * giveback() is called in each method (poll/interrupt/DMA)
+ * We enable the core voltage and clocks here, then the clocks
+ * and core will be disabled when giveback() is called in each method
+ * (poll/interrupt/DMA)
*/
+ amba_vcore_enable(pl022->adev);
amba_pclk_enable(pl022->adev);
clk_enable(pl022->clk);
restore_state(pl022);
@@ -1910,8 +1911,6 @@ static int pl022_setup(struct spi_device *spi)
&& ((pl022->master_info)->enable_dma)) {
chip->enable_dma = true;
dev_dbg(&spi->dev, "DMA mode set in controller state\n");
- if (status < 0)
- goto err_config_params;
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
SSP_DMACR_MASK_RXDMAE, 0);
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
@@ -2021,7 +2020,7 @@ static void pl022_cleanup(struct spi_device *spi)
static int __devinit
-pl022_probe(struct amba_device *adev, struct amba_id *id)
+pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
@@ -2130,8 +2129,12 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
goto err_spi_register;
}
dev_dbg(dev, "probe succeded\n");
- /* Disable the silicon block pclk and clock it when needed */
+ /*
+ * Disable the silicon block pclk and any voltage domain and just
+ * power it up and clock it when it's needed
+ */
amba_pclk_disable(adev);
+ amba_vcore_disable(adev);
return 0;
err_spi_register:
@@ -2196,9 +2199,11 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state)
return status;
}
+ amba_vcore_enable(adev);
amba_pclk_enable(adev);
load_ssp_default_config(pl022);
amba_pclk_disable(adev);
+ amba_vcore_disable(adev);
dev_dbg(&adev->dev, "suspended\n");
return 0;
}
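The pl022 hunks above replace raw device_control() calls with the dmaengine helpers. The canonical slave sequence those helpers implement, as a minimal sketch for one channel (channel allocation and descriptor preparation are assumed to have happened already; on error the caller would fall back to dmaengine_terminate_all()):

static int example_issue(struct dma_chan *chan,
			 struct dma_slave_config *conf,
			 struct dma_async_tx_descriptor *desc)
{
	int ret;

	ret = dmaengine_slave_config(chan, conf);	/* program address/width/burst */
	if (ret)
		return ret;

	dmaengine_submit(desc);				/* queue the descriptor */
	dma_async_issue_pending(chan);			/* kick the engine */
	return 0;
}

In practice the slave config is programmed before the descriptors are prepared, which is the order the pl022 code above follows.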
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index 6beab99bf95b..1f0ed8005c91 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -571,6 +571,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
unsigned long tx_reg, rx_reg;
struct edmacc_param param;
void *rx_buf;
+ int b, c;
dma = &dspi->dma;
@@ -591,22 +592,38 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
if (t->tx_buf) {
t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
- dspi->wcount, DMA_TO_DEVICE);
+ t->len, DMA_TO_DEVICE);
if (dma_mapping_error(&spi->dev, t->tx_dma)) {
dev_dbg(sdev, "Unable to DMA map %d bytes"
- "TX buffer\n", dspi->wcount);
+ "TX buffer\n", t->len);
return -ENOMEM;
}
}
+ /*
+ * If number of words is greater than 65535, then we need
+ * to configure a 3-dimensional transfer. Use the BCNTRLD
+ * feature to allow for transfers that aren't even multiples
+ * of 65535 (or any other possible b size) by first transferring
+ * the remainder amount then grabbing the next N blocks of
+ * 65535 words.
+ */
+
+ c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */
+ b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */
+ if (b)
+ c++;
+ else
+ b = SZ_64K - 1;
+
param.opt = TCINTEN | EDMA_TCC(dma->tx_channel);
param.src = t->tx_buf ? t->tx_dma : tx_reg;
- param.a_b_cnt = dspi->wcount << 16 | data_type;
+ param.a_b_cnt = b << 16 | data_type;
param.dst = tx_reg;
param.src_dst_bidx = t->tx_buf ? data_type : 0;
- param.link_bcntrld = 0xffff;
- param.src_dst_cidx = 0;
- param.ccnt = 1;
+ param.link_bcntrld = 0xffffffff;
+ param.src_dst_cidx = t->tx_buf ? data_type : 0;
+ param.ccnt = c;
edma_write_slot(dma->tx_channel, &param);
edma_link(dma->tx_channel, dma->dummy_param_slot);
@@ -624,7 +641,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
if (t->rx_buf) {
rx_buf = t->rx_buf;
- rx_buf_count = dspi->rcount;
+ rx_buf_count = t->len;
} else {
rx_buf = dspi->rx_tmp_buf;
rx_buf_count = sizeof(dspi->rx_tmp_buf);
@@ -636,19 +653,19 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
rx_buf_count);
if (t->tx_buf)
- dma_unmap_single(NULL, t->tx_dma, dspi->wcount,
+ dma_unmap_single(NULL, t->tx_dma, t->len,
DMA_TO_DEVICE);
return -ENOMEM;
}
param.opt = TCINTEN | EDMA_TCC(dma->rx_channel);
param.src = rx_reg;
- param.a_b_cnt = dspi->rcount << 16 | data_type;
+ param.a_b_cnt = b << 16 | data_type;
param.dst = t->rx_dma;
param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
- param.link_bcntrld = 0xffff;
- param.src_dst_cidx = 0;
- param.ccnt = 1;
+ param.link_bcntrld = 0xffffffff;
+ param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16;
+ param.ccnt = c;
edma_write_slot(dma->rx_channel, &param);
if (pdata->cshold_bug)
@@ -675,7 +692,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
if (spicfg->io_type == SPI_IO_TYPE_DMA) {
if (t->tx_buf)
- dma_unmap_single(NULL, t->tx_dma, dspi->wcount,
+ dma_unmap_single(NULL, t->tx_dma, t->len,
DMA_TO_DEVICE);
dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
@@ -790,7 +807,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
struct resource *r, *mem;
resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
- resource_size_t dma_eventq = SPI_NO_RESOURCE;
int i = 0, ret = 0;
u32 spipc0;
@@ -878,17 +894,13 @@ static int davinci_spi_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (r)
dma_tx_chan = r->start;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
- if (r)
- dma_eventq = r->start;
dspi->bitbang.txrx_bufs = davinci_spi_bufs;
if (dma_rx_chan != SPI_NO_RESOURCE &&
- dma_tx_chan != SPI_NO_RESOURCE &&
- dma_eventq != SPI_NO_RESOURCE) {
+ dma_tx_chan != SPI_NO_RESOURCE) {
dspi->dma.rx_channel = dma_rx_chan;
dspi->dma.tx_channel = dma_tx_chan;
- dspi->dma.eventq = dma_eventq;
+ dspi->dma.eventq = pdata->dma_event_q;
ret = davinci_spi_request_dma(dspi);
if (ret)
@@ -897,7 +909,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "DMA: supported\n");
dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
"event queue: %d\n", dma_rx_chan, dma_tx_chan,
- dma_eventq);
+ pdata->dma_event_q);
}
dspi->get_rx = davinci_spi_rx_buf_u8;
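The BCNT/CCNT split above caps each EDMA frame at SZ_64K - 1 (65535) words and lets BCNTRLD reload the count between frames. A worked example for a hypothetical 70000-word transfer:

static void example_split(unsigned int wcount, int *b, int *c)
{
	*c = wcount / (SZ_64K - 1);		/* 70000 / 65535 = 1        */
	*b = wcount - *c * (SZ_64K - 1);	/* 70000 - 65535 = 4465     */
	if (*b)
		(*c)++;				/* remainder frame -> c = 2 */
	else
		*b = SZ_64K - 1;
	/* frame 1 moves 4465 words, BCNTRLD reloads 65535 for frame 2:
	 * 4465 + 65535 = 70000 words total */
}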
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index 22af77f98816..9a6196461b27 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -22,10 +22,10 @@
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
-
-#include <linux/spi/dw_spi.h>
#include <linux/spi/spi.h>
+#include "dw_spi.h"
+
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
diff --git a/drivers/spi/dw_spi.h b/drivers/spi/dw_spi.h
new file mode 100644
index 000000000000..fb0bce564844
--- /dev/null
+++ b/drivers/spi/dw_spi.h
@@ -0,0 +1,234 @@
+#ifndef DW_SPI_HEADER_H
+#define DW_SPI_HEADER_H
+
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+/* Bit fields in CTRLR0 */
+#define SPI_DFS_OFFSET 0
+
+#define SPI_FRF_OFFSET 4
+#define SPI_FRF_SPI 0x0
+#define SPI_FRF_SSP 0x1
+#define SPI_FRF_MICROWIRE 0x2
+#define SPI_FRF_RESV 0x3
+
+#define SPI_MODE_OFFSET 6
+#define SPI_SCPH_OFFSET 6
+#define SPI_SCOL_OFFSET 7
+
+#define SPI_TMOD_OFFSET 8
+#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
+#define SPI_TMOD_TR 0x0 /* xmit & recv */
+#define SPI_TMOD_TO 0x1 /* xmit only */
+#define SPI_TMOD_RO 0x2 /* recv only */
+#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
+
+#define SPI_SLVOE_OFFSET 10
+#define SPI_SRL_OFFSET 11
+#define SPI_CFS_OFFSET 12
+
+/* Bit fields in SR, 7 bits */
+#define SR_MASK 0x7f /* cover 7 bits */
+#define SR_BUSY (1 << 0)
+#define SR_TF_NOT_FULL (1 << 1)
+#define SR_TF_EMPT (1 << 2)
+#define SR_RF_NOT_EMPT (1 << 3)
+#define SR_RF_FULL (1 << 4)
+#define SR_TX_ERR (1 << 5)
+#define SR_DCOL (1 << 6)
+
+/* Bit fields in ISR, IMR, RISR, 7 bits */
+#define SPI_INT_TXEI (1 << 0)
+#define SPI_INT_TXOI (1 << 1)
+#define SPI_INT_RXUI (1 << 2)
+#define SPI_INT_RXOI (1 << 3)
+#define SPI_INT_RXFI (1 << 4)
+#define SPI_INT_MSTI (1 << 5)
+
+/* TX/RX interrupt level threshold, max can be 256 */
+#define SPI_INT_THRESHOLD 32
+
+enum dw_ssi_type {
+ SSI_MOTO_SPI = 0,
+ SSI_TI_SSP,
+ SSI_NS_MICROWIRE,
+};
+
+struct dw_spi_reg {
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 ssienr;
+ u32 mwcr;
+ u32 ser;
+ u32 baudr;
+ u32 txfltr;
+ u32 rxfltr;
+ u32 txflr;
+ u32 rxflr;
+ u32 sr;
+ u32 imr;
+ u32 isr;
+ u32 risr;
+ u32 txoicr;
+ u32 rxoicr;
+ u32 rxuicr;
+ u32 msticr;
+ u32 icr;
+ u32 dmacr;
+ u32 dmatdlr;
+ u32 dmardlr;
+ u32 idr;
+ u32 version;
+ u32 dr; /* Currently operated as 32 bits,
+ though only the low 16 bits matter */
+} __packed;
+
+struct dw_spi;
+struct dw_spi_dma_ops {
+ int (*dma_init)(struct dw_spi *dws);
+ void (*dma_exit)(struct dw_spi *dws);
+ int (*dma_transfer)(struct dw_spi *dws, int cs_change);
+};
+
+struct dw_spi {
+ struct spi_master *master;
+ struct spi_device *cur_dev;
+ struct device *parent_dev;
+ enum dw_ssi_type type;
+
+ void __iomem *regs;
+ unsigned long paddr;
+ u32 iolen;
+ int irq;
+ u32 fifo_len; /* depth of the FIFO buffer */
+ u32 max_freq; /* max bus freq supported */
+
+ u16 bus_num;
+ u16 num_cs; /* number of supported chip selects */
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ int run;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ struct chip_data *prev_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ int dma_mapped;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+ size_t rx_map_len;
+ size_t tx_map_len;
+ u8 n_bytes; /* current op uses 1 or 2 bytes */
+ u8 max_bits_per_word; /* maximum is 16 bits */
+ u32 dma_width;
+ int cs_change;
+ int (*write)(struct dw_spi *dws);
+ int (*read)(struct dw_spi *dws);
+ irqreturn_t (*transfer_handler)(struct dw_spi *dws);
+ void (*cs_control)(u32 command);
+
+ /* Dma info */
+ int dma_inited;
+ struct dma_chan *txchan;
+ struct scatterlist tx_sgl;
+ struct dma_chan *rxchan;
+ struct scatterlist rx_sgl;
+ int dma_chan_done;
+ struct device *dma_dev;
+ dma_addr_t dma_addr; /* phy address of the Data register */
+ struct dw_spi_dma_ops *dma_ops;
+ void *dma_priv; /* platform related info */
+ struct pci_dev *dmac;
+
+ /* Bus interface info */
+ void *priv;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+};
+
+#define dw_readl(dw, name) \
+ __raw_readl(&(((struct dw_spi_reg *)dw->regs)->name))
+#define dw_writel(dw, name, val) \
+ __raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name))
+#define dw_readw(dw, name) \
+ __raw_readw(&(((struct dw_spi_reg *)dw->regs)->name))
+#define dw_writew(dw, name, val) \
+ __raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name))
+
+static inline void spi_enable_chip(struct dw_spi *dws, int enable)
+{
+ dw_writel(dws, ssienr, (enable ? 1 : 0));
+}
+
+static inline void spi_set_clk(struct dw_spi *dws, u16 div)
+{
+ dw_writel(dws, baudr, div);
+}
+
+static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
+{
+ if (cs > dws->num_cs)
+ return;
+
+ if (dws->cs_control)
+ dws->cs_control(1);
+
+ dw_writel(dws, ser, 1 << cs);
+}
+
+/* Disable IRQ bits */
+static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, imr) & ~mask;
+ dw_writel(dws, imr, new_mask);
+}
+
+/* Enable IRQ bits */
+static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, imr) | mask;
+ dw_writel(dws, imr, new_mask);
+}
+
+/*
+ * Each SPI slave device that works with the dw_spi controller should
+ * have such a structure claiming its working mode (PIO/DMA etc.),
+ * which can be saved in the "controller_data" member of the
+ * struct spi_device.
+ */
+struct dw_spi_chip {
+ u8 poll_mode; /* 0 for controller polling mode */
+ u8 type; /* SPI/SSP/Microwire */
+ u8 enable_dma;
+ void (*cs_control)(u32 command);
+};
+
+extern int dw_spi_add_host(struct dw_spi *dws);
+extern void dw_spi_remove_host(struct dw_spi *dws);
+extern int dw_spi_suspend_host(struct dw_spi *dws);
+extern int dw_spi_resume_host(struct dw_spi *dws);
+extern void dw_spi_xfer_done(struct dw_spi *dws);
+
+/* platform related setup */
+extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */
+#endif /* DW_SPI_HEADER_H */
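dw_spi.h models the register file as a packed struct so the dw_readl()/dw_writel() macros can address registers by field name instead of hand-maintained offsets. A hedged usage sketch, assuming dws->regs already maps the controller:

static int example_wait_idle(struct dw_spi *dws)
{
	unsigned int retries = 1000;	/* arbitrary bound for the sketch */

	/* SR_BUSY is bit 0 of the status register declared above */
	while (dw_readl(dws, sr) & SR_BUSY) {
		if (!retries--)
			return -EBUSY;
		cpu_relax();
	}
	return 0;
}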
diff --git a/drivers/spi/dw_spi_mid.c b/drivers/spi/dw_spi_mid.c
index c91c966e0717..489178243d88 100644
--- a/drivers/spi/dw_spi_mid.c
+++ b/drivers/spi/dw_spi_mid.c
@@ -22,7 +22,8 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/spi/dw_spi.h>
+
+#include "dw_spi.h"
#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/intel_mid_dma.h>
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
index 2fa012c109bc..e0e813dad150 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/dw_spi_mmio.c
@@ -13,8 +13,10 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/spi/dw_spi.h>
#include <linux/spi/spi.h>
+#include <linux/scatterlist.h>
+
+#include "dw_spi.h"
#define DRIVER_NAME "dw_spi_mmio"
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
index 49ec3aa1219f..ad260aa5e526 100644
--- a/drivers/spi/dw_spi_pci.c
+++ b/drivers/spi/dw_spi_pci.c
@@ -20,9 +20,10 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/spi/dw_spi.h>
#include <linux/spi/spi.h>
+#include "dw_spi.h"
+
#define DRIVER_NAME "dw_spi_pci"
struct dw_spi_pci {
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index abb1ffbf3d20..3a5ed06d3d2f 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
- * Juha Yrjölä <juha.yrjola@nokia.com>
+ * Juha Yrjölä <juha.yrjola@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,6 +33,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -46,7 +47,6 @@
#define OMAP2_MCSPI_MAX_CTRL 4
#define OMAP2_MCSPI_REVISION 0x00
-#define OMAP2_MCSPI_SYSCONFIG 0x10
#define OMAP2_MCSPI_SYSSTATUS 0x14
#define OMAP2_MCSPI_IRQSTATUS 0x18
#define OMAP2_MCSPI_IRQENABLE 0x1c
@@ -63,13 +63,6 @@
/* per-register bitmasks: */
-#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4)
-#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2)
-#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0)
-#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1)
-
-#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0)
-
#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
@@ -122,13 +115,12 @@ struct omap2_mcspi {
spinlock_t lock;
struct list_head msg_queue;
struct spi_master *master;
- struct clk *ick;
- struct clk *fck;
/* Virtual base address of the controller */
void __iomem *base;
unsigned long phys;
/* SPI1 has 4 channels, while SPI2 has 2 */
struct omap2_mcspi_dma *dma_channels;
+ struct device *dev;
};
struct omap2_mcspi_cs {
@@ -144,7 +136,6 @@ struct omap2_mcspi_cs {
* corresponding registers are modified.
*/
struct omap2_mcspi_regs {
- u32 sysconfig;
u32 modulctrl;
u32 wakeupenable;
struct list_head cs;
@@ -268,9 +259,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
- mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG,
- omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig);
-
mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
@@ -280,20 +268,12 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
}
static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
{
- clk_disable(mcspi->ick);
- clk_disable(mcspi->fck);
+ pm_runtime_put_sync(mcspi->dev);
}
static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
{
- if (clk_enable(mcspi->ick))
- return -ENODEV;
- if (clk_enable(mcspi->fck))
- return -ENODEV;
-
- omap2_mcspi_restore_ctx(mcspi);
-
- return 0;
+ return pm_runtime_get_sync(mcspi->dev);
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
@@ -487,6 +467,9 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx_reg = base + OMAP2_MCSPI_RX0;
chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
+ if (c < (word_len>>3))
+ return 0;
+
if (word_len <= 8) {
u8 *rx;
const u8 *tx;
@@ -534,7 +517,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
}
- } while (c);
+ } while (c > (word_len>>3));
} else if (word_len <= 16) {
u16 *rx;
const u16 *tx;
@@ -581,7 +564,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
}
- } while (c);
+ } while (c > (word_len>>3));
} else if (word_len <= 32) {
u32 *rx;
const u32 *tx;
@@ -628,7 +611,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
}
- } while (c);
+ } while (c > (word_len>>3));
}
/* for TX_ONLY mode, be sure all words have shifted out */
@@ -651,6 +634,17 @@ out:
return count - c;
}
+static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
+{
+ u32 div;
+
+ for (div = 0; div < 15; div++)
+ if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
+ return div;
+
+ return 15;
+}
+
/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
@@ -673,12 +667,8 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
if (t && t->speed_hz)
speed_hz = t->speed_hz;
- if (speed_hz) {
- while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
- > speed_hz)
- div++;
- } else
- div = 15;
+ speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
+ div = omap2_mcspi_calc_divisor(speed_hz);
l = mcspi_cached_chconf0(spi);
@@ -715,7 +705,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
mcspi_write_chconf0(spi, l);
dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
- OMAP2_MCSPI_MAX_FREQ / (1 << div),
+ OMAP2_MCSPI_MAX_FREQ >> div,
(spi->mode & SPI_CPHA) ? "trailing" : "leading",
(spi->mode & SPI_CPOL) ? "inverted" : "normal");
@@ -819,8 +809,9 @@ static int omap2_mcspi_setup(struct spi_device *spi)
return ret;
}
- if (omap2_mcspi_enable_clocks(mcspi))
- return -ENODEV;
+ ret = omap2_mcspi_enable_clocks(mcspi);
+ if (ret < 0)
+ return ret;
ret = omap2_mcspi_setup_transfer(spi, NULL);
omap2_mcspi_disable_clocks(mcspi);
@@ -863,10 +854,11 @@ static void omap2_mcspi_work(struct work_struct *work)
struct omap2_mcspi *mcspi;
mcspi = container_of(work, struct omap2_mcspi, work);
- spin_lock_irq(&mcspi->lock);
- if (omap2_mcspi_enable_clocks(mcspi))
- goto out;
+ if (omap2_mcspi_enable_clocks(mcspi) < 0)
+ return;
+
+ spin_lock_irq(&mcspi->lock);
/* We only enable one channel at a time -- the one whose message is
* at the head of the queue -- although this controller would gladly
@@ -979,10 +971,9 @@ static void omap2_mcspi_work(struct work_struct *work)
spin_lock_irq(&mcspi->lock);
}
- omap2_mcspi_disable_clocks(mcspi);
-
-out:
spin_unlock_irq(&mcspi->lock);
+
+ omap2_mcspi_disable_clocks(mcspi);
}
static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
@@ -1015,10 +1006,10 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
t->bits_per_word);
return -EINVAL;
}
- if (t->speed_hz && t->speed_hz < OMAP2_MCSPI_MAX_FREQ/(1<<16)) {
- dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
- t->speed_hz,
- OMAP2_MCSPI_MAX_FREQ/(1<<16));
+ if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
+ dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n",
+ t->speed_hz,
+ OMAP2_MCSPI_MAX_FREQ >> 15);
return -EINVAL;
}
@@ -1058,25 +1049,15 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
-static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
+static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
u32 tmp;
+ int ret = 0;
- if (omap2_mcspi_enable_clocks(mcspi))
- return -1;
-
- mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
- OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
- do {
- tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
- } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
-
- tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
- OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
- OMAP2_MCSPI_SYSCONFIG_SMARTIDLE;
- mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp);
- omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp;
+ ret = omap2_mcspi_enable_clocks(mcspi);
+ if (ret < 0)
+ return ret;
tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
@@ -1087,91 +1068,26 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
return 0;
}
-static u8 __initdata spi1_rxdma_id [] = {
- OMAP24XX_DMA_SPI1_RX0,
- OMAP24XX_DMA_SPI1_RX1,
- OMAP24XX_DMA_SPI1_RX2,
- OMAP24XX_DMA_SPI1_RX3,
-};
-
-static u8 __initdata spi1_txdma_id [] = {
- OMAP24XX_DMA_SPI1_TX0,
- OMAP24XX_DMA_SPI1_TX1,
- OMAP24XX_DMA_SPI1_TX2,
- OMAP24XX_DMA_SPI1_TX3,
-};
-
-static u8 __initdata spi2_rxdma_id[] = {
- OMAP24XX_DMA_SPI2_RX0,
- OMAP24XX_DMA_SPI2_RX1,
-};
-
-static u8 __initdata spi2_txdma_id[] = {
- OMAP24XX_DMA_SPI2_TX0,
- OMAP24XX_DMA_SPI2_TX1,
-};
-
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
- || defined(CONFIG_ARCH_OMAP4)
-static u8 __initdata spi3_rxdma_id[] = {
- OMAP24XX_DMA_SPI3_RX0,
- OMAP24XX_DMA_SPI3_RX1,
-};
+static int omap_mcspi_runtime_resume(struct device *dev)
+{
+ struct omap2_mcspi *mcspi;
+ struct spi_master *master;
-static u8 __initdata spi3_txdma_id[] = {
- OMAP24XX_DMA_SPI3_TX0,
- OMAP24XX_DMA_SPI3_TX1,
-};
-#endif
+ master = dev_get_drvdata(dev);
+ mcspi = spi_master_get_devdata(master);
+ omap2_mcspi_restore_ctx(mcspi);
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-static u8 __initdata spi4_rxdma_id[] = {
- OMAP34XX_DMA_SPI4_RX0,
-};
+ return 0;
+}
-static u8 __initdata spi4_txdma_id[] = {
- OMAP34XX_DMA_SPI4_TX0,
-};
-#endif
static int __init omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
+ struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data;
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
- const u8 *rxdma_id, *txdma_id;
- unsigned num_chipselect;
-
- switch (pdev->id) {
- case 1:
- rxdma_id = spi1_rxdma_id;
- txdma_id = spi1_txdma_id;
- num_chipselect = 4;
- break;
- case 2:
- rxdma_id = spi2_rxdma_id;
- txdma_id = spi2_txdma_id;
- num_chipselect = 2;
- break;
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
- || defined(CONFIG_ARCH_OMAP4)
- case 3:
- rxdma_id = spi3_rxdma_id;
- txdma_id = spi3_txdma_id;
- num_chipselect = 2;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
- case 4:
- rxdma_id = spi4_rxdma_id;
- txdma_id = spi4_txdma_id;
- num_chipselect = 1;
- break;
-#endif
- default:
- return -EINVAL;
- }
master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
if (master == NULL) {
@@ -1188,7 +1104,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
master->setup = omap2_mcspi_setup;
master->transfer = omap2_mcspi_transfer;
master->cleanup = omap2_mcspi_cleanup;
- master->num_chipselect = num_chipselect;
+ master->num_chipselect = pdata->num_cs;
dev_set_drvdata(&pdev->dev, master);
@@ -1206,49 +1122,62 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
goto err1;
}
+ r->start += pdata->regs_offset;
+ r->end += pdata->regs_offset;
mcspi->phys = r->start;
mcspi->base = ioremap(r->start, r->end - r->start + 1);
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
- goto err1aa;
+ goto err2;
}
+ mcspi->dev = &pdev->dev;
INIT_WORK(&mcspi->work, omap2_mcspi_work);
spin_lock_init(&mcspi->lock);
INIT_LIST_HEAD(&mcspi->msg_queue);
INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
- mcspi->ick = clk_get(&pdev->dev, "ick");
- if (IS_ERR(mcspi->ick)) {
- dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
- status = PTR_ERR(mcspi->ick);
- goto err1a;
- }
- mcspi->fck = clk_get(&pdev->dev, "fck");
- if (IS_ERR(mcspi->fck)) {
- dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
- status = PTR_ERR(mcspi->fck);
- goto err2;
- }
-
mcspi->dma_channels = kcalloc(master->num_chipselect,
sizeof(struct omap2_mcspi_dma),
GFP_KERNEL);
if (mcspi->dma_channels == NULL)
- goto err3;
+ goto err2;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ char dma_ch_name[14];
+ struct resource *dma_res;
+
+ sprintf(dma_ch_name, "rx%d", i);
+ dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ dma_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
+ status = -ENODEV;
+ break;
+ }
- for (i = 0; i < num_chipselect; i++) {
mcspi->dma_channels[i].dma_rx_channel = -1;
- mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
+ mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
+ sprintf(dma_ch_name, "tx%d", i);
+ dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ dma_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
+ status = -ENODEV;
+ break;
+ }
+
mcspi->dma_channels[i].dma_tx_channel = -1;
- mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
+ mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
}
- if (omap2_mcspi_reset(mcspi) < 0)
- goto err4;
+ pm_runtime_enable(&pdev->dev);
+
+ if (status || omap2_mcspi_master_setup(mcspi) < 0)
+ goto err3;
status = spi_register_master(master);
if (status < 0)
@@ -1257,17 +1186,13 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
return status;
err4:
- kfree(mcspi->dma_channels);
+ spi_master_put(master);
err3:
- clk_put(mcspi->fck);
+ kfree(mcspi->dma_channels);
err2:
- clk_put(mcspi->ick);
-err1a:
- iounmap(mcspi->base);
-err1aa:
release_mem_region(r->start, (r->end - r->start) + 1);
+ iounmap(mcspi->base);
err1:
- spi_master_put(master);
return status;
}
@@ -1283,9 +1208,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
mcspi = spi_master_get_devdata(master);
dma_channels = mcspi->dma_channels;
- clk_put(mcspi->fck);
- clk_put(mcspi->ick);
-
+ omap2_mcspi_disable_clocks(mcspi);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, (r->end - r->start) + 1);
@@ -1336,6 +1259,7 @@ static int omap2_mcspi_resume(struct device *dev)
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
.resume = omap2_mcspi_resume,
+ .runtime_resume = omap_mcspi_runtime_resume,
};
static struct platform_driver omap2_mcspi_driver = {
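The new omap2_mcspi_calc_divisor() above replaces the open-coded loop: it returns the smallest power-of-two divider whose resulting clock does not exceed the requested rate, capped at 15. A standalone sketch of that arithmetic follows; it assumes OMAP2_MCSPI_MAX_FREQ is 48 MHz as in the mainline driver, and the sample rate is only illustrative.

/* Standalone sketch of the McSPI divisor selection (assumed 48 MHz reference). */
#include <stdio.h>

#define OMAP2_MCSPI_MAX_FREQ	48000000

static unsigned int calc_divisor(unsigned int speed_hz)
{
	unsigned int div;

	for (div = 0; div < 15; div++)
		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
			return div;
	return 15;
}

int main(void)
{
	unsigned int div = calc_divisor(10000000);

	/* 10 MHz request: 48/24/12 MHz are too fast, 6 MHz fits -> div = 3 */
	printf("div=%u, clk=%u Hz\n", div, OMAP2_MCSPI_MAX_FREQ >> div);
	return 0;
}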
diff --git a/drivers/spi/spi_altera.c b/drivers/spi/spi_altera.c
new file mode 100644
index 000000000000..4813a63ce6fb
--- /dev/null
+++ b/drivers/spi/spi_altera.c
@@ -0,0 +1,339 @@
+/*
+ * Altera SPI driver
+ *
+ * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on spi_s3c24xx.c, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#define DRV_NAME "spi_altera"
+
+#define ALTERA_SPI_RXDATA 0
+#define ALTERA_SPI_TXDATA 4
+#define ALTERA_SPI_STATUS 8
+#define ALTERA_SPI_CONTROL 12
+#define ALTERA_SPI_SLAVE_SEL 20
+
+#define ALTERA_SPI_STATUS_ROE_MSK 0x8
+#define ALTERA_SPI_STATUS_TOE_MSK 0x10
+#define ALTERA_SPI_STATUS_TMT_MSK 0x20
+#define ALTERA_SPI_STATUS_TRDY_MSK 0x40
+#define ALTERA_SPI_STATUS_RRDY_MSK 0x80
+#define ALTERA_SPI_STATUS_E_MSK 0x100
+
+#define ALTERA_SPI_CONTROL_IROE_MSK 0x8
+#define ALTERA_SPI_CONTROL_ITOE_MSK 0x10
+#define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40
+#define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80
+#define ALTERA_SPI_CONTROL_IE_MSK 0x100
+#define ALTERA_SPI_CONTROL_SSO_MSK 0x400
+
+struct altera_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *base;
+ int irq;
+ int len;
+ int count;
+ int bytes_per_word;
+ unsigned long imr;
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+};
+
+static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void altera_spi_chipsel(struct spi_device *spi, int value)
+{
+ struct altera_spi *hw = altera_spi_to_hw(spi);
+
+ if (spi->mode & SPI_CS_HIGH) {
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ writel(1 << spi->chip_select,
+ hw->base + ALTERA_SPI_SLAVE_SEL);
+ hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ writel(0, hw->base + ALTERA_SPI_SLAVE_SEL);
+ break;
+ }
+ } else {
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ writel(1 << spi->chip_select,
+ hw->base + ALTERA_SPI_SLAVE_SEL);
+ hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ break;
+ }
+ }
+}
+
+static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ return 0;
+}
+
+static int altera_spi_setup(struct spi_device *spi)
+{
+ return 0;
+}
+
+static inline unsigned int hw_txbyte(struct altera_spi *hw, int count)
+{
+ if (hw->tx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ return hw->tx[count];
+ case 2:
+ return (hw->tx[count * 2]
+ | (hw->tx[count * 2 + 1] << 8));
+ }
+ }
+ return 0;
+}
+
+static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct altera_spi *hw = altera_spi_to_hw(spi);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->count = 0;
+ hw->bytes_per_word = (t->bits_per_word ? : spi->bits_per_word) / 8;
+ hw->len = t->len / hw->bytes_per_word;
+
+ if (hw->irq >= 0) {
+ /* enable receive interrupt */
+ hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+
+ /* send the first byte */
+ writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA);
+
+ wait_for_completion(&hw->done);
+ /* disable receive interrupt */
+ hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ } else {
+ /* send the first byte */
+ writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA);
+
+ while (1) {
+ unsigned int rxd;
+
+ while (!(readl(hw->base + ALTERA_SPI_STATUS) &
+ ALTERA_SPI_STATUS_RRDY_MSK))
+ cpu_relax();
+
+ rxd = readl(hw->base + ALTERA_SPI_RXDATA);
+ if (hw->rx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ hw->rx[hw->count] = rxd;
+ break;
+ case 2:
+ hw->rx[hw->count * 2] = rxd;
+ hw->rx[hw->count * 2 + 1] = rxd >> 8;
+ break;
+ }
+ }
+
+ hw->count++;
+
+ if (hw->count < hw->len)
+ writel(hw_txbyte(hw, hw->count),
+ hw->base + ALTERA_SPI_TXDATA);
+ else
+ break;
+ }
+
+ }
+
+ return hw->count * hw->bytes_per_word;
+}
+
+static irqreturn_t altera_spi_irq(int irq, void *dev)
+{
+ struct altera_spi *hw = dev;
+ unsigned int rxd;
+
+ rxd = readl(hw->base + ALTERA_SPI_RXDATA);
+ if (hw->rx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ hw->rx[hw->count] = rxd;
+ break;
+ case 2:
+ hw->rx[hw->count * 2] = rxd;
+ hw->rx[hw->count * 2 + 1] = rxd >> 8;
+ break;
+ }
+ }
+
+ hw->count++;
+
+ if (hw->count < hw->len)
+ writel(hw_txbyte(hw, hw->count), hw->base + ALTERA_SPI_TXDATA);
+ else
+ complete(&hw->done);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit altera_spi_probe(struct platform_device *pdev)
+{
+ struct altera_spi_platform_data *platp = pdev->dev.platform_data;
+ struct altera_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ int err = -ENODEV;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));
+ if (!master)
+ return err;
+
+ /* setup the master state. */
+ master->bus_num = pdev->id;
+ master->num_chipselect = 16;
+ master->mode_bits = SPI_CS_HIGH;
+ master->setup = altera_spi_setup;
+
+ hw = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hw);
+
+ /* setup the state for the bitbang driver */
+ hw->bitbang.master = spi_master_get(master);
+ if (!hw->bitbang.master)
+ return err;
+ hw->bitbang.setup_transfer = altera_spi_setupxfer;
+ hw->bitbang.chipselect = altera_spi_chipsel;
+ hw->bitbang.txrx_bufs = altera_spi_txrx;
+
+ /* find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto exit_busy;
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name))
+ goto exit_busy;
+ hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!hw->base)
+ goto exit_busy;
+ /* program defaults into the registers */
+ hw->imr = 0; /* disable spi interrupts */
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ writel(0, hw->base + ALTERA_SPI_STATUS); /* clear status reg */
+ if (readl(hw->base + ALTERA_SPI_STATUS) & ALTERA_SPI_STATUS_RRDY_MSK)
+ readl(hw->base + ALTERA_SPI_RXDATA); /* flush rxdata */
+ /* irq is optional */
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq >= 0) {
+ init_completion(&hw->done);
+ err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0,
+ pdev->name, hw);
+ if (err)
+ goto exit;
+ }
+ /* find platform data */
+ if (!platp)
+ hw->bitbang.master->dev.of_node = pdev->dev.of_node;
+
+ /* register our spi controller */
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err)
+ goto exit;
+ dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
+
+ return 0;
+
+exit_busy:
+ err = -EBUSY;
+exit:
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ return err;
+}
+
+static int __devexit altera_spi_remove(struct platform_device *dev)
+{
+ struct altera_spi *hw = platform_get_drvdata(dev);
+ struct spi_master *master = hw->bitbang.master;
+
+ spi_bitbang_stop(&hw->bitbang);
+ platform_set_drvdata(dev, NULL);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id altera_spi_match[] = {
+ { .compatible = "ALTR,spi-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_spi_match);
+#else /* CONFIG_OF */
+#define altera_spi_match NULL
+#endif /* CONFIG_OF */
+
+static struct platform_driver altera_spi_driver = {
+ .probe = altera_spi_probe,
+ .remove = __devexit_p(altera_spi_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ .of_match_table = altera_spi_match,
+ },
+};
+
+static int __init altera_spi_init(void)
+{
+ return platform_driver_register(&altera_spi_driver);
+}
+module_init(altera_spi_init);
+
+static void __exit altera_spi_exit(void)
+{
+ platform_driver_unregister(&altera_spi_driver);
+}
+module_exit(altera_spi_exit);
+
+MODULE_DESCRIPTION("Altera SPI driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 3f223511127b..a28462486df8 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -425,6 +425,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
struct spi_message *msg = drv_data->cur_msg;
int n_bytes = drv_data->n_bytes;
+ int loop = 0;
/* wait until transfer finished. */
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
@@ -435,10 +436,15 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
/* last read */
if (drv_data->rx) {
dev_dbg(&drv_data->pdev->dev, "last read\n");
- if (n_bytes == 2)
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
- else if (n_bytes == 1)
- *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
+ if (n_bytes % 2) {
+ u16 *buf = (u16 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes / 2; loop++)
+ *buf++ = read_RDBR(drv_data);
+ } else {
+ u8 *buf = (u8 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes; loop++)
+ *buf++ = read_RDBR(drv_data);
+ }
drv_data->rx += n_bytes;
}
@@ -458,29 +464,53 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
if (drv_data->rx && drv_data->tx) {
/* duplex */
dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
- if (drv_data->n_bytes == 2) {
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
- } else if (drv_data->n_bytes == 1) {
- *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
+ if (n_bytes % 2) {
+ u16 *buf = (u16 *)drv_data->rx;
+ u16 *buf2 = (u16 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes / 2; loop++) {
+ *buf++ = read_RDBR(drv_data);
+ write_TDBR(drv_data, *buf2++);
+ }
+ } else {
+ u8 *buf = (u8 *)drv_data->rx;
+ u8 *buf2 = (u8 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes; loop++) {
+ *buf++ = read_RDBR(drv_data);
+ write_TDBR(drv_data, *buf2++);
+ }
}
} else if (drv_data->rx) {
/* read */
dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
- if (drv_data->n_bytes == 2)
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
- else if (drv_data->n_bytes == 1)
- *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
- write_TDBR(drv_data, chip->idle_tx_val);
+ if (n_bytes % 2) {
+ u16 *buf = (u16 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes / 2; loop++) {
+ *buf++ = read_RDBR(drv_data);
+ write_TDBR(drv_data, chip->idle_tx_val);
+ }
+ } else {
+ u8 *buf = (u8 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes; loop++) {
+ *buf++ = read_RDBR(drv_data);
+ write_TDBR(drv_data, chip->idle_tx_val);
+ }
+ }
} else if (drv_data->tx) {
/* write */
dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
- bfin_spi_dummy_read(drv_data);
- if (drv_data->n_bytes == 2)
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
- else if (drv_data->n_bytes == 1)
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
+ if (n_bytes % 2) {
+ u16 *buf = (u16 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes / 2; loop++) {
+ read_RDBR(drv_data);
+ write_TDBR(drv_data, *buf++);
+ }
+ } else {
+ u8 *buf = (u8 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes; loop++) {
+ read_RDBR(drv_data);
+ write_TDBR(drv_data, *buf++);
+ }
+ }
}
if (drv_data->tx)
@@ -623,6 +653,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
message->state = bfin_spi_next_transfer(drv_data);
/* Schedule next transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
+ return;
}
if (transfer->tx_buf != NULL) {
@@ -651,16 +682,16 @@ static void bfin_spi_pump_transfers(unsigned long data)
/* Bits per word setup */
bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
- if (bits_per_word == 8) {
- drv_data->n_bytes = 1;
- drv_data->len = transfer->len;
- cr_width = 0;
- drv_data->ops = &bfin_bfin_spi_transfer_ops_u8;
- } else if (bits_per_word == 16) {
- drv_data->n_bytes = 2;
+ if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) {
+ drv_data->n_bytes = bits_per_word/8;
drv_data->len = (transfer->len) >> 1;
cr_width = BIT_CTL_WORDSIZE;
drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
+ } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) {
+ drv_data->n_bytes = bits_per_word/8;
+ drv_data->len = transfer->len;
+ cr_width = 0;
+ drv_data->ops = &bfin_bfin_spi_transfer_ops_u8;
} else {
dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n");
message->status = -EINVAL;
@@ -815,10 +846,19 @@ static void bfin_spi_pump_transfers(unsigned long data)
if (drv_data->tx == NULL)
write_TDBR(drv_data, chip->idle_tx_val);
else {
- if (bits_per_word == 8)
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
- else
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ int loop;
+ if (bits_per_word % 16 == 0) {
+ u16 *buf = (u16 *)drv_data->tx;
+ for (loop = 0; loop < bits_per_word / 16;
+ loop++) {
+ write_TDBR(drv_data, *buf++);
+ }
+ } else if (bits_per_word % 8 == 0) {
+ u8 *buf = (u8 *)drv_data->tx;
+ for (loop = 0; loop < bits_per_word / 8; loop++)
+ write_TDBR(drv_data, *buf++);
+ }
+
drv_data->tx += drv_data->n_bytes;
}
@@ -1031,7 +1071,7 @@ static int bfin_spi_setup(struct spi_device *spi)
chip->ctl_reg &= bfin_ctl_reg;
}
- if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
+ if (spi->bits_per_word % 8) {
dev_err(&spi->dev, "%d bits_per_word is not supported\n",
spi->bits_per_word);
goto error;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 8b55724d5f39..14a63f6010d1 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -259,10 +259,6 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
- int (*setup_transfer)(struct spi_device *,
- struct spi_transfer *);
-
- setup_transfer = bitbang->setup_transfer;
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
@@ -300,11 +296,7 @@ static void bitbang_work(struct work_struct *work)
/* init (-1) or override (1) transfer params */
if (do_setup != 0) {
- if (!setup_transfer) {
- status = -ENOPROTOOPT;
- break;
- }
- status = setup_transfer(spi, t);
+ status = bitbang->setup_transfer(spi, t);
if (status < 0)
break;
if (do_setup == -1)
@@ -465,6 +457,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
}
} else if (!bitbang->master->setup)
return -EINVAL;
+ if (bitbang->master->transfer == spi_bitbang_transfer &&
+ !bitbang->setup_transfer)
+ return -EINVAL;
/* this task is the only thing to touch the SPI bits */
bitbang->busy = 0;
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 1cf9d5faabf4..69d6dba67c19 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -174,7 +174,7 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
#define SPI_IMX2_3_CTRL 0x08
#define SPI_IMX2_3_CTRL_ENABLE (1 << 0)
#define SPI_IMX2_3_CTRL_XCH (1 << 2)
-#define SPI_IMX2_3_CTRL_MODE(cs) (1 << ((cs) + 4))
+#define SPI_IMX2_3_CTRL_MODE_MASK (0xf << 4)
#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8
#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12
#define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18)
@@ -253,8 +253,14 @@ static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx,
{
u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0;
- /* set master mode */
- ctrl |= SPI_IMX2_3_CTRL_MODE(config->cs);
+ /*
+ * The hardware seems to have a race condition when changing modes. The
+ * current assumption is that the selection of the channel arrives
+ * earlier in the hardware than the mode bits when they are written at
+ * the same time.
+ * So set master mode for all channels as we do not support slave mode.
+ */
+ ctrl |= SPI_IMX2_3_CTRL_MODE_MASK;
/* set clock speed */
ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz);
diff --git a/drivers/spi/spi_oc_tiny.c b/drivers/spi/spi_oc_tiny.c
new file mode 100644
index 000000000000..f1bde66cea19
--- /dev/null
+++ b/drivers/spi/spi_oc_tiny.c
@@ -0,0 +1,425 @@
+/*
+ * OpenCores tiny SPI master driver
+ *
+ * http://opencores.org/project,tiny_spi
+ *
+ * Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on spi_s3c24xx.c, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/spi_oc_tiny.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+
+#define DRV_NAME "spi_oc_tiny"
+
+#define TINY_SPI_RXDATA 0
+#define TINY_SPI_TXDATA 4
+#define TINY_SPI_STATUS 8
+#define TINY_SPI_CONTROL 12
+#define TINY_SPI_BAUD 16
+
+#define TINY_SPI_STATUS_TXE 0x1
+#define TINY_SPI_STATUS_TXR 0x2
+
+struct tiny_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *base;
+ int irq;
+ unsigned int freq;
+ unsigned int baudwidth;
+ unsigned int baud;
+ unsigned int speed_hz;
+ unsigned int mode;
+ unsigned int len;
+ unsigned int txc, rxc;
+ const u8 *txp;
+ u8 *rxp;
+ unsigned int gpio_cs_count;
+ int *gpio_cs;
+};
+
+static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
+}
+
+static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ if (hw->gpio_cs_count) {
+ gpio_set_value(hw->gpio_cs[spi->chip_select],
+ (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+ }
+}
+
+static int tiny_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+ unsigned int baud = hw->baud;
+
+ if (t) {
+ if (t->speed_hz && t->speed_hz != hw->speed_hz)
+ baud = tiny_spi_baud(spi, t->speed_hz);
+ }
+ writel(baud, hw->base + TINY_SPI_BAUD);
+ writel(hw->mode, hw->base + TINY_SPI_CONTROL);
+ return 0;
+}
+
+static int tiny_spi_setup(struct spi_device *spi)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ if (spi->max_speed_hz != hw->speed_hz) {
+ hw->speed_hz = spi->max_speed_hz;
+ hw->baud = tiny_spi_baud(spi, hw->speed_hz);
+ }
+ hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA);
+ return 0;
+}
+
+static inline void tiny_spi_wait_txr(struct tiny_spi *hw)
+{
+ while (!(readb(hw->base + TINY_SPI_STATUS) &
+ TINY_SPI_STATUS_TXR))
+ cpu_relax();
+}
+
+static inline void tiny_spi_wait_txe(struct tiny_spi *hw)
+{
+ while (!(readb(hw->base + TINY_SPI_STATUS) &
+ TINY_SPI_STATUS_TXE))
+ cpu_relax();
+}
+
+static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+ const u8 *txp = t->tx_buf;
+ u8 *rxp = t->rx_buf;
+ unsigned int i;
+
+ if (hw->irq >= 0) {
+ /* use interrupt driven data transfer */
+ hw->len = t->len;
+ hw->txp = t->tx_buf;
+ hw->rxp = t->rx_buf;
+ hw->txc = 0;
+ hw->rxc = 0;
+
+ /* send the first byte */
+ if (t->len > 1) {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXR, hw->base + TINY_SPI_STATUS);
+ } else {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXE, hw->base + TINY_SPI_STATUS);
+ }
+
+ wait_for_completion(&hw->done);
+ } else if (txp && rxp) {
+ /* we need to tighten the transfer loop */
+ writeb(*txp++, hw->base + TINY_SPI_TXDATA);
+ if (t->len > 1) {
+ writeb(*txp++, hw->base + TINY_SPI_TXDATA);
+ for (i = 2; i < t->len; i++) {
+ u8 rx, tx = *txp++;
+ tiny_spi_wait_txr(hw);
+ rx = readb(hw->base + TINY_SPI_TXDATA);
+ writeb(tx, hw->base + TINY_SPI_TXDATA);
+ *rxp++ = rx;
+ }
+ tiny_spi_wait_txr(hw);
+ *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ }
+ tiny_spi_wait_txe(hw);
+ *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ } else if (rxp) {
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ if (t->len > 1) {
+ writeb(0,
+ hw->base + TINY_SPI_TXDATA);
+ for (i = 2; i < t->len; i++) {
+ u8 rx;
+ tiny_spi_wait_txr(hw);
+ rx = readb(hw->base + TINY_SPI_TXDATA);
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ *rxp++ = rx;
+ }
+ tiny_spi_wait_txr(hw);
+ *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ }
+ tiny_spi_wait_txe(hw);
+ *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ } else if (txp) {
+ writeb(*txp++, hw->base + TINY_SPI_TXDATA);
+ if (t->len > 1) {
+ writeb(*txp++, hw->base + TINY_SPI_TXDATA);
+ for (i = 2; i < t->len; i++) {
+ u8 tx = *txp++;
+ tiny_spi_wait_txr(hw);
+ writeb(tx, hw->base + TINY_SPI_TXDATA);
+ }
+ }
+ tiny_spi_wait_txe(hw);
+ } else {
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ if (t->len > 1) {
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ for (i = 2; i < t->len; i++) {
+ tiny_spi_wait_txr(hw);
+ writeb(0, hw->base + TINY_SPI_TXDATA);
+ }
+ }
+ tiny_spi_wait_txe(hw);
+ }
+ return t->len;
+}
+
+static irqreturn_t tiny_spi_irq(int irq, void *dev)
+{
+ struct tiny_spi *hw = dev;
+
+ writeb(0, hw->base + TINY_SPI_STATUS);
+ if (hw->rxc + 1 == hw->len) {
+ if (hw->rxp)
+ *hw->rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ hw->rxc++;
+ complete(&hw->done);
+ } else {
+ if (hw->rxp)
+ *hw->rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ hw->rxc++;
+ if (hw->txc < hw->len) {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXR,
+ hw->base + TINY_SPI_STATUS);
+ } else {
+ writeb(TINY_SPI_STATUS_TXE,
+ hw->base + TINY_SPI_STATUS);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_OF
+#include <linux/of_gpio.h>
+
+static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+{
+ struct tiny_spi *hw = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int i;
+ const __be32 *val;
+ int len;
+
+ if (!np)
+ return 0;
+ hw->gpio_cs_count = of_gpio_count(np);
+ if (hw->gpio_cs_count) {
+ hw->gpio_cs = devm_kzalloc(&pdev->dev,
+ hw->gpio_cs_count * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!hw->gpio_cs)
+ return -ENOMEM;
+ }
+ for (i = 0; i < hw->gpio_cs_count; i++) {
+ hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
+ if (hw->gpio_cs[i] < 0)
+ return -ENODEV;
+ }
+ hw->bitbang.master->dev.of_node = pdev->dev.of_node;
+ val = of_get_property(pdev->dev.of_node,
+ "clock-frequency", &len);
+ if (val && len >= sizeof(__be32))
+ hw->freq = be32_to_cpup(val);
+ val = of_get_property(pdev->dev.of_node, "baud-width", &len);
+ if (val && len >= sizeof(__be32))
+ hw->baudwidth = be32_to_cpup(val);
+ return 0;
+}
+#else /* !CONFIG_OF */
+static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static int __devinit tiny_spi_probe(struct platform_device *pdev)
+{
+ struct tiny_spi_platform_data *platp = pdev->dev.platform_data;
+ struct tiny_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ unsigned int i;
+ int err = -ENODEV;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
+ if (!master)
+ return err;
+
+ /* setup the master state. */
+ master->bus_num = pdev->id;
+ master->num_chipselect = 255;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tiny_spi_setup;
+
+ hw = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hw);
+
+ /* setup the state for the bitbang driver */
+ hw->bitbang.master = spi_master_get(master);
+ if (!hw->bitbang.master)
+ return err;
+ hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
+ hw->bitbang.chipselect = tiny_spi_chipselect;
+ hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
+
+ /* find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto exit_busy;
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name))
+ goto exit_busy;
+ hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!hw->base)
+ goto exit_busy;
+ /* irq is optional */
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq >= 0) {
+ init_completion(&hw->done);
+ err = devm_request_irq(&pdev->dev, hw->irq, tiny_spi_irq, 0,
+ pdev->name, hw);
+ if (err)
+ goto exit;
+ }
+ /* find platform data */
+ if (platp) {
+ hw->gpio_cs_count = platp->gpio_cs_count;
+ hw->gpio_cs = platp->gpio_cs;
+ if (platp->gpio_cs_count && !platp->gpio_cs)
+ goto exit_busy;
+ hw->freq = platp->freq;
+ hw->baudwidth = platp->baudwidth;
+ } else {
+ err = tiny_spi_of_probe(pdev);
+ if (err)
+ goto exit;
+ }
+ for (i = 0; i < hw->gpio_cs_count; i++) {
+ err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
+ if (err)
+ goto exit_gpio;
+ gpio_direction_output(hw->gpio_cs[i], 1);
+ }
+ hw->bitbang.master->num_chipselect = max(1U, hw->gpio_cs_count);
+
+ /* register our spi controller */
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err)
+ goto exit;
+ dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
+
+ return 0;
+
+exit_gpio:
+ while (i-- > 0)
+ gpio_free(hw->gpio_cs[i]);
+exit_busy:
+ err = -EBUSY;
+exit:
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ return err;
+}
+
+static int __devexit tiny_spi_remove(struct platform_device *pdev)
+{
+ struct tiny_spi *hw = platform_get_drvdata(pdev);
+ struct spi_master *master = hw->bitbang.master;
+ unsigned int i;
+
+ spi_bitbang_stop(&hw->bitbang);
+ for (i = 0; i < hw->gpio_cs_count; i++)
+ gpio_free(hw->gpio_cs[i]);
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tiny_spi_match[] = {
+ { .compatible = "opencores,tiny-spi-rtlsvn2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tiny_spi_match);
+#else /* CONFIG_OF */
+#define tiny_spi_match NULL
+#endif /* CONFIG_OF */
+
+static struct platform_driver tiny_spi_driver = {
+ .probe = tiny_spi_probe,
+ .remove = __devexit_p(tiny_spi_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ .of_match_table = tiny_spi_match,
+ },
+};
+
+static int __init tiny_spi_init(void)
+{
+ return platform_driver_register(&tiny_spi_driver);
+}
+module_init(tiny_spi_init);
+
+static void __exit tiny_spi_exit(void)
+{
+ platform_driver_unregister(&tiny_spi_driver);
+}
+module_exit(tiny_spi_exit);
+
+MODULE_DESCRIPTION("OpenCores tiny SPI driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
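For the OpenCores driver just added, tiny_spi_baud() derives the BAUD register value as DIV_ROUND_UP(freq, hz * 2) - 1, capped by the register width. A quick sketch with illustrative numbers; the 50 MHz core clock and 7-bit baud register are assumptions, not values from this patch.

/* Sketch of the tiny SPI baud computation used above; numbers are illustrative. */
#include <stdio.h>

static unsigned int tiny_baud(unsigned int freq, unsigned int hz,
			      unsigned int baudwidth)
{
	unsigned int div = (freq + 2 * hz - 1) / (2 * hz);	/* DIV_ROUND_UP */
	unsigned int max = 1U << baudwidth;

	return (div < max ? div : max) - 1;
}

int main(void)
{
	/* 50 MHz clock, 8 MHz requested -> divider 4, register value 3,
	 * actual SCLK 50 MHz / (2 * 4) = 6.25 MHz */
	printf("baud reg = %u\n", tiny_baud(50000000, 8000000, 7));
	return 0;
}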
diff --git a/drivers/spi/spi_sh.c b/drivers/spi/spi_sh.c
new file mode 100644
index 000000000000..869a07d375d6
--- /dev/null
+++ b/drivers/spi/spi_sh.c
@@ -0,0 +1,543 @@
+/*
+ * SH SPI bus driver
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ *
+ * Based on pxa2xx_spi.c:
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+
+#define SPI_SH_TBR 0x00
+#define SPI_SH_RBR 0x00
+#define SPI_SH_CR1 0x08
+#define SPI_SH_CR2 0x10
+#define SPI_SH_CR3 0x18
+#define SPI_SH_CR4 0x20
+#define SPI_SH_CR5 0x28
+
+/* CR1 */
+#define SPI_SH_TBE 0x80
+#define SPI_SH_TBF 0x40
+#define SPI_SH_RBE 0x20
+#define SPI_SH_RBF 0x10
+#define SPI_SH_PFONRD 0x08
+#define SPI_SH_SSDB 0x04
+#define SPI_SH_SSD 0x02
+#define SPI_SH_SSA 0x01
+
+/* CR2 */
+#define SPI_SH_RSTF 0x80
+#define SPI_SH_LOOPBK 0x40
+#define SPI_SH_CPOL 0x20
+#define SPI_SH_CPHA 0x10
+#define SPI_SH_L1M0 0x08
+
+/* CR3 */
+#define SPI_SH_MAX_BYTE 0xFF
+
+/* CR4 */
+#define SPI_SH_TBEI 0x80
+#define SPI_SH_TBFI 0x40
+#define SPI_SH_RBEI 0x20
+#define SPI_SH_RBFI 0x10
+#define SPI_SH_WPABRT 0x04
+#define SPI_SH_SSS 0x01
+
+/* CR8 */
+#define SPI_SH_P1L0 0x80
+#define SPI_SH_PP1L0 0x40
+#define SPI_SH_MUXI 0x20
+#define SPI_SH_MUXIRQ 0x10
+
+#define SPI_SH_FIFO_SIZE 32
+#define SPI_SH_SEND_TIMEOUT (3 * HZ)
+#define SPI_SH_RECEIVE_TIMEOUT (HZ >> 3)
+
+#undef DEBUG
+
+struct spi_sh_data {
+ void __iomem *addr;
+ int irq;
+ struct spi_master *master;
+ struct list_head queue;
+ struct workqueue_struct *workqueue;
+ struct work_struct ws;
+ unsigned long cr1;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+};
+
+static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
+ unsigned long offset)
+{
+ writel(data, ss->addr + offset);
+}
+
+static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
+{
+ return readl(ss->addr + offset);
+}
+
+static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
+ unsigned long offset)
+{
+ unsigned long tmp;
+
+ tmp = spi_sh_read(ss, offset);
+ tmp |= val;
+ spi_sh_write(ss, tmp, offset);
+}
+
+static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
+ unsigned long offset)
+{
+ unsigned long tmp;
+
+ tmp = spi_sh_read(ss, offset);
+ tmp &= ~val;
+ spi_sh_write(ss, tmp, offset);
+}
+
+static void clear_fifo(struct spi_sh_data *ss)
+{
+ spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
+ spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
+}
+
+static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
+{
+ int timeout = 100000;
+
+ while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
+ udelay(10);
+ if (timeout-- < 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
+{
+ int timeout = 100000;
+
+ while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
+ udelay(10);
+ if (timeout-- < 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int i, retval = 0;
+ int remain = t->len;
+ int cur_len;
+ unsigned char *data;
+ unsigned long tmp;
+ long ret;
+
+ if (t->len)
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ data = (unsigned char *)t->tx_buf;
+ while (remain > 0) {
+ cur_len = min(SPI_SH_FIFO_SIZE, remain);
+ for (i = 0; i < cur_len &&
+ !(spi_sh_read(ss, SPI_SH_CR4) &
+ SPI_SH_WPABRT) &&
+ !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
+ i++)
+ spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);
+
+ if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
+ /* Abort SPI operation */
+ spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
+ retval = -EIO;
+ break;
+ }
+
+ cur_len = i;
+
+ remain -= cur_len;
+ data += cur_len;
+
+ if (remain > 0) {
+ ss->cr1 &= ~SPI_SH_TBE;
+ spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_TBE,
+ SPI_SH_SEND_TIMEOUT);
+ if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+ }
+
+ if (list_is_last(&t->transfer_list, &mesg->transfers)) {
+ tmp = spi_sh_read(ss, SPI_SH_CR1);
+ tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
+ spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ ss->cr1 &= ~SPI_SH_TBE;
+ spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_TBE,
+ SPI_SH_SEND_TIMEOUT);
+ if (ret == 0 && (ss->cr1 & SPI_SH_TBE)) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return retval;
+}
+
+static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int i;
+ int remain = t->len;
+ int cur_len;
+ unsigned char *data;
+ unsigned long tmp;
+ long ret;
+
+ if (t->len > SPI_SH_MAX_BYTE)
+ spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
+ else
+ spi_sh_write(ss, t->len, SPI_SH_CR3);
+
+ tmp = spi_sh_read(ss, SPI_SH_CR1);
+ tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
+ spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ spi_sh_wait_write_buffer_empty(ss);
+
+ data = (unsigned char *)t->rx_buf;
+ while (remain > 0) {
+ if (remain >= SPI_SH_FIFO_SIZE) {
+ ss->cr1 &= ~SPI_SH_RBF;
+ spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_RBF,
+ SPI_SH_RECEIVE_TIMEOUT);
+ if (ret == 0 &&
+ spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ cur_len = min(SPI_SH_FIFO_SIZE, remain);
+ for (i = 0; i < cur_len; i++) {
+ if (spi_sh_wait_receive_buffer(ss))
+ break;
+ data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
+ }
+
+ remain -= cur_len;
+ data += cur_len;
+ }
+
+ /* deassert CS when SPI is receiving. */
+ if (t->len > SPI_SH_MAX_BYTE) {
+ clear_fifo(ss);
+ spi_sh_write(ss, 1, SPI_SH_CR3);
+ } else {
+ spi_sh_write(ss, 0, SPI_SH_CR3);
+ }
+
+ return 0;
+}
+
+static void spi_sh_work(struct work_struct *work)
+{
+ struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
+ struct spi_message *mesg;
+ struct spi_transfer *t;
+ unsigned long flags;
+ int ret;
+
+ pr_debug("%s: enter\n", __func__);
+
+ spin_lock_irqsave(&ss->lock, flags);
+ while (!list_empty(&ss->queue)) {
+ mesg = list_entry(ss->queue.next, struct spi_message, queue);
+ list_del_init(&mesg->queue);
+
+ spin_unlock_irqrestore(&ss->lock, flags);
+ list_for_each_entry(t, &mesg->transfers, transfer_list) {
+ pr_debug("tx_buf = %p, rx_buf = %p\n",
+ t->tx_buf, t->rx_buf);
+ pr_debug("len = %d, delay_usecs = %d\n",
+ t->len, t->delay_usecs);
+
+ if (t->tx_buf) {
+ ret = spi_sh_send(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ if (t->rx_buf) {
+ ret = spi_sh_receive(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ mesg->actual_length += t->len;
+ }
+ spin_lock_irqsave(&ss->lock, flags);
+
+ mesg->status = 0;
+ mesg->complete(mesg->context);
+ }
+
+ clear_fifo(ss);
+ spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
+ udelay(100);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+
+ clear_fifo(ss);
+
+ spin_unlock_irqrestore(&ss->lock, flags);
+
+ return;
+
+ error:
+ mesg->status = ret;
+ mesg->complete(mesg->context);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+ clear_fifo(ss);
+
+}
+
+static int spi_sh_setup(struct spi_device *spi)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ pr_debug("%s: enter\n", __func__);
+
+ spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI cycle stop */
+ spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */
+ spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */
+
+ clear_fifo(ss);
+
+ /* 1/8 clock */
+ spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
+ udelay(10);
+
+ return 0;
+}
+
+static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ pr_debug("%s: enter\n", __func__);
+ pr_debug("\tmode = %02x\n", spi->mode);
+
+ spin_lock_irqsave(&ss->lock, flags);
+
+ mesg->actual_length = 0;
+ mesg->status = -EINPROGRESS;
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ list_add_tail(&mesg->queue, &ss->queue);
+ queue_work(ss->workqueue, &ss->ws);
+
+ spin_unlock_irqrestore(&ss->lock, flags);
+
+ return 0;
+}
+
+static void spi_sh_cleanup(struct spi_device *spi)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+
+ pr_debug("%s: enter\n", __func__);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+}
+
+static irqreturn_t spi_sh_irq(int irq, void *_ss)
+{
+ struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
+ unsigned long cr1;
+
+ cr1 = spi_sh_read(ss, SPI_SH_CR1);
+ if (cr1 & SPI_SH_TBE)
+ ss->cr1 |= SPI_SH_TBE;
+ if (cr1 & SPI_SH_TBF)
+ ss->cr1 |= SPI_SH_TBF;
+ if (cr1 & SPI_SH_RBE)
+ ss->cr1 |= SPI_SH_RBE;
+ if (cr1 & SPI_SH_RBF)
+ ss->cr1 |= SPI_SH_RBF;
+
+ if (ss->cr1) {
+ spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
+ wake_up(&ss->wait);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devexit spi_sh_remove(struct platform_device *pdev)
+{
+ struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev);
+
+ destroy_workqueue(ss->workqueue);
+ free_irq(ss->irq, ss);
+ iounmap(ss->addr);
+ spi_master_put(ss->master);
+
+ return 0;
+}
+
+static int __devinit spi_sh_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_master *master;
+ struct spi_sh_data *ss;
+ int ret, irq;
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(res == NULL)) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq error\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ return -ENOMEM;
+ }
+
+ ss = spi_master_get_devdata(master);
+ dev_set_drvdata(&pdev->dev, ss);
+
+ ss->irq = irq;
+ ss->master = master;
+ ss->addr = ioremap(res->start, resource_size(res));
+ if (ss->addr == NULL) {
+ dev_err(&pdev->dev, "ioremap error.\n");
+ ret = -ENOMEM;
+ goto error1;
+ }
+ INIT_LIST_HEAD(&ss->queue);
+ spin_lock_init(&ss->lock);
+ INIT_WORK(&ss->ws, spi_sh_work);
+ init_waitqueue_head(&ss->wait);
+ ss->workqueue = create_singlethread_workqueue(
+ dev_name(master->dev.parent));
+ if (ss->workqueue == NULL) {
+ dev_err(&pdev->dev, "create workqueue error\n");
+ ret = -EBUSY;
+ goto error2;
+ }
+
+ ret = request_irq(irq, spi_sh_irq, IRQF_DISABLED, "spi_sh", ss);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq error\n");
+ goto error3;
+ }
+
+ master->num_chipselect = 2;
+ master->bus_num = pdev->id;
+ master->setup = spi_sh_setup;
+ master->transfer = spi_sh_transfer;
+ master->cleanup = spi_sh_cleanup;
+
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ printk(KERN_ERR "spi_register_master error.\n");
+ goto error4;
+ }
+
+ return 0;
+
+ error4:
+ free_irq(irq, ss);
+ error3:
+ destroy_workqueue(ss->workqueue);
+ error2:
+ iounmap(ss->addr);
+ error1:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static struct platform_driver spi_sh_driver = {
+ .probe = spi_sh_probe,
+ .remove = __devexit_p(spi_sh_remove),
+ .driver = {
+ .name = "sh_spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init spi_sh_init(void)
+{
+ return platform_driver_register(&spi_sh_driver);
+}
+module_init(spi_sh_init);
+
+static void __exit spi_sh_exit(void)
+{
+ platform_driver_unregister(&spi_sh_driver);
+}
+module_exit(spi_sh_exit);
+
+MODULE_DESCRIPTION("SH SPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:sh_spi");
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index 2c665fceaac7..e00d94b22250 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -9,22 +9,22 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/completion.h>
#include <linux/pm_runtime.h>
-#include <linux/gpio.h>
-#include <linux/bitmap.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/err.h>
+#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <linux/spi/sh_msiof.h>
#include <asm/unaligned.h>
@@ -67,7 +67,7 @@ struct sh_msiof_spi_priv {
#define STR_TEOF (1 << 23)
#define STR_REOF (1 << 7)
-static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
+static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
case TSCR:
@@ -79,7 +79,7 @@ static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
}
static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
- unsigned long value)
+ u32 value)
{
switch (reg_offs) {
case TSCR:
@@ -93,10 +93,10 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
}
static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
- unsigned long clr, unsigned long set)
+ u32 clr, u32 set)
{
- unsigned long mask = clr | set;
- unsigned long data;
+ u32 mask = clr | set;
+ u32 data;
int k;
data = sh_msiof_read(p, CTR);
@@ -166,10 +166,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
}
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
- int cpol, int cpha,
- int tx_hi_z, int lsb_first)
+ u32 cpol, u32 cpha,
+ u32 tx_hi_z, u32 lsb_first)
{
- unsigned long tmp;
+ u32 tmp;
int edge;
/*
@@ -187,7 +187,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp |= cpol << 30; /* TSCKIZ */
tmp |= cpol << 28; /* RSCKIZ */
- edge = cpol ? cpha : !cpha;
+ edge = cpol ^ !cpha;
tmp |= edge << 27; /* TEDG */
tmp |= edge << 26; /* REDG */
@@ -197,11 +197,9 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
- int bits, int words)
+ u32 bits, u32 words)
{
- unsigned long dr2;
-
- dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
+ u32 dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
if (tx_buf)
sh_msiof_write(p, TMDR2, dr2);
@@ -222,7 +220,7 @@ static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned char *buf_8 = tx_buf;
+ const u8 *buf_8 = tx_buf;
int k;
for (k = 0; k < words; k++)
@@ -232,7 +230,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned short *buf_16 = tx_buf;
+ const u16 *buf_16 = tx_buf;
int k;
for (k = 0; k < words; k++)
@@ -242,7 +240,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned short *buf_16 = tx_buf;
+ const u16 *buf_16 = tx_buf;
int k;
for (k = 0; k < words; k++)
@@ -252,7 +250,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned int *buf_32 = tx_buf;
+ const u32 *buf_32 = tx_buf;
int k;
for (k = 0; k < words; k++)
@@ -262,17 +260,37 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
- const unsigned int *buf_32 = tx_buf;
+ const u32 *buf_32 = tx_buf;
int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
}
+static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
+}
+
+static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+}
+
static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned char *buf_8 = rx_buf;
+ u8 *buf_8 = rx_buf;
int k;
for (k = 0; k < words; k++)
@@ -282,7 +300,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned short *buf_16 = rx_buf;
+ u16 *buf_16 = rx_buf;
int k;
for (k = 0; k < words; k++)
@@ -292,7 +310,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned short *buf_16 = rx_buf;
+ u16 *buf_16 = rx_buf;
int k;
for (k = 0; k < words; k++)
@@ -302,7 +320,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned int *buf_32 = rx_buf;
+ u32 *buf_32 = rx_buf;
int k;
for (k = 0; k < words; k++)
@@ -312,19 +330,40 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
- unsigned int *buf_32 = rx_buf;
+ u32 *buf_32 = rx_buf;
int k;
for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
}
+static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
+}
+
+static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
+}
+
static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t)
{
int bits;
bits = t ? t->bits_per_word : 0;
- bits = bits ? bits : spi->bits_per_word;
+ if (!bits)
+ bits = spi->bits_per_word;
return bits;
}
@@ -334,7 +373,8 @@ static unsigned long sh_msiof_spi_hz(struct spi_device *spi,
unsigned long hz;
hz = t ? t->speed_hz : 0;
- hz = hz ? hz : spi->max_speed_hz;
+ if (!hz)
+ hz = spi->max_speed_hz;
return hz;
}
@@ -468,9 +508,17 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
int bytes_done;
int words;
int n;
+ bool swab;
bits = sh_msiof_spi_bits(spi, t);
+ if (bits <= 8 && t->len > 15 && !(t->len & 3)) {
+ bits = 32;
+ swab = true;
+ } else {
+ swab = false;
+ }
+
/* setup bytes per word and fifo read/write functions */
if (bits <= 8) {
bytes_per_word = 1;
@@ -487,6 +535,17 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
rx_fifo = sh_msiof_spi_read_fifo_16u;
else
rx_fifo = sh_msiof_spi_read_fifo_16;
+ } else if (swab) {
+ bytes_per_word = 4;
+ if ((unsigned long)t->tx_buf & 0x03)
+ tx_fifo = sh_msiof_spi_write_fifo_s32u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_s32;
+
+ if ((unsigned long)t->rx_buf & 0x03)
+ rx_fifo = sh_msiof_spi_read_fifo_s32u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_s32;
} else {
bytes_per_word = 4;
if ((unsigned long)t->tx_buf & 0x03)
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 603428213d21..d9fd86211365 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -30,6 +30,7 @@
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/compat.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
@@ -471,6 +472,16 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return retval;
}
+#ifdef CONFIG_COMPAT
+static long
+spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define spidev_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
static int spidev_open(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
@@ -543,6 +554,7 @@ static const struct file_operations spidev_fops = {
.write = spidev_write,
.read = spidev_read,
.unlocked_ioctl = spidev_ioctl,
+ .compat_ioctl = spidev_compat_ioctl,
.open = spidev_open,
.release = spidev_release,
.llseek = no_llseek,
diff --git a/drivers/spi/ti-ssp-spi.c b/drivers/spi/ti-ssp-spi.c
new file mode 100644
index 000000000000..ee22795c7973
--- /dev/null
+++ b/drivers/spi/ti-ssp-spi.c
@@ -0,0 +1,402 @@
+/*
+ * Sequencer Serial Port (SSP) based SPI master driver
+ *
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/ti_ssp.h>
+
+#define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH)
+
+struct ti_ssp_spi {
+ struct spi_master *master;
+ struct device *dev;
+ spinlock_t lock;
+ struct list_head msg_queue;
+ struct completion complete;
+ bool shutdown;
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ u8 mode, bpw;
+ int cs_active;
+ u32 pc_en, pc_dis, pc_wr, pc_rd;
+ void (*select)(int cs);
+};
+
+static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw)
+{
+ u32 ret;
+
+ ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret);
+ return ret;
+}
+
+static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data)
+{
+ ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL);
+}
+
+static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg,
+ struct spi_transfer *t)
+{
+ int count;
+
+ if (hw->bpw <= 8) {
+ u8 *rx = t->rx_buf;
+ const u8 *tx = t->tx_buf;
+
+ for (count = 0; count < t->len; count += 1) {
+ if (t->tx_buf)
+ ti_ssp_spi_tx(hw, *tx++);
+ if (t->rx_buf)
+ *rx++ = ti_ssp_spi_rx(hw);
+ }
+ } else if (hw->bpw <= 16) {
+ u16 *rx = t->rx_buf;
+ const u16 *tx = t->tx_buf;
+
+ for (count = 0; count < t->len; count += 2) {
+ if (t->tx_buf)
+ ti_ssp_spi_tx(hw, *tx++);
+ if (t->rx_buf)
+ *rx++ = ti_ssp_spi_rx(hw);
+ }
+ } else {
+ u32 *rx = t->rx_buf;
+ const u32 *tx = t->tx_buf;
+
+ for (count = 0; count < t->len; count += 4) {
+ if (t->tx_buf)
+ ti_ssp_spi_tx(hw, *tx++);
+ if (t->rx_buf)
+ *rx++ = ti_ssp_spi_rx(hw);
+ }
+ }
+
+ msg->actual_length += count; /* bytes transferred */
+
+ dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n",
+ t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len,
+ hw->bpw, count, (count < t->len) ? " (under)" : "");
+
+ return (count < t->len) ? -EIO : 0; /* left over data */
+}
+
+static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active)
+{
+ cs_active = !!cs_active;
+ if (cs_active == hw->cs_active)
+ return;
+ ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL);
+ hw->cs_active = cs_active;
+}
+
+#define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \
+ cs_en | clk | SSP_COUNT((bits) * 2 - 1))
+#define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \
+ cs_en | clk | SSP_COUNT((bits) * 2 - 1))
+
+static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode)
+{
+ int error, idx = 0;
+ u32 seqram[16];
+ u32 cs_en, cs_dis, clk;
+ u32 topbits, botbits;
+
+ mode &= MODE_BITS;
+ if (mode == hw->mode && bpw == hw->bpw)
+ return 0;
+
+ cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW;
+ cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH;
+ clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW;
+
+ /* Construct instructions */
+
+ /* Disable Chip Select */
+ hw->pc_dis = idx;
+ seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk;
+ seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk;
+
+ /* Enable Chip Select */
+ hw->pc_en = idx;
+ seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk;
+ seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
+
+ /* Reads and writes need to be split for bpw > 16 */
+ topbits = (bpw > 16) ? 16 : bpw;
+ botbits = bpw - topbits;
+
+ /* Write */
+ hw->pc_wr = idx;
+ seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG;
+ if (botbits)
+ seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG;
+ seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
+
+ /* Read */
+ hw->pc_rd = idx;
+ if (botbits)
+ seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG;
+ seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG;
+ seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
+
+ error = ti_ssp_load(hw->dev, 0, seqram, idx);
+ if (error < 0)
+ return error;
+
+ error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ?
+ 0 : SSP_EARLY_DIN));
+ if (error < 0)
+ return error;
+
+ hw->bpw = bpw;
+ hw->mode = mode;
+
+ return error;
+}
+
+static void ti_ssp_spi_work(struct work_struct *work)
+{
+ struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work);
+
+ spin_lock(&hw->lock);
+
+ while (!list_empty(&hw->msg_queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ int status = 0;
+
+ m = container_of(hw->msg_queue.next, struct spi_message,
+ queue);
+
+ list_del_init(&m->queue);
+
+ spin_unlock(&hw->lock);
+
+ spi = m->spi;
+
+ if (hw->select)
+ hw->select(spi->chip_select);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ int bpw = spi->bits_per_word;
+ int xfer_status;
+
+ if (t->bits_per_word)
+ bpw = t->bits_per_word;
+
+ if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0)
+ break;
+
+ ti_ssp_spi_chip_select(hw, 1);
+
+ xfer_status = ti_ssp_spi_txrx(hw, m, t);
+ if (xfer_status < 0)
+ status = xfer_status;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (t->cs_change)
+ ti_ssp_spi_chip_select(hw, 0);
+ }
+
+ ti_ssp_spi_chip_select(hw, 0);
+ m->status = status;
+ m->complete(m->context);
+
+ spin_lock(&hw->lock);
+ }
+
+ if (hw->shutdown)
+ complete(&hw->complete);
+
+ spin_unlock(&hw->lock);
+}
+
+static int ti_ssp_spi_setup(struct spi_device *spi)
+{
+ if (spi->bits_per_word > 32)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct ti_ssp_spi *hw;
+ struct spi_transfer *t;
+ int error = 0;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ hw = spi_master_get_devdata(spi->master);
+
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->len && !(t->rx_buf || t->tx_buf)) {
+ dev_err(&spi->dev, "invalid xfer, no buffer\n");
+ return -EINVAL;
+ }
+
+ if (t->len && t->rx_buf && t->tx_buf) {
+ dev_err(&spi->dev, "invalid xfer, full duplex\n");
+ return -EINVAL;
+ }
+
+ if (t->bits_per_word > 32) {
+ dev_err(&spi->dev, "invalid xfer width %d\n",
+ t->bits_per_word);
+ return -EINVAL;
+ }
+ }
+
+ spin_lock(&hw->lock);
+ if (hw->shutdown) {
+ error = -ESHUTDOWN;
+ goto error_unlock;
+ }
+ list_add_tail(&m->queue, &hw->msg_queue);
+ queue_work(hw->workqueue, &hw->work);
+error_unlock:
+ spin_unlock(&hw->lock);
+ return error;
+}
+
+static int __devinit ti_ssp_spi_probe(struct platform_device *pdev)
+{
+ const struct ti_ssp_spi_data *pdata;
+ struct ti_ssp_spi *hw;
+ struct spi_master *master;
+ struct device *dev = &pdev->dev;
+ int error = 0;
+
+ pdata = dev->platform_data;
+ if (!pdata) {
+ dev_err(dev, "platform data not found\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi));
+ if (!master) {
+ dev_err(dev, "cannot allocate SPI master\n");
+ return -ENOMEM;
+ }
+
+ hw = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hw);
+
+ hw->master = master;
+ hw->dev = dev;
+ hw->select = pdata->select;
+
+ spin_lock_init(&hw->lock);
+ init_completion(&hw->complete);
+ INIT_LIST_HEAD(&hw->msg_queue);
+ INIT_WORK(&hw->work, ti_ssp_spi_work);
+
+ hw->workqueue = create_singlethread_workqueue(dev_name(dev));
+ if (!hw->workqueue) {
+ error = -ENOMEM;
+ dev_err(dev, "work queue creation failed\n");
+ goto error_wq;
+ }
+
+ error = ti_ssp_set_iosel(hw->dev, pdata->iosel);
+ if (error < 0) {
+ dev_err(dev, "io setup failed\n");
+ goto error_iosel;
+ }
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = pdata->num_cs;
+ master->mode_bits = MODE_BITS;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = ti_ssp_spi_setup;
+ master->transfer = ti_ssp_spi_transfer;
+
+ error = spi_register_master(master);
+ if (error) {
+ dev_err(dev, "master registration failed\n");
+ goto error_reg;
+ }
+
+ return 0;
+
+error_reg:
+error_iosel:
+ destroy_workqueue(hw->workqueue);
+error_wq:
+ spi_master_put(master);
+ return error;
+}
+
+static int __devexit ti_ssp_spi_remove(struct platform_device *pdev)
+{
+ struct ti_ssp_spi *hw = platform_get_drvdata(pdev);
+ int error;
+
+ hw->shutdown = 1;
+ while (!list_empty(&hw->msg_queue)) {
+ error = wait_for_completion_interruptible(&hw->complete);
+ if (error < 0) {
+ hw->shutdown = 0;
+ return error;
+ }
+ }
+ destroy_workqueue(hw->workqueue);
+ spi_unregister_master(hw->master);
+
+ return 0;
+}
+
+static struct platform_driver ti_ssp_spi_driver = {
+ .probe = ti_ssp_spi_probe,
+ .remove = __devexit_p(ti_ssp_spi_remove),
+ .driver = {
+ .name = "ti-ssp-spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ti_ssp_spi_init(void)
+{
+ return platform_driver_register(&ti_ssp_spi_driver);
+}
+module_init(ti_ssp_spi_init);
+
+static void __exit ti_ssp_spi_exit(void)
+{
+ platform_driver_unregister(&ti_ssp_spi_driver);
+}
+module_exit(ti_ssp_spi_exit);
+
+MODULE_DESCRIPTION("SSP SPI Master");
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ti-ssp-spi");
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 4d2c75df886c..c69c6f2c2c5c 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/xilinx_spi.h>
@@ -470,7 +471,7 @@ static int __devinit xilinx_spi_probe(struct platform_device *dev)
struct spi_master *master;
u8 i;
- pdata = dev->dev.platform_data;
+ pdata = mfd_get_data(dev);
if (pdata) {
num_cs = pdata->num_chipselect;
little_endian = pdata->little_endian;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index b90c2cf3e247..750fe5045efa 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -574,6 +574,7 @@ static const struct backlight_ops dcon_bl_ops = {
static struct backlight_properties dcon_bl_props = {
.max_brightness = 15,
+ .type = BACKLIGHT_RAW,
.power = FB_BLANK_UNBLANK,
};
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index 6607a89ccb4b..25294462b8b6 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -781,6 +781,7 @@ static int __init samsung_init(void)
/* create a backlight device to talk to this one */
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = sabi_config->max_brightness;
backlight_device = backlight_device_register("samsung", &sdev->dev,
NULL, &backlight_ops,
diff --git a/drivers/staging/tty/specialix.c b/drivers/staging/tty/specialix.c
index 47e5753f732a..17a1be536a46 100644
--- a/drivers/staging/tty/specialix.c
+++ b/drivers/staging/tty/specialix.c
@@ -1416,7 +1416,7 @@ static int sx_open(struct tty_struct *tty, struct file *filp)
board, bp, port, SX_PORT(tty->index));
if (sx_paranoia_check(port, tty->name, "sx_open")) {
- func_enter();
+ func_exit();
return -ENODEV;
}
@@ -1435,13 +1435,13 @@ static int sx_open(struct tty_struct *tty, struct file *filp)
error = sx_setup_port(bp, port);
if (error) {
- func_enter();
+ func_exit();
return error;
}
error = block_til_ready(tty, filp, port);
if (error) {
- func_enter();
+ func_exit();
return error;
}
@@ -1860,7 +1860,7 @@ static int sx_set_serial_info(struct specialix_port *port,
func_enter();
if (copy_from_user(&tmp, newinfo, sizeof(tmp))) {
- func_enter();
+ func_exit();
return -EFAULT;
}
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 366080baf474..7f19c8b7b84c 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -667,7 +667,13 @@ target_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
unsigned char *buf = cmd->t_task->t_task_buf;
- u32 blocks = dev->transport->get_blocks(dev);
+ unsigned long long blocks_long = dev->transport->get_blocks(dev);
+ u32 blocks;
+
+ if (blocks_long >= 0x00000000ffffffff)
+ blocks = 0xffffffff;
+ else
+ blocks = (u32)blocks_long;
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 73f7d6d81b4c..6ec51cbc018e 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -37,7 +37,6 @@
#include <target/target_core_base.h>
#include <target/target_core_device.h>
-#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index 16402445f2b2..03c285bb2f18 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -251,11 +251,11 @@ static int __init bfin_jc_init(void)
bfin_jc_write_buf.head = bfin_jc_write_buf.tail = 0;
bfin_jc_write_buf.buf = kmalloc(CIRC_SIZE, GFP_KERNEL);
if (!bfin_jc_write_buf.buf)
- goto err;
+ goto err_buf;
bfin_jc_driver = alloc_tty_driver(1);
if (!bfin_jc_driver)
- goto err;
+ goto err_driver;
bfin_jc_driver->owner = THIS_MODULE;
bfin_jc_driver->driver_name = DRV_NAME;
@@ -275,7 +275,9 @@ static int __init bfin_jc_init(void)
err:
put_tty_driver(bfin_jc_driver);
+ err_driver:
kfree(bfin_jc_write_buf.buf);
+ err_buf:
kthread_stop(bfin_jc_kthread);
return ret;
}
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 3740e327f180..c35f1a73bc8b 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -177,6 +177,8 @@ static int __init xen_hvc_init(void)
}
if (xencons_irq < 0)
xencons_irq = 0; /* NO_IRQ */
+ else
+ set_irq_noprobe(xencons_irq);
hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
if (IS_ERR(hp))
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index bedc6c1b6fa5..bef238f9ffd7 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -292,7 +292,7 @@ struct hvcs_struct {
/*
* Any variable below the kref is valid before a tty is connected and
* stays valid after the tty is disconnected. These shouldn't be
- * whacked until the koject refcount reaches zero though some entries
+ * whacked until the kobject refcount reaches zero though some entries
* may be changed via sysfs initiatives.
*/
struct kref kref; /* ref count & hvcs_struct lifetime */
@@ -309,6 +309,7 @@ struct hvcs_struct {
static LIST_HEAD(hvcs_structs);
static DEFINE_SPINLOCK(hvcs_structs_lock);
+static DEFINE_MUTEX(hvcs_init_mutex);
static void hvcs_unthrottle(struct tty_struct *tty);
static void hvcs_throttle(struct tty_struct *tty);
@@ -340,6 +341,7 @@ static int __devinit hvcs_probe(struct vio_dev *dev,
static int __devexit hvcs_remove(struct vio_dev *dev);
static int __init hvcs_module_init(void);
static void __exit hvcs_module_exit(void);
+static int __devinit hvcs_initialize(void);
#define HVCS_SCHED_READ 0x00000001
#define HVCS_QUICK_READ 0x00000002
@@ -762,7 +764,7 @@ static int __devinit hvcs_probe(
const struct vio_device_id *id)
{
struct hvcs_struct *hvcsd;
- int index;
+ int index, rc;
int retval;
if (!dev || !id) {
@@ -770,6 +772,13 @@ static int __devinit hvcs_probe(
return -EPERM;
}
+ /* Make sure we are properly initialized */
+ rc = hvcs_initialize();
+ if (rc) {
+ pr_err("HVCS: Failed to initialize core driver.\n");
+ return rc;
+ }
+
/* early to avoid cleanup on failure */
index = hvcs_get_index();
if (index < 0) {
@@ -1464,12 +1473,15 @@ static void hvcs_free_index_list(void)
hvcs_index_count = 0;
}
-static int __init hvcs_module_init(void)
+static int __devinit hvcs_initialize(void)
{
- int rc;
- int num_ttys_to_alloc;
+ int rc, num_ttys_to_alloc;
- printk(KERN_INFO "Initializing %s\n", hvcs_driver_string);
+ mutex_lock(&hvcs_init_mutex);
+ if (hvcs_task) {
+ mutex_unlock(&hvcs_init_mutex);
+ return 0;
+ }
/* Has the user specified an overload with an insmod param? */
if (hvcs_parm_num_devs <= 0 ||
@@ -1528,35 +1540,13 @@ static int __init hvcs_module_init(void)
hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
if (IS_ERR(hvcs_task)) {
- printk(KERN_ERR "HVCS: khvcsd creation failed. Driver not loaded.\n");
+ printk(KERN_ERR "HVCS: khvcsd creation failed.\n");
rc = -EIO;
goto kthread_fail;
}
-
- rc = vio_register_driver(&hvcs_vio_driver);
- if (rc) {
- printk(KERN_ERR "HVCS: can't register vio driver\n");
- goto vio_fail;
- }
-
- /*
- * This needs to be done AFTER the vio_register_driver() call or else
- * the kobjects won't be initialized properly.
- */
- rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
- if (rc) {
- printk(KERN_ERR "HVCS: sysfs attr create failed\n");
- goto attr_fail;
- }
-
- printk(KERN_INFO "HVCS: driver module inserted.\n");
-
+ mutex_unlock(&hvcs_init_mutex);
return 0;
-attr_fail:
- vio_unregister_driver(&hvcs_vio_driver);
-vio_fail:
- kthread_stop(hvcs_task);
kthread_fail:
kfree(hvcs_pi_buff);
buff_alloc_fail:
@@ -1566,15 +1556,39 @@ register_fail:
index_fail:
put_tty_driver(hvcs_tty_driver);
hvcs_tty_driver = NULL;
+ mutex_unlock(&hvcs_init_mutex);
return rc;
}
+static int __init hvcs_module_init(void)
+{
+ int rc = vio_register_driver(&hvcs_vio_driver);
+ if (rc) {
+ printk(KERN_ERR "HVCS: can't register vio driver\n");
+ return rc;
+ }
+
+ pr_info("HVCS: Driver registered.\n");
+
+ /* This needs to be done AFTER the vio_register_driver() call or else
+ * the kobjects won't be initialized properly.
+ */
+ rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
+ if (rc)
+ pr_warning("HVCS: Failed to create rescan file (err %d)\n", rc);
+
+ return 0;
+}
+
static void __exit hvcs_module_exit(void)
{
/*
* This driver receives hvcs_remove callbacks for each device upon
* module removal.
*/
+ vio_unregister_driver(&hvcs_vio_driver);
+ if (!hvcs_task)
+ return;
/*
* This synchronous operation will wake the khvcsd kthread if it is
@@ -1589,8 +1603,6 @@ static void __exit hvcs_module_exit(void)
driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan);
- vio_unregister_driver(&hvcs_vio_driver);
-
tty_unregister_driver(hvcs_tty_driver);
hvcs_free_index_list();
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index e461be164f67..e1aee37270f5 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -537,8 +537,8 @@ config SERIAL_S3C6400
config SERIAL_S5PV210
tristate "Samsung S5PV210 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_S5PV310)
- select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_S5PV310)
+ depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_EXYNOS4210)
+ select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210)
default y
help
Serial port support for Samsung's S5P Family of SoC's
@@ -1598,4 +1598,18 @@ config SERIAL_MSM_SMD
Enables userspace clients to read and write to some streaming SMD
ports via tty device interface for MSM chipset.
+config SERIAL_MXS_AUART
+ depends on ARCH_MXS
+ tristate "MXS AUART support"
+ select SERIAL_CORE
+ help
+ This driver supports the MXS Application UART (AUART) port.
+
+config SERIAL_MXS_AUART_CONSOLE
+ bool "MXS AUART console support"
+ depends on SERIAL_MXS_AUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable a MXS AUART port to be the system console.
+
endmenu
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 31e868cb49b2..fee0690ef8e3 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -93,3 +93,4 @@ obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
+obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2904aa044126..d742dd2c525c 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -676,7 +676,7 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
-static int pl010_probe(struct amba_device *dev, struct amba_id *id)
+static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
void __iomem *base;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e76d7d000128..57731e870085 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -96,6 +96,22 @@ static struct vendor_data vendor_st = {
};
/* Deals with DMA transactions */
+
+struct pl011_sgbuf {
+ struct scatterlist sg;
+ char *buf;
+};
+
+struct pl011_dmarx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ bool use_buf_b;
+ struct pl011_sgbuf sgbuf_a;
+ struct pl011_sgbuf sgbuf_b;
+ dma_cookie_t cookie;
+ bool running;
+};
+
struct pl011_dmatx_data {
struct dma_chan *chan;
struct scatterlist sg;
@@ -120,12 +136,70 @@ struct uart_amba_port {
char type[12];
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
- bool using_dma;
+ bool using_tx_dma;
+ bool using_rx_dma;
+ struct pl011_dmarx_data dmarx;
struct pl011_dmatx_data dmatx;
#endif
};
/*
+ * Reads up to 256 characters from the FIFO or until it's empty and
+ * inserts them into the TTY layer. Returns the number of characters
+ * read from the FIFO.
+ */
+static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+{
+ u16 status, ch;
+ unsigned int flag, max_count = 256;
+ int fifotaken = 0;
+
+ while (max_count--) {
+ status = readw(uap->port.membase + UART01x_FR);
+ if (status & UART01x_FR_RXFE)
+ break;
+
+ /* Take chars from the FIFO and update status */
+ ch = readw(uap->port.membase + UART01x_DR) |
+ UART_DUMMY_DR_RX;
+ flag = TTY_NORMAL;
+ uap->port.icount.rx++;
+ fifotaken++;
+
+ if (unlikely(ch & UART_DR_ERROR)) {
+ if (ch & UART011_DR_BE) {
+ ch &= ~(UART011_DR_FE | UART011_DR_PE);
+ uap->port.icount.brk++;
+ if (uart_handle_break(&uap->port))
+ continue;
+ } else if (ch & UART011_DR_PE)
+ uap->port.icount.parity++;
+ else if (ch & UART011_DR_FE)
+ uap->port.icount.frame++;
+ if (ch & UART011_DR_OE)
+ uap->port.icount.overrun++;
+
+ ch &= uap->port.read_status_mask;
+
+ if (ch & UART011_DR_BE)
+ flag = TTY_BREAK;
+ else if (ch & UART011_DR_PE)
+ flag = TTY_PARITY;
+ else if (ch & UART011_DR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&uap->port, ch & 255))
+ continue;
+
+ uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ }
+
+ return fifotaken;
+}
+
+
+/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
* no custom DMA interfaces are supported.
@@ -134,6 +208,31 @@ struct uart_amba_port {
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!sg->buf)
+ return -ENOMEM;
+
+ sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+
+ if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
+ kfree(sg->buf);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ if (sg->buf) {
+ dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
+ kfree(sg->buf);
+ }
+}
+
static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
{
/* DMA is the sole user of the platform data right now */
@@ -153,7 +252,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
return;
}
- /* Try to acquire a generic DMA engine slave channel */
+ /* Try to acquire a generic DMA engine slave TX channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -168,6 +267,28 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
dev_info(uap->port.dev, "DMA channel TX %s\n",
dma_chan_name(uap->dmatx.chan));
+
+ /* Optionally make use of an RX channel as well */
+ if (plat->dma_rx_param) {
+ struct dma_slave_config rx_conf = {
+ .src_addr = uap->port.mapbase + UART01x_DR,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .direction = DMA_FROM_DEVICE,
+ .src_maxburst = uap->fifosize >> 1,
+ };
+
+ chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+ if (!chan) {
+ dev_err(uap->port.dev, "no RX DMA channel!\n");
+ return;
+ }
+
+ dmaengine_slave_config(chan, &rx_conf);
+ uap->dmarx.chan = chan;
+
+ dev_info(uap->port.dev, "DMA channel RX %s\n",
+ dma_chan_name(uap->dmarx.chan));
+ }
}
#ifndef MODULE
@@ -219,9 +340,10 @@ static void pl011_dma_remove(struct uart_amba_port *uap)
/* TODO: remove the initcall if it has not yet executed */
if (uap->dmatx.chan)
dma_release_channel(uap->dmatx.chan);
+ if (uap->dmarx.chan)
+ dma_release_channel(uap->dmarx.chan);
}
-
/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
@@ -380,7 +502,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
*/
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
- if (!uap->using_dma)
+ if (!uap->using_tx_dma)
return false;
/*
@@ -432,7 +554,7 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
u16 dmacr;
- if (!uap->using_dma)
+ if (!uap->using_tx_dma)
return false;
if (!uap->port.x_char) {
@@ -492,7 +614,7 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
- if (!uap->using_dma)
+ if (!uap->using_tx_dma)
return;
/* Avoid deadlock with the DMA engine callback */
@@ -508,9 +630,219 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
}
}
+static void pl011_dma_rx_callback(void *data);
+
+static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+ struct dma_chan *rxchan = uap->dmarx.chan;
+ struct dma_device *dma_dev;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_async_tx_descriptor *desc;
+ struct pl011_sgbuf *sgbuf;
+
+ if (!rxchan)
+ return -EIO;
+
+ /* Start the RX DMA job */
+ sgbuf = uap->dmarx.use_buf_b ?
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ dma_dev = rxchan->device;
+ desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ /*
+ * If the DMA engine is busy and cannot prepare a
+ * channel, no big deal, the driver will fall back
+ * to interrupt mode as a result of this error code.
+ */
+ if (!desc) {
+ uap->dmarx.running = false;
+ dmaengine_terminate_all(rxchan);
+ return -EBUSY;
+ }
+
+ /* Some data to go along to the callback */
+ desc->callback = pl011_dma_rx_callback;
+ desc->callback_param = uap;
+ dmarx->cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(rxchan);
+
+ uap->dmacr |= UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->dmarx.running = true;
+
+ uap->im &= ~UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+
+ return 0;
+}
+
+/*
+ * This is called when either the DMA job is complete, or
+ * the FIFO timeout interrupt occurred. This must be called
+ * with the port spinlock uap->port.lock held.
+ */
+static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ u32 pending, bool use_buf_b,
+ bool readfifo)
+{
+ struct tty_struct *tty = uap->port.state->port.tty;
+ struct pl011_sgbuf *sgbuf = use_buf_b ?
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ struct device *dev = uap->dmarx.chan->device->dev;
+ int dma_count = 0;
+ u32 fifotaken = 0; /* only used for vdbg() */
+
+ /* Pick everything from the DMA first */
+ if (pending) {
+ /* Sync in buffer */
+ dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+ /*
+ * First take all chars in the DMA pipe, then look in the FIFO.
+ * Note that tty_insert_flip_string() tries to take as many chars
+ * as it can.
+ */
+ dma_count = tty_insert_flip_string(uap->port.state->port.tty,
+ sgbuf->buf, pending);
+
+ /* Return buffer to device */
+ dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+ uap->port.icount.rx += dma_count;
+ if (dma_count < pending)
+ dev_warn(uap->port.dev,
+ "couldn't insert all characters (TTY is full?)\n");
+ }
+
+ /*
+ * Only continue with trying to read the FIFO if all DMA chars have
+ * been taken first.
+ */
+ if (dma_count == pending && readfifo) {
+ /* Clear any error flags */
+ writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+ uap->port.membase + UART011_ICR);
+
+ /*
+ * If we read all the DMA'd characters, and we had an
+ * incomplete buffer, that could be due to an rx error, or
+ * maybe we just timed out. Read any pending chars and check
+ * the error status.
+ *
+ * Error conditions will only occur in the FIFO, these will
+ * trigger an immediate interrupt and stop the DMA job, so we
+ * will always find the error in the FIFO, never in the DMA
+ * buffer.
+ */
+ fifotaken = pl011_fifo_to_tty(uap);
+ }
+
+ spin_unlock(&uap->port.lock);
+ dev_vdbg(uap->port.dev,
+ "Took %d chars from DMA buffer and %d chars from the FIFO\n",
+ dma_count, fifotaken);
+ tty_flip_buffer_push(tty);
+ spin_lock(&uap->port.lock);
+}
+
+static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+ struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+ &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ size_t pending;
+ struct dma_tx_state state;
+ enum dma_status dmastat;
+
+ /*
+ * Pause the transfer so we can trust the current counter,
+ * do this before we pause the PL011 block, else we may
+ * overflow the FIFO.
+ */
+ if (dmaengine_pause(rxchan))
+ dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+ dmastat = rxchan->device->device_tx_status(rxchan,
+ dmarx->cookie, &state);
+ if (dmastat != DMA_PAUSED)
+ dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+
+ /* Disable RX DMA - incoming data will wait in the FIFO */
+ uap->dmacr &= ~UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->dmarx.running = false;
+
+ pending = sgbuf->sg.length - state.residue;
+ BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ dmaengine_terminate_all(rxchan);
+
+ /*
+ * This will take the chars we have so far and insert
+ * into the framework.
+ */
+ pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
+
+ /* Switch buffer & re-trigger DMA job */
+ dmarx->use_buf_b = !dmarx->use_buf_b;
+ if (pl011_dma_rx_trigger_dma(uap)) {
+ dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+ "fall back to interrupt mode\n");
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+}
+
+static void pl011_dma_rx_callback(void *data)
+{
+ struct uart_amba_port *uap = data;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ bool lastbuf = dmarx->use_buf_b;
+ int ret;
+
+ /*
+ * This completion interrupt occurs typically when the
+ * RX buffer is totally stuffed but no timeout has yet
+ * occurred. When that happens, we just want the RX
+ * routine to flush out the secondary DMA buffer while
+ * we immediately trigger the next DMA job.
+ */
+ spin_lock_irq(&uap->port.lock);
+ uap->dmarx.running = false;
+ dmarx->use_buf_b = !lastbuf;
+ ret = pl011_dma_rx_trigger_dma(uap);
+
+ pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
+ spin_unlock_irq(&uap->port.lock);
+ /*
+ * Do this check after we picked the DMA chars so we don't
+ * get some IRQ immediately from RX.
+ */
+ if (ret) {
+ dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+ "fall back to interrupt mode\n");
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+}
+
+/*
+ * Stop accepting received characters, when we're shutting down or
+ * suspending this port.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+ /* FIXME. Just disable the DMA enable */
+ uap->dmacr &= ~UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+}
static void pl011_dma_startup(struct uart_amba_port *uap)
{
+ int ret;
+
if (!uap->dmatx.chan)
return;
@@ -525,8 +857,33 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
/* The DMA buffer is now the FIFO the TTY subsystem can use */
uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
- uap->using_dma = true;
+ uap->using_tx_dma = true;
+
+ if (!uap->dmarx.chan)
+ goto skip_rx;
+
+ /* Allocate and map DMA RX buffers */
+ ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer A", ret);
+ goto skip_rx;
+ }
+
+ ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer B", ret);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ goto skip_rx;
+ }
+ uap->using_rx_dma = true;
+
+skip_rx:
/* Turn on DMA error (RX/TX will be enabled on demand) */
uap->dmacr |= UART011_DMAONERR;
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
@@ -539,11 +896,17 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
if (uap->vendor->dma_threshold)
writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
uap->port.membase + ST_UART011_DMAWM);
+
+ if (uap->using_rx_dma) {
+ if (pl011_dma_rx_trigger_dma(uap))
+ dev_dbg(uap->port.dev, "could not trigger initial "
+ "RX DMA job, fall back to interrupt mode\n");
+ }
}
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
- if (!uap->using_dma)
+ if (!(uap->using_tx_dma || uap->using_rx_dma))
return;
/* Disable RX and TX DMA */
@@ -555,19 +918,39 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
spin_unlock_irq(&uap->port.lock);
- /* In theory, this should already be done by pl011_dma_flush_buffer */
- dmaengine_terminate_all(uap->dmatx.chan);
- if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
- uap->dmatx.queued = false;
+ if (uap->using_tx_dma) {
+ /* In theory, this should already be done by pl011_dma_flush_buffer */
+ dmaengine_terminate_all(uap->dmatx.chan);
+ if (uap->dmatx.queued) {
+ dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ }
+
+ kfree(uap->dmatx.buf);
+ uap->using_tx_dma = false;
}
- kfree(uap->dmatx.buf);
+ if (uap->using_rx_dma) {
+ dmaengine_terminate_all(uap->dmarx.chan);
+ /* Clean up the RX DMA */
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ uap->using_rx_dma = false;
+ }
+}
- uap->using_dma = false;
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+ return uap->using_rx_dma;
}
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+ return uap->using_rx_dma && uap->dmarx.running;
+}
+
+
#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
@@ -600,6 +983,29 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
return false;
}
+static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+ return -EIO;
+}
+
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+ return false;
+}
+
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+ return false;
+}
+
#define pl011_dma_flush_buffer NULL
#endif
@@ -630,6 +1036,8 @@ static void pl011_stop_rx(struct uart_port *port)
uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
UART011_PEIM|UART011_BEIM|UART011_OEIM);
writew(uap->im, uap->port.membase + UART011_IMSC);
+
+ pl011_dma_rx_stop(uap);
}
static void pl011_enable_ms(struct uart_port *port)
@@ -643,51 +1051,24 @@ static void pl011_enable_ms(struct uart_port *port)
static void pl011_rx_chars(struct uart_amba_port *uap)
{
struct tty_struct *tty = uap->port.state->port.tty;
- unsigned int status, ch, flag, max_count = 256;
-
- status = readw(uap->port.membase + UART01x_FR);
- while ((status & UART01x_FR_RXFE) == 0 && max_count--) {
- ch = readw(uap->port.membase + UART01x_DR) | UART_DUMMY_DR_RX;
- flag = TTY_NORMAL;
- uap->port.icount.rx++;
-
- /*
- * Note that the error handling code is
- * out of the main execution path
- */
- if (unlikely(ch & UART_DR_ERROR)) {
- if (ch & UART011_DR_BE) {
- ch &= ~(UART011_DR_FE | UART011_DR_PE);
- uap->port.icount.brk++;
- if (uart_handle_break(&uap->port))
- goto ignore_char;
- } else if (ch & UART011_DR_PE)
- uap->port.icount.parity++;
- else if (ch & UART011_DR_FE)
- uap->port.icount.frame++;
- if (ch & UART011_DR_OE)
- uap->port.icount.overrun++;
-
- ch &= uap->port.read_status_mask;
-
- if (ch & UART011_DR_BE)
- flag = TTY_BREAK;
- else if (ch & UART011_DR_PE)
- flag = TTY_PARITY;
- else if (ch & UART011_DR_FE)
- flag = TTY_FRAME;
- }
- if (uart_handle_sysrq_char(&uap->port, ch & 255))
- goto ignore_char;
-
- uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ pl011_fifo_to_tty(uap);
- ignore_char:
- status = readw(uap->port.membase + UART01x_FR);
- }
spin_unlock(&uap->port.lock);
tty_flip_buffer_push(tty);
+ /*
+ * If we were temporarily out of DMA mode for a while,
+ * attempt to switch back to DMA mode again.
+ */
+ if (pl011_dma_rx_available(uap)) {
+ if (pl011_dma_rx_trigger_dma(uap)) {
+ dev_dbg(uap->port.dev, "could not trigger RX DMA job "
+ "fall back to interrupt mode again\n");
+ uap->im |= UART011_RXIM;
+ } else
+ uap->im &= ~UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
spin_lock(&uap->port.lock);
}
@@ -767,8 +1148,12 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
UART011_RXIS),
uap->port.membase + UART011_ICR);
- if (status & (UART011_RTIS|UART011_RXIS))
- pl011_rx_chars(uap);
+ if (status & (UART011_RTIS|UART011_RXIS)) {
+ if (pl011_dma_rx_running(uap))
+ pl011_dma_rx_irq(uap);
+ else
+ pl011_rx_chars(uap);
+ }
if (status & (UART011_DSRMIS|UART011_DCDMIS|
UART011_CTSMIS|UART011_RIMIS))
pl011_modem_status(uap);
@@ -945,10 +1330,14 @@ static int pl011_startup(struct uart_port *port)
pl011_dma_startup(uap);
/*
- * Finally, enable interrupts
+ * Finally, enable interrupts, only timeouts when using DMA
+ * if initial RX DMA job failed, start in interrupt mode
+ * as well.
*/
spin_lock_irq(&uap->port.lock);
- uap->im = UART011_RXIM | UART011_RTIM;
+ uap->im = UART011_RTIM;
+ if (!pl011_dma_rx_running(uap))
+ uap->im |= UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
@@ -1349,7 +1738,7 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
-static int pl011_probe(struct amba_device *dev, struct amba_id *id)
+static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 8e43a7b69e64..bfee9b4c6661 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -31,6 +32,7 @@
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/delay.h>
#include "msm_serial.h"
@@ -38,9 +40,20 @@ struct msm_port {
struct uart_port uart;
char name[16];
struct clk *clk;
+ struct clk *pclk;
unsigned int imr;
+ unsigned int *gsbi_base;
+ int is_uartdm;
+ unsigned int old_snap_state;
};
+static inline void wait_for_xmitr(struct uart_port *port, int bits)
+{
+ if (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY))
+ while ((msm_read(port, UART_ISR) & bits) != bits)
+ cpu_relax();
+}
+
static void msm_stop_tx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
@@ -73,6 +86,61 @@ static void msm_enable_ms(struct uart_port *port)
msm_write(port, msm_port->imr, UART_IMR);
}
+static void handle_rx_dm(struct uart_port *port, unsigned int misr)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int sr;
+ int count = 0;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ if (misr & UART_IMR_RXSTALE) {
+ count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
+ msm_port->old_snap_state;
+ msm_port->old_snap_state = 0;
+ } else {
+ count = 4 * (msm_read(port, UART_RFWR));
+ msm_port->old_snap_state += count;
+ }
+
+ /* TODO: Precise error reporting */
+
+ port->icount.rx += count;
+
+ while (count > 0) {
+ unsigned int c;
+
+ sr = msm_read(port, UART_SR);
+ if ((sr & UART_SR_RX_READY) == 0) {
+ msm_port->old_snap_state -= count;
+ break;
+ }
+ c = msm_read(port, UARTDM_RF);
+ if (sr & UART_SR_RX_BREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sr & UART_SR_PAR_FRAME_ERR)
+ port->icount.frame++;
+
+ /* TODO: handle sysrq */
+ tty_insert_flip_string(tty, (char *) &c,
+ (count > 4) ? 4 : count);
+ count -= 4;
+ }
+
+ tty_flip_buffer_push(tty);
+ if (misr & (UART_IMR_RXSTALE))
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+}
+
static void handle_rx(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
@@ -121,6 +189,12 @@ static void handle_rx(struct uart_port *port)
tty_flip_buffer_push(tty);
}
+static void reset_dm_count(struct uart_port *port)
+{
+ wait_for_xmitr(port, UART_ISR_TX_READY);
+ msm_write(port, 1, UARTDM_NCF_TX);
+}
+
static void handle_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
@@ -128,11 +202,18 @@ static void handle_tx(struct uart_port *port)
int sent_tx;
if (port->x_char) {
- msm_write(port, port->x_char, UART_TF);
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
+ msm_write(port, port->x_char,
+ msm_port->is_uartdm ? UARTDM_TF : UART_TF);
port->icount.tx++;
port->x_char = 0;
}
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
if (uart_circ_empty(xmit)) {
/* disable tx interrupts */
@@ -140,8 +221,11 @@ static void handle_tx(struct uart_port *port)
msm_write(port, msm_port->imr, UART_IMR);
break;
}
+ msm_write(port, xmit->buf[xmit->tail],
+ msm_port->is_uartdm ? UARTDM_TF : UART_TF);
- msm_write(port, xmit->buf[xmit->tail], UART_TF);
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -169,8 +253,12 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
- if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE))
- handle_rx(port);
+ if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
+ if (msm_port->is_uartdm)
+ handle_rx_dm(port, misr);
+ else
+ handle_rx(port);
+ }
if (misr & UART_IMR_TXLEV)
handle_tx(port);
if (misr & UART_IMR_DELTA_CTS)
@@ -192,10 +280,21 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
}
-static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+
+static void msm_reset(struct uart_port *port)
{
- unsigned int mr;
+ /* reset everything */
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+}
+void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int mr;
mr = msm_read(port, UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
@@ -219,6 +318,7 @@ static void msm_break_ctl(struct uart_port *port, int break_ctl)
static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
{
unsigned int baud_code, rxstale, watermark;
+ struct msm_port *msm_port = UART_TO_MSM(port);
switch (baud) {
case 300:
@@ -273,6 +373,9 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
break;
}
+ if (msm_port->is_uartdm)
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+
msm_write(port, baud_code, UART_CSR);
/* RX stale watermark */
@@ -288,25 +391,23 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
/* set TX watermark */
msm_write(port, 10, UART_TFWR);
+ if (msm_port->is_uartdm) {
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ }
+
return baud;
}
-static void msm_reset(struct uart_port *port)
-{
- /* reset everything */
- msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
-}
static void msm_init_clock(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
clk_enable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_enable(msm_port->pclk);
msm_serial_set_mnd_regs(port);
}
@@ -347,15 +448,31 @@ static int msm_startup(struct uart_port *port)
msm_write(port, data, UART_IPR);
}
- msm_reset(port);
+ data = 0;
+ if (!port->cons || (port->cons && !(port->cons->flags & CON_ENABLED))) {
+ msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_reset(port);
+ data = UART_CR_TX_ENABLE;
+ }
+
+ data |= UART_CR_RX_ENABLE;
+ msm_write(port, data, UART_CR); /* enable TX & RX */
- msm_write(port, 0x05, UART_CR); /* enable TX & RX */
+ /* Make sure IPR is not 0 to start with */
+ if (msm_port->is_uartdm)
+ msm_write(port, UART_IPR_STALE_LSB, UART_IPR);
/* turn on RX and CTS interrupts */
msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
UART_IMR_CURRENT_CTS;
- msm_write(port, msm_port->imr, UART_IMR);
+ if (msm_port->is_uartdm) {
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ }
+
+ msm_write(port, msm_port->imr, UART_IMR);
return 0;
}
@@ -384,7 +501,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
baud = msm_set_baud_rate(port, baud);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
-
+
/* calculate parity */
mr = msm_read(port, UART_MR2);
mr &= ~UART_MR2_PARITY_MODE;
@@ -454,48 +571,105 @@ static const char *msm_type(struct uart_port *port)
static void msm_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *resource;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct resource *uart_resource;
+ struct resource *gsbi_resource;
resource_size_t size;
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!resource))
+ uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!uart_resource))
return;
- size = resource->end - resource->start + 1;
+ size = resource_size(uart_resource);
release_mem_region(port->mapbase, size);
iounmap(port->membase);
port->membase = NULL;
+
+ if (msm_port->gsbi_base) {
+ iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base +
+ GSBI_CONTROL);
+
+ gsbi_resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "gsbi_resource");
+
+ if (unlikely(!gsbi_resource))
+ return;
+
+ size = resource_size(gsbi_resource);
+ release_mem_region(gsbi_resource->start, size);
+ iounmap(msm_port->gsbi_base);
+ msm_port->gsbi_base = NULL;
+ }
}
static int msm_request_port(struct uart_port *port)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *resource;
+ struct resource *uart_resource;
+ struct resource *gsbi_resource;
resource_size_t size;
+ int ret;
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!resource))
+ uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "uart_resource");
+ if (unlikely(!uart_resource))
return -ENXIO;
- size = resource->end - resource->start + 1;
- if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial")))
+ size = resource_size(uart_resource);
+
+ if (!request_mem_region(port->mapbase, size, "msm_serial"))
return -EBUSY;
port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
- release_mem_region(port->mapbase, size);
- return -EBUSY;
+ ret = -EBUSY;
+ goto fail_release_port;
+ }
+
+ gsbi_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "gsbi_resource");
+ /* Is this a GSBI-based port? */
+ if (gsbi_resource) {
+ size = resource_size(gsbi_resource);
+
+ if (!request_mem_region(gsbi_resource->start, size,
+ "msm_serial")) {
+ ret = -EBUSY;
+ goto fail_release_port;
+ }
+
+ msm_port->gsbi_base = ioremap(gsbi_resource->start, size);
+ if (!msm_port->gsbi_base) {
+ ret = -EBUSY;
+ goto fail_release_gsbi;
+ }
}
return 0;
+
+fail_release_gsbi:
+ release_mem_region(gsbi_resource->start, size);
+fail_release_port:
+ release_mem_region(port->mapbase, size);
+ return ret;
}
static void msm_config_port(struct uart_port *port, int flags)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ int ret;
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_MSM;
- msm_request_port(port);
+ ret = msm_request_port(port);
+ if (ret)
+ return;
}
+
+ if (msm_port->is_uartdm)
+ iowrite32(GSBI_PROTOCOL_UART, msm_port->gsbi_base +
+ GSBI_CONTROL);
}
static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
@@ -515,9 +689,13 @@ static void msm_power(struct uart_port *port, unsigned int state,
switch (state) {
case 0:
clk_enable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_enable(msm_port->pclk);
break;
case 3:
clk_disable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_disable(msm_port->pclk);
break;
default:
printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
@@ -550,7 +728,7 @@ static struct msm_port msm_uart_ports[] = {
.iotype = UPIO_MEM,
.ops = &msm_uart_pops,
.flags = UPF_BOOT_AUTOCONF,
- .fifosize = 512,
+ .fifosize = 64,
.line = 0,
},
},
@@ -559,7 +737,7 @@ static struct msm_port msm_uart_ports[] = {
.iotype = UPIO_MEM,
.ops = &msm_uart_pops,
.flags = UPF_BOOT_AUTOCONF,
- .fifosize = 512,
+ .fifosize = 64,
.line = 1,
},
},
@@ -585,9 +763,14 @@ static inline struct uart_port *get_port_from_line(unsigned int line)
static void msm_console_putchar(struct uart_port *port, int c)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
;
- msm_write(port, c, UART_TF);
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
}
static void msm_console_write(struct console *co, const char *s,
@@ -609,12 +792,14 @@ static void msm_console_write(struct console *co, const char *s,
static int __init msm_console_setup(struct console *co, char *options)
{
struct uart_port *port;
+ struct msm_port *msm_port;
int baud, flow, bits, parity;
if (unlikely(co->index >= UART_NR || co->index < 0))
return -ENXIO;
port = get_port_from_line(co->index);
+ msm_port = UART_TO_MSM(port);
if (unlikely(!port->membase))
return -ENXIO;
@@ -638,6 +823,11 @@ static int __init msm_console_setup(struct console *co, char *options)
msm_reset(port);
+ if (msm_port->is_uartdm) {
+ msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ }
+
printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
return uart_set_options(port, co, baud, parity, bits, flow);
@@ -685,14 +875,32 @@ static int __init msm_serial_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
- msm_port->clk = clk_get(&pdev->dev, "uart_clk");
- if (IS_ERR(msm_port->clk))
- return PTR_ERR(msm_port->clk);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsbi_resource"))
+ msm_port->is_uartdm = 1;
+ else
+ msm_port->is_uartdm = 0;
+
+ if (msm_port->is_uartdm) {
+ msm_port->clk = clk_get(&pdev->dev, "gsbi_uart_clk");
+ msm_port->pclk = clk_get(&pdev->dev, "gsbi_pclk");
+ } else {
+ msm_port->clk = clk_get(&pdev->dev, "uart_clk");
+ msm_port->pclk = ERR_PTR(-ENOENT);
+ }
+
+ if (unlikely(IS_ERR(msm_port->clk) || (IS_ERR(msm_port->pclk) &&
+ msm_port->is_uartdm)))
+ return PTR_ERR(msm_port->clk);
+
+ if (msm_port->is_uartdm)
+ clk_set_rate(msm_port->clk, 7372800);
+
port->uartclk = clk_get_rate(msm_port->clk);
printk(KERN_INFO "uartclk = %d\n", port->uartclk);
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "uart_resource");
if (unlikely(!resource))
return -ENXIO;
port->mapbase = resource->start;
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index f6ca9ca79e98..9b8dc5d0d855 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -54,6 +55,7 @@
#define UART_CSR_300 0x22
#define UART_TF 0x000C
+#define UARTDM_TF 0x0070
#define UART_CR 0x0010
#define UART_CR_CMD_NULL (0 << 4)
@@ -64,14 +66,17 @@
#define UART_CR_CMD_START_BREAK (5 << 4)
#define UART_CR_CMD_STOP_BREAK (6 << 4)
#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
#define UART_CR_CMD_PACKET_MODE (9 << 4)
#define UART_CR_CMD_MODE_RESET (12 << 4)
#define UART_CR_CMD_SET_RFR (13 << 4)
#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
#define UART_CR_TX_DISABLE (1 << 3)
-#define UART_CR_TX_ENABLE (1 << 3)
-#define UART_CR_RX_DISABLE (1 << 3)
-#define UART_CR_RX_ENABLE (1 << 3)
+#define UART_CR_TX_ENABLE (1 << 2)
+#define UART_CR_RX_DISABLE (1 << 1)
+#define UART_CR_RX_ENABLE (1 << 0)
#define UART_IMR 0x0014
#define UART_IMR_TXLEV (1 << 0)
@@ -110,9 +115,20 @@
#define UART_SR_RX_FULL (1 << 1)
#define UART_SR_RX_READY (1 << 0)
-#define UART_RF 0x000C
-#define UART_MISR 0x0010
-#define UART_ISR 0x0014
+#define UART_RF 0x000C
+#define UARTDM_RF 0x0070
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+#define UART_ISR_TX_READY (1 << 7)
+
+#define GSBI_CONTROL 0x0
+#define GSBI_PROTOCOL_CODE 0x30
+#define GSBI_PROTOCOL_UART 0x40
+#define GSBI_PROTOCOL_IDLE 0x0
+
+#define UARTDM_DMRX 0x34
+#define UARTDM_NCF_TX 0x40
+#define UARTDM_RX_TOTAL_SNAP 0x38
#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
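A minimal sketch, not part of the patch, of how the GSBI register definitions added to msm_serial.h above are used: a GSBI-wrapped UART block is switched to the UART protocol when the port is configured and parked in the idle protocol when it is released. The helper name is illustrative, and gsbi_base is assumed to be an ioremap()ed mapping of the port's "gsbi_resource" region.

#include <linux/types.h>
#include <linux/io.h>

#define GSBI_CONTROL		0x0
#define GSBI_PROTOCOL_UART	0x40
#define GSBI_PROTOCOL_IDLE	0x0

/* Illustrative helper: select or release the UART protocol on a GSBI port. */
static void gsbi_set_uart_protocol(void __iomem *gsbi_base, bool enable)
{
	iowrite32(enable ? GSBI_PROTOCOL_UART : GSBI_PROTOCOL_IDLE,
		  gsbi_base + GSBI_CONTROL);
}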
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
new file mode 100644
index 000000000000..7e02c9c344fe
--- /dev/null
+++ b/drivers/tty/serial/mxs-auart.c
@@ -0,0 +1,798 @@
+/*
+ * Freescale STMP37XX/STMP378X Application UART driver
+ *
+ * Author: dmitry pervushin <dimka@embeddedalley.com>
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+
+#define MXS_AUART_PORTS 5
+
+#define AUART_CTRL0 0x00000000
+#define AUART_CTRL0_SET 0x00000004
+#define AUART_CTRL0_CLR 0x00000008
+#define AUART_CTRL0_TOG 0x0000000c
+#define AUART_CTRL1 0x00000010
+#define AUART_CTRL1_SET 0x00000014
+#define AUART_CTRL1_CLR 0x00000018
+#define AUART_CTRL1_TOG 0x0000001c
+#define AUART_CTRL2 0x00000020
+#define AUART_CTRL2_SET 0x00000024
+#define AUART_CTRL2_CLR 0x00000028
+#define AUART_CTRL2_TOG 0x0000002c
+#define AUART_LINECTRL 0x00000030
+#define AUART_LINECTRL_SET 0x00000034
+#define AUART_LINECTRL_CLR 0x00000038
+#define AUART_LINECTRL_TOG 0x0000003c
+#define AUART_LINECTRL2 0x00000040
+#define AUART_LINECTRL2_SET 0x00000044
+#define AUART_LINECTRL2_CLR 0x00000048
+#define AUART_LINECTRL2_TOG 0x0000004c
+#define AUART_INTR 0x00000050
+#define AUART_INTR_SET 0x00000054
+#define AUART_INTR_CLR 0x00000058
+#define AUART_INTR_TOG 0x0000005c
+#define AUART_DATA 0x00000060
+#define AUART_STAT 0x00000070
+#define AUART_DEBUG 0x00000080
+#define AUART_VERSION 0x00000090
+#define AUART_AUTOBAUD 0x000000a0
+
+#define AUART_CTRL0_SFTRST (1 << 31)
+#define AUART_CTRL0_CLKGATE (1 << 30)
+
+#define AUART_CTRL2_CTSEN (1 << 15)
+#define AUART_CTRL2_RTS (1 << 11)
+#define AUART_CTRL2_RXE (1 << 9)
+#define AUART_CTRL2_TXE (1 << 8)
+#define AUART_CTRL2_UARTEN (1 << 0)
+
+#define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16
+#define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000
+#define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16)
+#define AUART_LINECTRL_BAUD_DIVFRAC_SHIFT 8
+#define AUART_LINECTRL_BAUD_DIVFRAC_MASK 0x00003f00
+#define AUART_LINECTRL_BAUD_DIVFRAC(v) (((v) & 0x3f) << 8)
+#define AUART_LINECTRL_WLEN_MASK 0x00000060
+#define AUART_LINECTRL_WLEN(v) (((v) & 0x3) << 5)
+#define AUART_LINECTRL_FEN (1 << 4)
+#define AUART_LINECTRL_STP2 (1 << 3)
+#define AUART_LINECTRL_EPS (1 << 2)
+#define AUART_LINECTRL_PEN (1 << 1)
+#define AUART_LINECTRL_BRK (1 << 0)
+
+#define AUART_INTR_RTIEN (1 << 22)
+#define AUART_INTR_TXIEN (1 << 21)
+#define AUART_INTR_RXIEN (1 << 20)
+#define AUART_INTR_CTSMIEN (1 << 17)
+#define AUART_INTR_RTIS (1 << 6)
+#define AUART_INTR_TXIS (1 << 5)
+#define AUART_INTR_RXIS (1 << 4)
+#define AUART_INTR_CTSMIS (1 << 1)
+
+#define AUART_STAT_BUSY (1 << 29)
+#define AUART_STAT_CTS (1 << 28)
+#define AUART_STAT_TXFE (1 << 27)
+#define AUART_STAT_TXFF (1 << 25)
+#define AUART_STAT_RXFE (1 << 24)
+#define AUART_STAT_OERR (1 << 19)
+#define AUART_STAT_BERR (1 << 18)
+#define AUART_STAT_PERR (1 << 17)
+#define AUART_STAT_FERR (1 << 16)
+
+static struct uart_driver auart_driver;
+
+struct mxs_auart_port {
+ struct uart_port port;
+
+ unsigned int flags;
+ unsigned int ctrl;
+
+ unsigned int irq;
+
+ struct clk *clk;
+ struct device *dev;
+};
+
+static void mxs_auart_stop_tx(struct uart_port *u);
+
+#define to_auart_port(u) container_of(u, struct mxs_auart_port, port)
+
+static inline void mxs_auart_tx_chars(struct mxs_auart_port *s)
+{
+ struct circ_buf *xmit = &s->port.state->xmit;
+
+ while (!(readl(s->port.membase + AUART_STAT) &
+ AUART_STAT_TXFF)) {
+ if (s->port.x_char) {
+ s->port.icount.tx++;
+ writel(s->port.x_char,
+ s->port.membase + AUART_DATA);
+ s->port.x_char = 0;
+ continue;
+ }
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) {
+ s->port.icount.tx++;
+ writel(xmit->buf[xmit->tail],
+ s->port.membase + AUART_DATA);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+ } else
+ break;
+ }
+ if (uart_circ_empty(&(s->port.state->xmit)))
+ writel(AUART_INTR_TXIEN,
+ s->port.membase + AUART_INTR_CLR);
+ else
+ writel(AUART_INTR_TXIEN,
+ s->port.membase + AUART_INTR_SET);
+
+ if (uart_tx_stopped(&s->port))
+ mxs_auart_stop_tx(&s->port);
+}
+
+static void mxs_auart_rx_char(struct mxs_auart_port *s)
+{
+ int flag;
+ u32 stat;
+ u8 c;
+
+ c = readl(s->port.membase + AUART_DATA);
+ stat = readl(s->port.membase + AUART_STAT);
+
+ flag = TTY_NORMAL;
+ s->port.icount.rx++;
+
+ if (stat & AUART_STAT_BERR) {
+ s->port.icount.brk++;
+ if (uart_handle_break(&s->port))
+ goto out;
+ } else if (stat & AUART_STAT_PERR) {
+ s->port.icount.parity++;
+ } else if (stat & AUART_STAT_FERR) {
+ s->port.icount.frame++;
+ }
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ stat &= s->port.read_status_mask;
+
+ if (stat & AUART_STAT_BERR) {
+ flag = TTY_BREAK;
+ } else if (stat & AUART_STAT_PERR)
+ flag = TTY_PARITY;
+ else if (stat & AUART_STAT_FERR)
+ flag = TTY_FRAME;
+
+ if (stat & AUART_STAT_OERR)
+ s->port.icount.overrun++;
+
+ if (uart_handle_sysrq_char(&s->port, c))
+ goto out;
+
+ uart_insert_char(&s->port, stat, AUART_STAT_OERR, c, flag);
+out:
+ writel(stat, s->port.membase + AUART_STAT);
+}
+
+static void mxs_auart_rx_chars(struct mxs_auart_port *s)
+{
+ struct tty_struct *tty = s->port.state->port.tty;
+ u32 stat = 0;
+
+ for (;;) {
+ stat = readl(s->port.membase + AUART_STAT);
+ if (stat & AUART_STAT_RXFE)
+ break;
+ mxs_auart_rx_char(s);
+ }
+
+ writel(stat, s->port.membase + AUART_STAT);
+ tty_flip_buffer_push(tty);
+}
+
+static int mxs_auart_request_port(struct uart_port *u)
+{
+ return 0;
+}
+
+static int mxs_auart_verify_port(struct uart_port *u,
+ struct serial_struct *ser)
+{
+ if (u->type != PORT_UNKNOWN && u->type != PORT_IMX)
+ return -EINVAL;
+ return 0;
+}
+
+static void mxs_auart_config_port(struct uart_port *u, int flags)
+{
+}
+
+static const char *mxs_auart_type(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ return dev_name(s->dev);
+}
+
+static void mxs_auart_release_port(struct uart_port *u)
+{
+}
+
+static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ u32 ctrl = readl(u->membase + AUART_CTRL2);
+
+ ctrl &= ~AUART_CTRL2_RTS;
+ if (mctrl & TIOCM_RTS)
+ ctrl |= AUART_CTRL2_RTS;
+ s->ctrl = mctrl;
+ writel(ctrl, u->membase + AUART_CTRL2);
+}
+
+static u32 mxs_auart_get_mctrl(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+ u32 stat = readl(u->membase + AUART_STAT);
+ int ctrl2 = readl(u->membase + AUART_CTRL2);
+ u32 mctrl = s->ctrl;
+
+ mctrl &= ~TIOCM_CTS;
+ if (stat & AUART_STAT_CTS)
+ mctrl |= TIOCM_CTS;
+
+ if (ctrl2 & AUART_CTRL2_RTS)
+ mctrl |= TIOCM_RTS;
+
+ return mctrl;
+}
+
+static void mxs_auart_settermios(struct uart_port *u,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ u32 bm, ctrl, ctrl2, div;
+ unsigned int cflag, baud;
+
+ cflag = termios->c_cflag;
+
+ ctrl = AUART_LINECTRL_FEN;
+ ctrl2 = readl(u->membase + AUART_CTRL2);
+
+ /* byte size */
+ switch (cflag & CSIZE) {
+ case CS5:
+ bm = 0;
+ break;
+ case CS6:
+ bm = 1;
+ break;
+ case CS7:
+ bm = 2;
+ break;
+ case CS8:
+ bm = 3;
+ break;
+ default:
+ return;
+ }
+
+ ctrl |= AUART_LINECTRL_WLEN(bm);
+
+ /* parity */
+ if (cflag & PARENB) {
+ ctrl |= AUART_LINECTRL_PEN;
+ if ((cflag & PARODD) == 0)
+ ctrl |= AUART_LINECTRL_EPS;
+ }
+
+ u->read_status_mask = 0;
+
+ if (termios->c_iflag & INPCK)
+ u->read_status_mask |= AUART_STAT_PERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ u->read_status_mask |= AUART_STAT_BERR;
+
+ /*
+ * Characters to ignore
+ */
+ u->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ u->ignore_status_mask |= AUART_STAT_PERR;
+ if (termios->c_iflag & IGNBRK) {
+ u->ignore_status_mask |= AUART_STAT_BERR;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ u->ignore_status_mask |= AUART_STAT_OERR;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if (cflag & CREAD)
+ ctrl2 |= AUART_CTRL2_RXE;
+ else
+ ctrl2 &= ~AUART_CTRL2_RXE;
+
+ /* figure out the stop bits requested */
+ if (cflag & CSTOPB)
+ ctrl |= AUART_LINECTRL_STP2;
+
+ /* figure out the hardware flow control settings */
+ if (cflag & CRTSCTS)
+ ctrl2 |= AUART_CTRL2_CTSEN;
+ else
+ ctrl2 &= ~AUART_CTRL2_CTSEN;
+
+ /* set baud rate */
+ baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
+ div = u->uartclk * 32 / baud;
+ ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
+ ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);
+
+ writel(ctrl, u->membase + AUART_LINECTRL);
+ writel(ctrl2, u->membase + AUART_CTRL2);
+}
+
+static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+{
+ u32 istatus, istat;
+ struct mxs_auart_port *s = context;
+ u32 stat = readl(s->port.membase + AUART_STAT);
+
+ istatus = istat = readl(s->port.membase + AUART_INTR);
+
+ if (istat & AUART_INTR_CTSMIS) {
+ uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
+ writel(AUART_INTR_CTSMIS,
+ s->port.membase + AUART_INTR_CLR);
+ istat &= ~AUART_INTR_CTSMIS;
+ }
+
+ if (istat & (AUART_INTR_RTIS | AUART_INTR_RXIS)) {
+ mxs_auart_rx_chars(s);
+ istat &= ~(AUART_INTR_RTIS | AUART_INTR_RXIS);
+ }
+
+ if (istat & AUART_INTR_TXIS) {
+ mxs_auart_tx_chars(s);
+ istat &= ~AUART_INTR_TXIS;
+ }
+
+ writel(istatus & (AUART_INTR_RTIS
+ | AUART_INTR_TXIS
+ | AUART_INTR_RXIS
+ | AUART_INTR_CTSMIS),
+ s->port.membase + AUART_INTR_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static void mxs_auart_reset(struct uart_port *u)
+{
+ int i;
+ unsigned int reg;
+
+ writel(AUART_CTRL0_SFTRST, u->membase + AUART_CTRL0_CLR);
+
+ for (i = 0; i < 10000; i++) {
+ reg = readl(u->membase + AUART_CTRL0);
+ if (!(reg & AUART_CTRL0_SFTRST))
+ break;
+ udelay(3);
+ }
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+}
+
+static int mxs_auart_startup(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ clk_enable(s->clk);
+
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+
+ writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_SET);
+
+ writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ u->membase + AUART_INTR);
+
+ /*
+ * Enable fifo so all four bytes of a DMA word are written to
+ * output (otherwise, only the LSB is written, i.e. 1 in 4 bytes)
+ */
+ writel(AUART_LINECTRL_FEN, u->membase + AUART_LINECTRL_SET);
+
+ return 0;
+}
+
+static void mxs_auart_shutdown(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR);
+
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET);
+
+ writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ u->membase + AUART_INTR_CLR);
+
+ clk_disable(s->clk);
+}
+
+static unsigned int mxs_auart_tx_empty(struct uart_port *u)
+{
+ if (readl(u->membase + AUART_STAT) & AUART_STAT_TXFE)
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static void mxs_auart_start_tx(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ /* enable transmitter */
+ writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_SET);
+
+ mxs_auart_tx_chars(s);
+}
+
+static void mxs_auart_stop_tx(struct uart_port *u)
+{
+ writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_CLR);
+}
+
+static void mxs_auart_stop_rx(struct uart_port *u)
+{
+ writel(AUART_CTRL2_RXE, u->membase + AUART_CTRL2_CLR);
+}
+
+static void mxs_auart_break_ctl(struct uart_port *u, int ctl)
+{
+ if (ctl)
+ writel(AUART_LINECTRL_BRK,
+ u->membase + AUART_LINECTRL_SET);
+ else
+ writel(AUART_LINECTRL_BRK,
+ u->membase + AUART_LINECTRL_CLR);
+}
+
+static void mxs_auart_enable_ms(struct uart_port *port)
+{
+ /* just empty */
+}
+
+static struct uart_ops mxs_auart_ops = {
+ .tx_empty = mxs_auart_tx_empty,
+ .start_tx = mxs_auart_start_tx,
+ .stop_tx = mxs_auart_stop_tx,
+ .stop_rx = mxs_auart_stop_rx,
+ .enable_ms = mxs_auart_enable_ms,
+ .break_ctl = mxs_auart_break_ctl,
+ .set_mctrl = mxs_auart_set_mctrl,
+ .get_mctrl = mxs_auart_get_mctrl,
+ .startup = mxs_auart_startup,
+ .shutdown = mxs_auart_shutdown,
+ .set_termios = mxs_auart_settermios,
+ .type = mxs_auart_type,
+ .release_port = mxs_auart_release_port,
+ .request_port = mxs_auart_request_port,
+ .config_port = mxs_auart_config_port,
+ .verify_port = mxs_auart_verify_port,
+};
+
+static struct mxs_auart_port *auart_port[MXS_AUART_PORTS];
+
+#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE
+static void mxs_auart_console_putchar(struct uart_port *port, int ch)
+{
+ unsigned int to = 1000;
+
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_TXFF) {
+ if (!to--)
+ break;
+ udelay(1);
+ }
+
+ writel(ch, port->membase + AUART_DATA);
+}
+
+static void
+auart_console_write(struct console *co, const char *str, unsigned int count)
+{
+ struct mxs_auart_port *s;
+ struct uart_port *port;
+ unsigned int old_ctrl0, old_ctrl2;
+ unsigned int to = 1000;
+
+ if (co->index > MXS_AUART_PORTS || co->index < 0)
+ return;
+
+ s = auart_port[co->index];
+ port = &s->port;
+
+ clk_enable(s->clk);
+
+ /* First save the CR then disable the interrupts */
+ old_ctrl2 = readl(port->membase + AUART_CTRL2);
+ old_ctrl0 = readl(port->membase + AUART_CTRL0);
+
+ writel(AUART_CTRL0_CLKGATE,
+ port->membase + AUART_CTRL0_CLR);
+ writel(AUART_CTRL2_UARTEN | AUART_CTRL2_TXE,
+ port->membase + AUART_CTRL2_SET);
+
+ uart_console_write(port, str, count, mxs_auart_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the TCR
+ */
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
+ if (!to--)
+ break;
+ udelay(1);
+ }
+
+ writel(old_ctrl0, port->membase + AUART_CTRL0);
+ writel(old_ctrl2, port->membase + AUART_CTRL2);
+
+ clk_disable(s->clk);
+}
+
+static void __init
+auart_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ unsigned int lcr_h, quot;
+
+ if (!(readl(port->membase + AUART_CTRL2) & AUART_CTRL2_UARTEN))
+ return;
+
+ lcr_h = readl(port->membase + AUART_LINECTRL);
+
+ *parity = 'n';
+ if (lcr_h & AUART_LINECTRL_PEN) {
+ if (lcr_h & AUART_LINECTRL_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ if ((lcr_h & AUART_LINECTRL_WLEN_MASK) == AUART_LINECTRL_WLEN(2))
+ *bits = 7;
+ else
+ *bits = 8;
+
+ quot = ((readl(port->membase + AUART_LINECTRL)
+ & AUART_LINECTRL_BAUD_DIVINT_MASK))
+ >> (AUART_LINECTRL_BAUD_DIVINT_SHIFT - 6);
+ quot |= ((readl(port->membase + AUART_LINECTRL)
+ & AUART_LINECTRL_BAUD_DIVFRAC_MASK))
+ >> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT;
+ if (quot == 0)
+ quot = 1;
+
+ *baud = (port->uartclk << 2) / quot;
+}
+
+static int __init
+auart_console_setup(struct console *co, char *options)
+{
+ struct mxs_auart_port *s;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= ARRAY_SIZE(auart_port))
+ co->index = 0;
+ s = auart_port[co->index];
+ if (!s)
+ return -ENODEV;
+
+ clk_enable(s->clk);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ auart_console_get_options(&s->port, &baud, &parity, &bits);
+
+ ret = uart_set_options(&s->port, co, baud, parity, bits, flow);
+
+ clk_disable(s->clk);
+
+ return ret;
+}
+
+static struct console auart_console = {
+ .name = "ttyAPP",
+ .write = auart_console_write,
+ .device = uart_console_device,
+ .setup = auart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &auart_driver,
+};
+#endif
+
+static struct uart_driver auart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyAPP",
+ .dev_name = "ttyAPP",
+ .major = 0,
+ .minor = 0,
+ .nr = MXS_AUART_PORTS,
+#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE
+ .cons = &auart_console,
+#endif
+};
+
+static int __devinit mxs_auart_probe(struct platform_device *pdev)
+{
+ struct mxs_auart_port *s;
+ u32 version;
+ int ret = 0;
+ struct resource *r;
+
+ s = kzalloc(sizeof(struct mxs_auart_port), GFP_KERNEL);
+ if (!s) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ s->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(s->clk)) {
+ ret = PTR_ERR(s->clk);
+ goto out_free;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -ENXIO;
+ goto out_free_clk;
+ }
+
+ s->port.mapbase = r->start;
+ s->port.membase = ioremap(r->start, resource_size(r));
+ s->port.ops = &mxs_auart_ops;
+ s->port.iotype = UPIO_MEM;
+ s->port.line = pdev->id < 0 ? 0 : pdev->id;
+ s->port.fifosize = 16;
+ s->port.uartclk = clk_get_rate(s->clk);
+ s->port.type = PORT_IMX;
+ s->port.dev = s->dev = get_device(&pdev->dev);
+
+ s->flags = 0;
+ s->ctrl = 0;
+
+ s->irq = platform_get_irq(pdev, 0);
+ s->port.irq = s->irq;
+ ret = request_irq(s->irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s);
+ if (ret)
+ goto out_free_clk;
+
+ platform_set_drvdata(pdev, s);
+
+ auart_port[pdev->id] = s;
+
+ mxs_auart_reset(&s->port);
+
+ ret = uart_add_one_port(&auart_driver, &s->port);
+ if (ret)
+ goto out_free_irq;
+
+ version = readl(s->port.membase + AUART_VERSION);
+ dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n",
+ (version >> 24) & 0xff,
+ (version >> 16) & 0xff, version & 0xffff);
+
+ return 0;
+
+out_free_irq:
+ auart_port[pdev->id] = NULL;
+ free_irq(s->irq, s);
+out_free_clk:
+ clk_put(s->clk);
+out_free:
+ kfree(s);
+out:
+ return ret;
+}
+
+static int __devexit mxs_auart_remove(struct platform_device *pdev)
+{
+ struct mxs_auart_port *s = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&auart_driver, &s->port);
+
+ auart_port[pdev->id] = NULL;
+
+ clk_put(s->clk);
+ free_irq(s->irq, s);
+ kfree(s);
+
+ return 0;
+}
+
+static struct platform_driver mxs_auart_driver = {
+ .probe = mxs_auart_probe,
+ .remove = __devexit_p(mxs_auart_remove),
+ .driver = {
+ .name = "mxs-auart",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mxs_auart_init(void)
+{
+ int r;
+
+ r = uart_register_driver(&auart_driver);
+ if (r)
+ goto out;
+
+ r = platform_driver_register(&mxs_auart_driver);
+ if (r)
+ goto out_err;
+
+ return 0;
+out_err:
+ uart_unregister_driver(&auart_driver);
+out:
+ return r;
+}
+
+static void __exit mxs_auart_exit(void)
+{
+ platform_driver_unregister(&mxs_auart_driver);
+ uart_unregister_driver(&auart_driver);
+}
+
+module_init(mxs_auart_init);
+module_exit(mxs_auart_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Freescale MXS application uart driver");
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index a9ad7f33526d..26403b8e4b9b 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -15,7 +15,6 @@
*Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/serial_reg.h>
-#include <linux/pci.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index ff51dae1df0c..c327218cad44 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1269,13 +1269,12 @@ static int ucc_uart_probe(struct platform_device *ofdev)
ret = of_address_to_resource(np, 0, &res);
if (ret) {
dev_err(&ofdev->dev, "missing 'reg' property in device tree\n");
- kfree(qe_port);
- return ret;
+ goto out_free;
}
if (!res.start) {
dev_err(&ofdev->dev, "invalid 'reg' property in device tree\n");
- kfree(qe_port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free;
}
qe_port->port.mapbase = res.start;
@@ -1285,17 +1284,17 @@ static int ucc_uart_probe(struct platform_device *ofdev)
if (!iprop) {
iprop = of_get_property(np, "device-id", NULL);
if (!iprop) {
- kfree(qe_port);
dev_err(&ofdev->dev, "UCC is unspecified in "
"device tree\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free;
}
}
if ((*iprop < 1) || (*iprop > UCC_MAX_NUM)) {
dev_err(&ofdev->dev, "no support for UCC%u\n", *iprop);
- kfree(qe_port);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free;
}
qe_port->ucc_num = *iprop - 1;
@@ -1309,16 +1308,16 @@ static int ucc_uart_probe(struct platform_device *ofdev)
sprop = of_get_property(np, "rx-clock-name", NULL);
if (!sprop) {
dev_err(&ofdev->dev, "missing rx-clock-name in device tree\n");
- kfree(qe_port);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free;
}
qe_port->us_info.rx_clock = qe_clock_source(sprop);
if ((qe_port->us_info.rx_clock < QE_BRG1) ||
(qe_port->us_info.rx_clock > QE_BRG16)) {
dev_err(&ofdev->dev, "rx-clock-name must be a BRG for UART\n");
- kfree(qe_port);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free;
}
#ifdef LOOPBACK
@@ -1328,39 +1327,39 @@ static int ucc_uart_probe(struct platform_device *ofdev)
sprop = of_get_property(np, "tx-clock-name", NULL);
if (!sprop) {
dev_err(&ofdev->dev, "missing tx-clock-name in device tree\n");
- kfree(qe_port);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free;
}
qe_port->us_info.tx_clock = qe_clock_source(sprop);
#endif
if ((qe_port->us_info.tx_clock < QE_BRG1) ||
(qe_port->us_info.tx_clock > QE_BRG16)) {
dev_err(&ofdev->dev, "tx-clock-name must be a BRG for UART\n");
- kfree(qe_port);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free;
}
/* Get the port number, numbered 0-3 */
iprop = of_get_property(np, "port-number", NULL);
if (!iprop) {
dev_err(&ofdev->dev, "missing port-number in device tree\n");
- kfree(qe_port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free;
}
qe_port->port.line = *iprop;
if (qe_port->port.line >= UCC_MAX_UART) {
dev_err(&ofdev->dev, "port-number must be 0-%u\n",
UCC_MAX_UART - 1);
- kfree(qe_port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free;
}
qe_port->port.irq = irq_of_parse_and_map(np, 0);
if (qe_port->port.irq == NO_IRQ) {
dev_err(&ofdev->dev, "could not map IRQ for UCC%u\n",
qe_port->ucc_num + 1);
- kfree(qe_port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free;
}
/*
@@ -1372,8 +1371,8 @@ static int ucc_uart_probe(struct platform_device *ofdev)
np = of_find_node_by_type(NULL, "qe");
if (!np) {
dev_err(&ofdev->dev, "could not find 'qe' node\n");
- kfree(qe_port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free;
}
}
@@ -1381,8 +1380,8 @@ static int ucc_uart_probe(struct platform_device *ofdev)
if (!iprop) {
dev_err(&ofdev->dev,
"missing brg-frequency in device tree\n");
- kfree(qe_port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_np;
}
if (*iprop)
@@ -1397,16 +1396,16 @@ static int ucc_uart_probe(struct platform_device *ofdev)
if (!iprop) {
dev_err(&ofdev->dev,
"missing QE bus-frequency in device tree\n");
- kfree(qe_port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_np;
}
if (*iprop)
qe_port->port.uartclk = *iprop / 2;
else {
dev_err(&ofdev->dev,
"invalid QE bus-frequency in device tree\n");
- kfree(qe_port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_np;
}
}
@@ -1444,8 +1443,7 @@ static int ucc_uart_probe(struct platform_device *ofdev)
if (ret) {
dev_err(&ofdev->dev, "could not add /dev/ttyQE%u\n",
qe_port->port.line);
- kfree(qe_port);
- return ret;
+ goto out_np;
}
dev_set_drvdata(&ofdev->dev, qe_port);
@@ -1459,6 +1457,11 @@ static int ucc_uart_probe(struct platform_device *ofdev)
SERIAL_QE_MINOR + qe_port->port.line);
return 0;
+out_np:
+ of_node_put(np);
+out_free:
+ kfree(qe_port);
+ return ret;
}
static int ucc_uart_remove(struct platform_device *ofdev)
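The ucc_uart_probe() hunks above replace scattered kfree()-and-return exits with a single unwind path. The following standalone C sketch (hypothetical names, user-space stand-ins for the kernel allocations) shows the same goto-based pattern: failures jump to labels that release, in reverse order, only what has already been acquired.

#include <stdio.h>
#include <stdlib.h>

/* Toy probe(): acquire two resources, unwind in reverse order on failure. */
static int probe_example(int fail_at)
{
	int ret = 0;
	char *port, *node = NULL;

	port = malloc(32);		/* stands in for the qe_port allocation */
	if (!port)
		return -1;

	if (fail_at == 1) {		/* early failure: only port exists */
		ret = -1;
		goto out_free;
	}

	node = malloc(16);		/* stands in for the extra of_node reference */
	if (!node) {
		ret = -1;
		goto out_free;
	}

	if (fail_at == 2) {		/* late failure: drop node, then port */
		ret = -1;
		goto out_node;
	}

	printf("probe ok\n");
	free(node);
	free(port);
	return 0;

out_node:
	free(node);
out_free:
	free(port);
	return ret;
}

int main(void)
{
	return probe_example(0);
}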
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index d8210ca00720..b9451219528b 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -322,7 +322,7 @@ void tty_schedule_flip(struct tty_struct *tty)
if (tty->buf.tail != NULL)
tty->buf.tail->commit = tty->buf.tail->used;
spin_unlock_irqrestore(&tty->buf.lock, flags);
- schedule_delayed_work(&tty->buf.work, 1);
+ schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_schedule_flip);
@@ -402,7 +402,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
static void flush_to_ldisc(struct work_struct *work)
{
struct tty_struct *tty =
- container_of(work, struct tty_struct, buf.work.work);
+ container_of(work, struct tty_struct, buf.work);
unsigned long flags;
struct tty_ldisc *disc;
@@ -443,7 +443,7 @@ static void flush_to_ldisc(struct work_struct *work)
if (test_bit(TTY_FLUSHPENDING, &tty->flags))
break;
if (!tty->receive_room || seen_tail) {
- schedule_delayed_work(&tty->buf.work, 1);
+ schedule_work(&tty->buf.work);
break;
}
if (count > tty->receive_room)
@@ -481,7 +481,7 @@ static void flush_to_ldisc(struct work_struct *work)
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
- flush_delayed_work(&tty->buf.work);
+ flush_work(&tty->buf.work);
}
/**
@@ -506,9 +506,9 @@ void tty_flip_buffer_push(struct tty_struct *tty)
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency)
- flush_to_ldisc(&tty->buf.work.work);
+ flush_to_ldisc(&tty->buf.work);
else
- schedule_delayed_work(&tty->buf.work, 1);
+ schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
@@ -529,6 +529,6 @@ void tty_buffer_init(struct tty_struct *tty)
tty->buf.tail = NULL;
tty->buf.free = NULL;
tty->buf.memory_used = 0;
- INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
+ INIT_WORK(&tty->buf.work, flush_to_ldisc);
}
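A sketch of the conversion the tty_buffer.c hunks above perform (kernel-module build context assumed; the struct and function names below are illustrative, not the tty layer's own): the flip buffer's work item becomes a plain work_struct, so flush_to_ldisc() is queued immediately instead of one timer tick later.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct flip_buf {
	struct work_struct work;	/* was: struct delayed_work work */
};

static void flip_flush(struct work_struct *work)
{
	struct flip_buf *buf = container_of(work, struct flip_buf, work);

	/* push pending characters to the line discipline here */
	(void)buf;
}

static void flip_init(struct flip_buf *buf)
{
	INIT_WORK(&buf->work, flip_flush);	/* was: INIT_DELAYED_WORK(&buf->work, flip_flush) */
}

static void flip_kick(struct flip_buf *buf)
{
	schedule_work(&buf->work);		/* was: schedule_delayed_work(&buf->work, 1) */
}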
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 0fc564a97706..e19e13647116 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -529,7 +529,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
static int tty_ldisc_halt(struct tty_struct *tty)
{
clear_bit(TTY_LDISC, &tty->flags);
- return cancel_delayed_work_sync(&tty->buf.work);
+ return cancel_work_sync(&tty->buf.work);
}
/**
@@ -542,7 +542,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
{
flush_work_sync(&tty->hangup_work);
flush_work_sync(&tty->SAK_work);
- flush_delayed_work_sync(&tty->buf.work);
+ flush_work_sync(&tty->buf.work);
}
/**
@@ -722,9 +722,9 @@ enable:
/* Restart the work queue in case no characters kick it off. Safe if
already running */
if (work)
- schedule_delayed_work(&tty->buf.work, 1);
+ schedule_work(&tty->buf.work);
if (o_work)
- schedule_delayed_work(&o_tty->buf.work, 1);
+ schedule_work(&o_tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
tty_unlock();
return retval;
@@ -830,12 +830,12 @@ void tty_ldisc_hangup(struct tty_struct *tty)
/*
* this is like tty_ldisc_halt, but we need to give up
- * the BTM before calling cancel_delayed_work_sync,
- * which may need to wait for another function taking the BTM
+ * the BTM before calling cancel_work_sync, which may
+ * need to wait for another function taking the BTM
*/
clear_bit(TTY_LDISC, &tty->flags);
tty_unlock();
- cancel_delayed_work_sync(&tty->buf.work);
+ cancel_work_sync(&tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
tty_lock();
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 38072e4e74bd..e35a17687c05 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1646,7 +1646,7 @@ static int autosuspend_check(struct usb_device *udev)
return 0;
}
-static int usb_runtime_suspend(struct device *dev)
+int usb_runtime_suspend(struct device *dev)
{
struct usb_device *udev = to_usb_device(dev);
int status;
@@ -1667,7 +1667,7 @@ static int usb_runtime_suspend(struct device *dev)
return status;
}
-static int usb_runtime_resume(struct device *dev)
+int usb_runtime_resume(struct device *dev)
{
struct usb_device *udev = to_usb_device(dev);
int status;
@@ -1679,7 +1679,7 @@ static int usb_runtime_resume(struct device *dev)
return status;
}
-static int usb_runtime_idle(struct device *dev)
+int usb_runtime_idle(struct device *dev)
{
struct usb_device *udev = to_usb_device(dev);
@@ -1691,19 +1691,10 @@ static int usb_runtime_idle(struct device *dev)
return 0;
}
-static const struct dev_pm_ops usb_bus_pm_ops = {
- .runtime_suspend = usb_runtime_suspend,
- .runtime_resume = usb_runtime_resume,
- .runtime_idle = usb_runtime_idle,
-};
-
#endif /* CONFIG_USB_SUSPEND */
struct bus_type usb_bus_type = {
.name = "usb",
.match = usb_device_match,
.uevent = usb_uevent,
-#ifdef CONFIG_USB_SUSPEND
- .pm = &usb_bus_pm_ops,
-#endif
};
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 079cb57bab4f..d9d4b169404f 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -315,6 +315,11 @@ static const struct dev_pm_ops usb_device_pm_ops = {
.thaw = usb_dev_thaw,
.poweroff = usb_dev_poweroff,
.restore = usb_dev_restore,
+#ifdef CONFIG_USB_SUSPEND
+ .runtime_suspend = usb_runtime_suspend,
+ .runtime_resume = usb_runtime_resume,
+ .runtime_idle = usb_runtime_idle,
+#endif
};
#endif /* CONFIG_PM */
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index a9cf484ecae4..d450b742137e 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -77,6 +77,9 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
extern void usb_autosuspend_device(struct usb_device *udev);
extern int usb_autoresume_device(struct usb_device *udev);
extern int usb_remote_wakeup(struct usb_device *dev);
+extern int usb_runtime_suspend(struct device *dev);
+extern int usb_runtime_resume(struct device *dev);
+extern int usb_runtime_idle(struct device *dev);
#else
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index c8e360d7d975..25c8c10bb689 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -203,11 +203,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
mdelay(10);
}
- /* setup specific usb hw */
- ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
- if (ret < 0)
- goto err_init;
-
ehci = hcd_to_ehci(hcd);
/* EHCI registers start at offset 0x100 */
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 8dabe8e31d8c..3558491dd87d 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -185,7 +185,7 @@ static struct platform_driver ohci_hcd_tmio_driver;
static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -274,7 +274,7 @@ static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
usb_remove_hcd(hcd);
tmio_stop_hc(dev);
@@ -293,7 +293,7 @@ static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
@@ -326,7 +326,7 @@ static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t s
static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1fa6ce3e4a23..68ab460a735c 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -282,6 +282,7 @@ static int appledisplay_probe(struct usb_interface *iface,
snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
atomic_inc_return(&count_displays) - 1);
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 0xff;
pdata->bd = backlight_device_register(bl_name, NULL, pdata,
&appledisplay_bl_data, &props);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index a914010d9d12..630ae7f3cd4c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1530,7 +1530,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \
defined(CONFIG_ARCH_U5500)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 4f0dd2ed3964..4bd9e2145ee4 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -212,8 +212,8 @@ enum musb_g_ep0_state {
* directly with the "flat" model, or after setting up an index register.
*/
-#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
- || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN) \
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \
+ || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \
|| defined(CONFIG_ARCH_OMAP4)
/* REVISIT indexed access seemed to
* misbehave (on DaVinci) for at least peripheral IN ...
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 21056c924c74..320fd4afb93f 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -31,7 +31,7 @@
*
*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430)
#include "omap2430.h"
#endif
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index e00fa1b22ecd..8c6fdef61d1c 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1510,7 +1510,7 @@ isp1301_start_hnp(struct otg_transceiver *dev)
/*-------------------------------------------------------------------------*/
-static int __init
+static int __devinit
isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
int status;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f616cefc95ba..2f7c76a85e53 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -60,6 +60,7 @@ static int move_iovec_hdr(struct iovec *from, struct iovec *to,
{
int seg = 0;
size_t size;
+
while (len && seg < iov_count) {
size = min(from->iov_len, len);
to->iov_base = from->iov_base;
@@ -79,6 +80,7 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
{
int seg = 0;
size_t size;
+
while (len && seg < iovcount) {
size = min(from->iov_len, len);
to->iov_base = from->iov_base;
@@ -211,12 +213,13 @@ static int peek_head_len(struct sock *sk)
{
struct sk_buff *head;
int len = 0;
+ unsigned long flags;
- lock_sock(sk);
+ spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
head = skb_peek(&sk->sk_receive_queue);
- if (head)
+ if (likely(head))
len = head->len;
- release_sock(sk);
+ spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
return len;
}
@@ -227,6 +230,7 @@ static int peek_head_len(struct sock *sk)
* @iovcount - returned count of io vectors we fill
* @log - vhost log
* @log_num - log offset
+ * @quota - headcount quota, 1 for big buffer
* returns number of buffer heads allocated, negative on error
*/
static int get_rx_bufs(struct vhost_virtqueue *vq,
@@ -234,7 +238,8 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
int datalen,
unsigned *iovcount,
struct vhost_log *log,
- unsigned *log_num)
+ unsigned *log_num,
+ unsigned int quota)
{
unsigned int out, in;
int seg = 0;
@@ -242,7 +247,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
unsigned d;
int r, nlogs = 0;
- while (datalen > 0) {
+ while (datalen > 0 && headcount < quota) {
if (unlikely(seg >= UIO_MAXIOV)) {
r = -ENOBUFS;
goto err;
@@ -282,117 +287,7 @@ err:
/* Expects to be always run from workqueue - which acts as
* read-size critical section for our kind of RCU. */
-static void handle_rx_big(struct vhost_net *net)
-{
- struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
- unsigned out, in, log, s;
- int head;
- struct vhost_log *vq_log;
- struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_control = NULL, /* FIXME: get and handle RX aux data. */
- .msg_controllen = 0,
- .msg_iov = vq->iov,
- .msg_flags = MSG_DONTWAIT,
- };
-
- struct virtio_net_hdr hdr = {
- .flags = 0,
- .gso_type = VIRTIO_NET_HDR_GSO_NONE
- };
-
- size_t len, total_len = 0;
- int err;
- size_t hdr_size;
- /* TODO: check that we are running from vhost_worker? */
- struct socket *sock = rcu_dereference_check(vq->private_data, 1);
- if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
- return;
-
- mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
- hdr_size = vq->vhost_hlen;
-
- vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
- vq->log : NULL;
-
- for (;;) {
- head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
- ARRAY_SIZE(vq->iov),
- &out, &in,
- vq_log, &log);
- /* On error, stop handling until the next kick. */
- if (unlikely(head < 0))
- break;
- /* OK, now we need to know about added descriptors. */
- if (head == vq->num) {
- if (unlikely(vhost_enable_notify(vq))) {
- /* They have slipped one in as we were
- * doing that: check again. */
- vhost_disable_notify(vq);
- continue;
- }
- /* Nothing new? Wait for eventfd to tell us
- * they refilled. */
- break;
- }
- /* We don't need to be notified again. */
- if (out) {
- vq_err(vq, "Unexpected descriptor format for RX: "
- "out %d, int %d\n",
- out, in);
- break;
- }
- /* Skip header. TODO: support TSO/mergeable rx buffers. */
- s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
- msg.msg_iovlen = in;
- len = iov_length(vq->iov, in);
- /* Sanity check */
- if (!len) {
- vq_err(vq, "Unexpected header len for RX: "
- "%zd expected %zd\n",
- iov_length(vq->hdr, s), hdr_size);
- break;
- }
- err = sock->ops->recvmsg(NULL, sock, &msg,
- len, MSG_DONTWAIT | MSG_TRUNC);
- /* TODO: Check specific error and bomb out unless EAGAIN? */
- if (err < 0) {
- vhost_discard_vq_desc(vq, 1);
- break;
- }
- /* TODO: Should check and handle checksum. */
- if (err > len) {
- pr_debug("Discarded truncated rx packet: "
- " len %d > %zd\n", err, len);
- vhost_discard_vq_desc(vq, 1);
- continue;
- }
- len = err;
- err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
- if (err) {
- vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
- vq->iov->iov_base, err);
- break;
- }
- len += hdr_size;
- vhost_add_used_and_signal(&net->dev, vq, head, len);
- if (unlikely(vq_log))
- vhost_log_write(vq, vq_log, log, len);
- total_len += len;
- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
-
- mutex_unlock(&vq->mutex);
-}
-
-/* Expects to be always run from workqueue - which acts as
- * read-size critical section for our kind of RCU. */
-static void handle_rx_mergeable(struct vhost_net *net)
+static void handle_rx(struct vhost_net *net)
{
struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
unsigned uninitialized_var(in), log;
@@ -405,19 +300,18 @@ static void handle_rx_mergeable(struct vhost_net *net)
.msg_iov = vq->iov,
.msg_flags = MSG_DONTWAIT,
};
-
struct virtio_net_hdr_mrg_rxbuf hdr = {
.hdr.flags = 0,
.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
};
-
size_t total_len = 0;
- int err, headcount;
+ int err, headcount, mergeable;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
/* TODO: check that we are running from vhost_worker? */
struct socket *sock = rcu_dereference_check(vq->private_data, 1);
- if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
+
+ if (!sock)
return;
mutex_lock(&vq->mutex);
@@ -427,12 +321,14 @@ static void handle_rx_mergeable(struct vhost_net *net)
vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
+ mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
while ((sock_len = peek_head_len(sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads, vhost_len,
- &in, vq_log, &log);
+ &in, vq_log, &log,
+ likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
@@ -476,7 +372,7 @@ static void handle_rx_mergeable(struct vhost_net *net)
break;
}
/* TODO: Should check and handle checksum. */
- if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF) &&
+ if (likely(mergeable) &&
memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
offsetof(typeof(hdr), num_buffers),
sizeof hdr.num_buffers)) {
@@ -498,14 +394,6 @@ static void handle_rx_mergeable(struct vhost_net *net)
mutex_unlock(&vq->mutex);
}
-static void handle_rx(struct vhost_net *net)
-{
- if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF))
- handle_rx_mergeable(net);
- else
- handle_rx_big(net);
-}
-
static void handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -654,6 +542,7 @@ static struct socket *get_raw_socket(int fd)
} uaddr;
int uaddr_len = sizeof uaddr, r;
struct socket *sock = sockfd_lookup(fd, &r);
+
if (!sock)
return ERR_PTR(-ENOTSOCK);
@@ -682,6 +571,7 @@ static struct socket *get_tap_socket(int fd)
{
struct file *file = fget(fd);
struct socket *sock;
+
if (!file)
return ERR_PTR(-EBADF);
sock = tun_get_socket(file);
@@ -696,6 +586,7 @@ static struct socket *get_tap_socket(int fd)
static struct socket *get_socket(int fd)
{
struct socket *sock;
+
/* special case to disable backend */
if (fd == -1)
return NULL;
@@ -741,9 +632,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
oldsock = rcu_dereference_protected(vq->private_data,
lockdep_is_held(&vq->mutex));
if (sock != oldsock) {
- vhost_net_disable_vq(n, vq);
- rcu_assign_pointer(vq->private_data, sock);
- vhost_net_enable_vq(n, vq);
+ vhost_net_disable_vq(n, vq);
+ rcu_assign_pointer(vq->private_data, sock);
+ vhost_net_enable_vq(n, vq);
}
mutex_unlock(&vq->mutex);
@@ -768,6 +659,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
struct socket *tx_sock = NULL;
struct socket *rx_sock = NULL;
long err;
+
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
@@ -829,6 +721,7 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
struct vhost_vring_file backend;
u64 features;
int r;
+
switch (ioctl) {
case VHOST_NET_SET_BACKEND:
if (copy_from_user(&backend, argp, sizeof backend))
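The quota argument added to get_rx_bufs() above caps how many descriptor heads one received packet may consume: UIO_MAXIOV when mergeable RX buffers are negotiated, 1 otherwise. Below is a standalone toy model of that loop bound only (illustrative numbers, not the vhost data path):

#include <stdio.h>

#define UIO_MAXIOV 1024

/* Gather fixed-size buffers until the packet fits or the quota is hit.
 * Returns the number of buffers used, or -1 if the quota was exhausted.
 */
static int gather_bufs(int datalen, int bufsize, unsigned int quota)
{
	unsigned int headcount = 0;

	while (datalen > 0 && headcount < quota) {
		datalen -= bufsize;
		headcount++;
	}
	return datalen > 0 ? -1 : (int)headcount;
}

int main(void)
{
	printf("mergeable: %d\n", gather_bufs(9000, 1500, UIO_MAXIOV));	/* 6 */
	printf("big mode : %d\n", gather_bufs(9000, 1500, 1));		/* -1 */
	return 0;
}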
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ade0568c07a4..2ab291241635 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -41,8 +41,8 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
struct vhost_poll *poll;
- poll = container_of(pt, struct vhost_poll, table);
+ poll = container_of(pt, struct vhost_poll, table);
poll->wqh = wqh;
add_wait_queue(wqh, &poll->wait);
}
@@ -85,6 +85,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
unsigned long mask;
+
mask = file->f_op->poll(file, &poll->table);
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
@@ -101,6 +102,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
unsigned seq)
{
int left;
+
spin_lock_irq(&dev->work_lock);
left = seq - work->done_seq;
spin_unlock_irq(&dev->work_lock);
@@ -222,6 +224,7 @@ static int vhost_worker(void *data)
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
int i;
+
for (i = 0; i < dev->nvqs; ++i) {
dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
UIO_MAXIOV, GFP_KERNEL);
@@ -235,6 +238,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
goto err_nomem;
}
return 0;
+
err_nomem:
for (; i >= 0; --i) {
kfree(dev->vqs[i].indirect);
@@ -247,6 +251,7 @@ err_nomem:
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
int i;
+
for (i = 0; i < dev->nvqs; ++i) {
kfree(dev->vqs[i].indirect);
dev->vqs[i].indirect = NULL;
@@ -296,26 +301,28 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
}
struct vhost_attach_cgroups_struct {
- struct vhost_work work;
- struct task_struct *owner;
- int ret;
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
};
static void vhost_attach_cgroups_work(struct vhost_work *work)
{
- struct vhost_attach_cgroups_struct *s;
- s = container_of(work, struct vhost_attach_cgroups_struct, work);
- s->ret = cgroup_attach_task_all(s->owner, current);
+ struct vhost_attach_cgroups_struct *s;
+
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+ s->ret = cgroup_attach_task_all(s->owner, current);
}
static int vhost_attach_cgroups(struct vhost_dev *dev)
{
- struct vhost_attach_cgroups_struct attach;
- attach.owner = current;
- vhost_work_init(&attach.work, vhost_attach_cgroups_work);
- vhost_work_queue(dev, &attach.work);
- vhost_work_flush(dev, &attach.work);
- return attach.ret;
+ struct vhost_attach_cgroups_struct attach;
+
+ attach.owner = current;
+ vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_work_queue(dev, &attach.work);
+ vhost_work_flush(dev, &attach.work);
+ return attach.ret;
}
/* Caller should have device mutex */
@@ -323,11 +330,13 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
{
struct task_struct *worker;
int err;
+
/* Is there an owner already? */
if (dev->mm) {
err = -EBUSY;
goto err_mm;
}
+
/* No owner, become one */
dev->mm = get_task_mm(current);
worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
@@ -380,6 +389,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
void vhost_dev_cleanup(struct vhost_dev *dev)
{
int i;
+
for (i = 0; i < dev->nvqs; ++i) {
if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
vhost_poll_stop(&dev->vqs[i].poll);
@@ -421,6 +431,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
u64 a = addr / VHOST_PAGE_SIZE / 8;
+
/* Make sure 64 bit math will not overflow. */
if (a > ULONG_MAX - (unsigned long)log_base ||
a + (unsigned long)log_base > ULONG_MAX)
@@ -461,6 +472,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
int log_all)
{
int i;
+
for (i = 0; i < d->nvqs; ++i) {
int ok;
mutex_lock(&d->vqs[i].mutex);
@@ -527,6 +539,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem, *oldmem;
unsigned long size = offsetof(struct vhost_memory, regions);
+
if (copy_from_user(&mem, m, size))
return -EFAULT;
if (mem.padding)
@@ -544,7 +557,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -EFAULT;
}
- if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
+ if (!memory_access_ok(d, newmem,
+ vhost_has_feature(d, VHOST_F_LOG_ALL))) {
kfree(newmem);
return -EFAULT;
}
@@ -560,6 +574,7 @@ static int init_used(struct vhost_virtqueue *vq,
struct vring_used __user *used)
{
int r = put_user(vq->used_flags, &used->flags);
+
if (r)
return r;
return get_user(vq->last_used_idx, &used->idx);
@@ -849,6 +864,7 @@ static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
{
struct vhost_memory_region *reg;
int i;
+
/* linear search is not brilliant, but we really have on the order of 6
* regions in practice */
for (i = 0; i < mem->nregions; ++i) {
@@ -871,6 +887,7 @@ static int set_bit_to_user(int nr, void __user *addr)
void *base;
int bit = nr + (log % PAGE_SIZE) * 8;
int r;
+
r = get_user_pages_fast(log, 1, 1, &page);
if (r < 0)
return r;
@@ -888,6 +905,7 @@ static int log_write(void __user *log_base,
{
u64 write_page = write_address / VHOST_PAGE_SIZE;
int r;
+
if (!write_length)
return 0;
write_length += write_address % VHOST_PAGE_SIZE;
@@ -1037,8 +1055,8 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
i, count);
return -EINVAL;
}
- if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
- sizeof desc))) {
+ if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
+ vq->indirect, sizeof desc))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
i, (size_t)indirect->addr + i * sizeof desc);
return -EINVAL;
@@ -1153,7 +1171,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
i, vq->num, head);
return -EINVAL;
}
- ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
+ ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
if (unlikely(ret)) {
vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
i, vq->desc + i);
@@ -1317,6 +1335,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
__u16 flags;
+
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
@@ -1361,6 +1380,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
{
u16 avail_idx;
int r;
+
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
return false;
vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
@@ -1387,6 +1407,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
void vhost_disable_notify(struct vhost_virtqueue *vq)
{
int r;
+
if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
return;
vq->used_flags |= VRING_USED_F_NO_NOTIFY;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 2a753f1e9183..e6a8d8c0101d 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -376,6 +376,24 @@ config FB_CYBER2000
Say Y if you have a NetWinder or a graphics card containing this
device, otherwise say N.
+config FB_CYBER2000_DDC
+ bool "DDC for CyberPro support"
+ depends on FB_CYBER2000
+ select FB_DDC
+ default y
+ help
+ Say Y here if you want DDC support for your CyberPro graphics
+ card. This is I2C bus support only; the driver does not use EDID.
+
+config FB_CYBER2000_I2C
+ bool "CyberPro 2000/2010/5000 I2C support"
+ depends on FB_CYBER2000 && I2C && ARCH_NETWINDER
+ select I2C_ALGOBIT
+ help
+ Enable support for the I2C video decoder interface on the
+ Integraphics CyberPro 20x0 and 5000 VGA chips. This is used
+ on the Netwinder machines for the SAA7111 video capture.
+
config FB_APOLLO
bool
depends on (FB = y) && APOLLO
@@ -2303,6 +2321,15 @@ config FB_JZ4740
help
Framebuffer support for the JZ4740 SoC.
+config FB_MXS
+ tristate "MXS LCD framebuffer support"
+ depends on FB && ARCH_MXS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Framebuffer support for the MXS SoC.
+
config FB_PUV3_UNIGFX
tristate "PKUnity v3 Unigfx framebuffer support"
depends on FB && UNICORE32 && ARCH_PUV3
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index b0eb3da24670..2ea44b6625fe 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -154,6 +154,7 @@ obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
obj-$(CONFIG_FB_BFIN_7393) += bfin_adv7393fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
+obj-$(CONFIG_FB_MXS) += mxsfb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 1c2c68356ea7..5fc983c5b92c 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -120,8 +120,23 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
static int
clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
{
+ u32 caps;
int ret = 0;
+ if (fb->panel->caps && fb->board->caps)
+ caps = fb->panel->caps & fb->board->caps;
+ else {
+ /* Old way of specifying what can be used */
+ caps = fb->panel->cntl & CNTL_BGR ?
+ CLCD_CAP_BGR : CLCD_CAP_RGB;
+ /* But mask out 444 modes as they weren't supported */
+ caps &= ~CLCD_CAP_444;
+ }
+
+ /* Only TFT panels can do RGB888/BGR888 */
+ if (!(fb->panel->cntl & CNTL_LCDTFT))
+ caps &= ~CLCD_CAP_888;
+
memset(&var->transp, 0, sizeof(var->transp));
var->red.msb_right = 0;
@@ -133,6 +148,13 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
case 2:
case 4:
case 8:
+ /* If we can't do 5551, reject */
+ caps &= CLCD_CAP_5551;
+ if (!caps) {
+ ret = -EINVAL;
+ break;
+ }
+
var->red.length = var->bits_per_pixel;
var->red.offset = 0;
var->green.length = var->bits_per_pixel;
@@ -140,23 +162,61 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
var->blue.length = var->bits_per_pixel;
var->blue.offset = 0;
break;
+
case 16:
- var->red.length = 5;
- var->blue.length = 5;
+ /* If we can't do 444, 5551 or 565, reject */
+ if (!(caps & (CLCD_CAP_444 | CLCD_CAP_5551 | CLCD_CAP_565))) {
+ ret = -EINVAL;
+ break;
+ }
+
/*
- * Green length can be 5 or 6 depending whether
- * we're operating in RGB555 or RGB565 mode.
+ * Green length can be 4, 5 or 6 depending whether
+ * we're operating in 444, 5551 or 565 mode.
*/
- if (var->green.length != 5 && var->green.length != 6)
- var->green.length = 6;
+ if (var->green.length == 4 && caps & CLCD_CAP_444)
+ caps &= CLCD_CAP_444;
+ if (var->green.length == 5 && caps & CLCD_CAP_5551)
+ caps &= CLCD_CAP_5551;
+ else if (var->green.length == 6 && caps & CLCD_CAP_565)
+ caps &= CLCD_CAP_565;
+ else {
+ /*
+ * PL110 officially only supports RGB555,
+ * but may be wired up to allow RGB565.
+ */
+ if (caps & CLCD_CAP_565) {
+ var->green.length = 6;
+ caps &= CLCD_CAP_565;
+ } else if (caps & CLCD_CAP_5551) {
+ var->green.length = 5;
+ caps &= CLCD_CAP_5551;
+ } else {
+ var->green.length = 4;
+ caps &= CLCD_CAP_444;
+ }
+ }
+
+ if (var->green.length >= 5) {
+ var->red.length = 5;
+ var->blue.length = 5;
+ } else {
+ var->red.length = 4;
+ var->blue.length = 4;
+ }
break;
case 32:
- if (fb->panel->cntl & CNTL_LCDTFT) {
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
+ /* If we can't do 888, reject */
+ caps &= CLCD_CAP_888;
+ if (!caps) {
+ ret = -EINVAL;
break;
}
+
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ break;
default:
ret = -EINVAL;
break;
@@ -168,7 +228,20 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
* the bitfield length defined above.
*/
if (ret == 0 && var->bits_per_pixel >= 16) {
- if (fb->panel->cntl & CNTL_BGR) {
+ bool bgr, rgb;
+
+ bgr = caps & CLCD_CAP_BGR && var->blue.offset == 0;
+ rgb = caps & CLCD_CAP_RGB && var->red.offset == 0;
+
+ if (!bgr && !rgb)
+ /*
+ * The requested format was not possible, try just
+ * our capabilities. One of BGR or RGB must be
+ * supported.
+ */
+ bgr = caps & CLCD_CAP_BGR;
+
+ if (bgr) {
var->blue.offset = 0;
var->green.offset = var->blue.offset + var->blue.length;
var->red.offset = var->green.offset + var->green.length;
@@ -443,8 +516,8 @@ static int clcdfb_register(struct clcd_fb *fb)
fb_set_var(&fb->fb, &fb->fb.var);
- printk(KERN_INFO "CLCD: %s hardware, %s display\n",
- fb->board->name, fb->panel->mode.name);
+ dev_info(&fb->dev->dev, "%s hardware, %s display\n",
+ fb->board->name, fb->panel->mode.name);
ret = register_framebuffer(&fb->fb);
if (ret == 0)
@@ -461,7 +534,7 @@ static int clcdfb_register(struct clcd_fb *fb)
return ret;
}
-static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
+static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clcd_board *board = dev->dev.platform_data;
struct clcd_fb *fb;
@@ -486,6 +559,10 @@ static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
fb->dev = dev;
fb->board = board;
+ dev_info(&fb->dev->dev, "PL%03x rev%u at 0x%08llx\n",
+ amba_part(dev), amba_rev(dev),
+ (unsigned long long)dev->res.start);
+
ret = fb->board->setup(fb);
if (ret)
goto free_fb;
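The clcdfb_set_bitfields() rework above negotiates the pixel format from the intersection of what the panel and the board advertise (falling back to the old CNTL_BGR convention when neither provides caps), then narrows that set per requested depth before choosing RGB or BGR ordering. A hedged sketch of how a platform would advertise those capabilities; the .caps fields follow the fb->panel->caps / fb->board->caps usage in the hunk, CLCD_CAP_ALL is assumed to be the catch-all mask accompanying the CLCD_CAP_* flags used above, and the values themselves are only an example:

#include <linux/amba/clcd.h>

static struct clcd_panel example_panel = {
	/* ... mode timings, tim2/tim3, cntl ... */
	.caps	= CLCD_CAP_5551 | CLCD_CAP_565,	/* 16bpp-only TFT panel */
};

static struct clcd_board example_board = {
	/* ... setup/mmap/enable callbacks ... */
	.caps	= CLCD_CAP_ALL,			/* board wiring imposes no limit */
};

/* clcdfb_set_bitfields() then starts from
 * example_panel.caps & example_board.caps and masks it down further
 * according to var->bits_per_pixel and var->green.length. */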
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index 391ac939f011..8686429cbdf0 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -158,12 +158,19 @@ static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
}
}
+static void arkfb_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor)
+{
+ struct arkfb_info *par = info->par;
+
+ svga_tilecursor(par->state.vgabase, info, cursor);
+}
+
static struct fb_tile_ops arkfb_tile_ops = {
.fb_settile = arkfb_settile,
.fb_tilecopy = svga_tilecopy,
.fb_tilefill = svga_tilefill,
.fb_tileblit = svga_tileblit,
- .fb_tilecursor = svga_tilecursor,
+ .fb_tilecursor = arkfb_tilecursor,
.fb_get_tilemax = svga_get_tilemax,
};
@@ -466,32 +473,40 @@ static unsigned short dac_regs[4] = {0x3c8, 0x3c9, 0x3c6, 0x3c7};
static void ark_dac_read_regs(void *data, u8 *code, int count)
{
- u8 regval = vga_rseq(NULL, 0x1C);
+ struct fb_info *info = data;
+ struct arkfb_info *par;
+ u8 regval;
+ par = info->par;
+ regval = vga_rseq(par->state.vgabase, 0x1C);
while (count != 0)
{
- vga_wseq(NULL, 0x1C, regval | (code[0] & 4 ? 0x80 : 0));
- code[1] = vga_r(NULL, dac_regs[code[0] & 3]);
+ vga_wseq(par->state.vgabase, 0x1C, regval | (code[0] & 4 ? 0x80 : 0));
+ code[1] = vga_r(par->state.vgabase, dac_regs[code[0] & 3]);
count--;
code += 2;
}
- vga_wseq(NULL, 0x1C, regval);
+ vga_wseq(par->state.vgabase, 0x1C, regval);
}
static void ark_dac_write_regs(void *data, u8 *code, int count)
{
- u8 regval = vga_rseq(NULL, 0x1C);
+ struct fb_info *info = data;
+ struct arkfb_info *par;
+ u8 regval;
+ par = info->par;
+ regval = vga_rseq(par->state.vgabase, 0x1C);
while (count != 0)
{
- vga_wseq(NULL, 0x1C, regval | (code[0] & 4 ? 0x80 : 0));
- vga_w(NULL, dac_regs[code[0] & 3], code[1]);
+ vga_wseq(par->state.vgabase, 0x1C, regval | (code[0] & 4 ? 0x80 : 0));
+ vga_w(par->state.vgabase, dac_regs[code[0] & 3], code[1]);
count--;
code += 2;
}
- vga_wseq(NULL, 0x1C, regval);
+ vga_wseq(par->state.vgabase, 0x1C, regval);
}
@@ -507,8 +522,8 @@ static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
}
/* Set VGA misc register */
- regval = vga_r(NULL, VGA_MIS_R);
- vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
+ regval = vga_r(par->state.vgabase, VGA_MIS_R);
+ vga_w(par->state.vgabase, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
}
@@ -520,7 +535,10 @@ static int arkfb_open(struct fb_info *info, int user)
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
+ void __iomem *vgabase = par->state.vgabase;
+
memset(&(par->state), 0, sizeof(struct vgastate));
+ par->state.vgabase = vgabase;
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
par->state.num_crtc = 0x60;
par->state.num_seq = 0x30;
@@ -646,50 +664,50 @@ static int arkfb_set_par(struct fb_info *info)
info->var.activate = FB_ACTIVATE_NOW;
/* Unlock registers */
- svga_wcrt_mask(0x11, 0x00, 0x80);
+ svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x80);
/* Blank screen and turn off sync */
- svga_wseq_mask(0x01, 0x20, 0x20);
- svga_wcrt_mask(0x17, 0x00, 0x80);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
/* Set default values */
- svga_set_default_gfx_regs();
- svga_set_default_atc_regs();
- svga_set_default_seq_regs();
- svga_set_default_crt_regs();
- svga_wcrt_multi(ark_line_compare_regs, 0xFFFFFFFF);
- svga_wcrt_multi(ark_start_address_regs, 0);
+ svga_set_default_gfx_regs(par->state.vgabase);
+ svga_set_default_atc_regs(par->state.vgabase);
+ svga_set_default_seq_regs(par->state.vgabase);
+ svga_set_default_crt_regs(par->state.vgabase);
+ svga_wcrt_multi(par->state.vgabase, ark_line_compare_regs, 0xFFFFFFFF);
+ svga_wcrt_multi(par->state.vgabase, ark_start_address_regs, 0);
/* ARK specific initialization */
- svga_wseq_mask(0x10, 0x1F, 0x1F); /* enable linear framebuffer and full memory access */
- svga_wseq_mask(0x12, 0x03, 0x03); /* 4 MB linear framebuffer size */
+ svga_wseq_mask(par->state.vgabase, 0x10, 0x1F, 0x1F); /* enable linear framebuffer and full memory access */
+ svga_wseq_mask(par->state.vgabase, 0x12, 0x03, 0x03); /* 4 MB linear framebuffer size */
- vga_wseq(NULL, 0x13, info->fix.smem_start >> 16);
- vga_wseq(NULL, 0x14, info->fix.smem_start >> 24);
- vga_wseq(NULL, 0x15, 0);
- vga_wseq(NULL, 0x16, 0);
+ vga_wseq(par->state.vgabase, 0x13, info->fix.smem_start >> 16);
+ vga_wseq(par->state.vgabase, 0x14, info->fix.smem_start >> 24);
+ vga_wseq(par->state.vgabase, 0x15, 0);
+ vga_wseq(par->state.vgabase, 0x16, 0);
/* Set the FIFO threshold register */
/* It is fascinating way to store 5-bit value in 8-bit register */
regval = 0x10 | ((threshold & 0x0E) >> 1) | (threshold & 0x01) << 7 | (threshold & 0x10) << 1;
- vga_wseq(NULL, 0x18, regval);
+ vga_wseq(par->state.vgabase, 0x18, regval);
/* Set the offset register */
pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
- svga_wcrt_multi(ark_offset_regs, offset_value);
+ svga_wcrt_multi(par->state.vgabase, ark_offset_regs, offset_value);
/* fix for hi-res textmode */
- svga_wcrt_mask(0x40, 0x08, 0x08);
+ svga_wcrt_mask(par->state.vgabase, 0x40, 0x08, 0x08);
if (info->var.vmode & FB_VMODE_DOUBLE)
- svga_wcrt_mask(0x09, 0x80, 0x80);
+ svga_wcrt_mask(par->state.vgabase, 0x09, 0x80, 0x80);
else
- svga_wcrt_mask(0x09, 0x00, 0x80);
+ svga_wcrt_mask(par->state.vgabase, 0x09, 0x00, 0x80);
if (info->var.vmode & FB_VMODE_INTERLACED)
- svga_wcrt_mask(0x44, 0x04, 0x04);
+ svga_wcrt_mask(par->state.vgabase, 0x44, 0x04, 0x04);
else
- svga_wcrt_mask(0x44, 0x00, 0x04);
+ svga_wcrt_mask(par->state.vgabase, 0x44, 0x00, 0x04);
hmul = 1;
hdiv = 1;
@@ -699,40 +717,40 @@ static int arkfb_set_par(struct fb_info *info)
switch (mode) {
case 0:
pr_debug("fb%d: text mode\n", info->node);
- svga_set_textmode_vga_regs();
+ svga_set_textmode_vga_regs(par->state.vgabase);
- vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
- svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
+ vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
+ svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 1:
pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
- vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
+ vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
- vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
- svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
+ vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
+ svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 2:
pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
- vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
- svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
+ vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
+ svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 3:
pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
- vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode */
+ vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode */
if (info->var.pixclock > 20000) {
pr_debug("fb%d: not using multiplex\n", info->node);
- svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
+ svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
} else {
pr_debug("fb%d: using multiplex\n", info->node);
- svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_16);
hdiv = 2;
}
@@ -740,22 +758,22 @@ static int arkfb_set_par(struct fb_info *info)
case 4:
pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
- vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
- svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
+ svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB1555_16);
break;
case 5:
pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
- vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
- svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
+ svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB0565_16);
break;
case 6:
pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
- vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode ??? */
- svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode ??? */
+ svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB0888_16);
hmul = 3;
hdiv = 2;
@@ -763,8 +781,8 @@ static int arkfb_set_par(struct fb_info *info)
case 7:
pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
- vga_wseq(NULL, 0x11, 0x1E); /* 32bpp accel mode */
- svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ vga_wseq(par->state.vgabase, 0x11, 0x1E); /* 32bpp accel mode */
+ svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB8888_16);
hmul = 2;
break;
@@ -774,7 +792,7 @@ static int arkfb_set_par(struct fb_info *info)
}
ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
- svga_set_timings(&ark_timing_regs, &(info->var), hmul, hdiv,
+ svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
(info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
hmul, info->node);
@@ -782,12 +800,12 @@ static int arkfb_set_par(struct fb_info *info)
/* Set interlaced mode start/end register */
value = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
value = ((value * hmul / hdiv) / 8) - 5;
- vga_wcrt(NULL, 0x42, (value + 1) / 2);
+ vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2);
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
- svga_wcrt_mask(0x17, 0x80, 0x80);
- svga_wseq_mask(0x01, 0x00, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
return 0;
}
@@ -857,23 +875,25 @@ static int arkfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
static int arkfb_blank(int blank_mode, struct fb_info *info)
{
+ struct arkfb_info *par = info->par;
+
switch (blank_mode) {
case FB_BLANK_UNBLANK:
pr_debug("fb%d: unblank\n", info->node);
- svga_wseq_mask(0x01, 0x00, 0x20);
- svga_wcrt_mask(0x17, 0x80, 0x80);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
break;
case FB_BLANK_NORMAL:
pr_debug("fb%d: blank\n", info->node);
- svga_wseq_mask(0x01, 0x20, 0x20);
- svga_wcrt_mask(0x17, 0x80, 0x80);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
break;
case FB_BLANK_POWERDOWN:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_VSYNC_SUSPEND:
pr_debug("fb%d: sync down\n", info->node);
- svga_wseq_mask(0x01, 0x20, 0x20);
- svga_wcrt_mask(0x17, 0x00, 0x80);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
break;
}
return 0;
@@ -884,6 +904,7 @@ static int arkfb_blank(int blank_mode, struct fb_info *info)
static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ struct arkfb_info *par = info->par;
unsigned int offset;
/* Calculate the offset */
@@ -897,7 +918,7 @@ static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info
}
/* Set the offset */
- svga_wcrt_multi(ark_start_address_regs, offset);
+ svga_wcrt_multi(par->state.vgabase, ark_start_address_regs, offset);
return 0;
}
@@ -930,6 +951,8 @@ static struct fb_ops arkfb_ops = {
/* PCI probe */
static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ struct pci_bus_region bus_reg;
+ struct resource vga_res;
struct fb_info *info;
struct arkfb_info *par;
int rc;
@@ -985,8 +1008,17 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
goto err_iomap;
}
+ bus_reg.start = 0;
+ bus_reg.end = 64 * 1024;
+
+ vga_res.flags = IORESOURCE_IO;
+
+ pcibios_bus_to_resource(dev, &vga_res, &bus_reg);
+
+ par->state.vgabase = (void __iomem *) vga_res.start;
+
/* FIXME get memsize */
- regval = vga_rseq(NULL, 0x10);
+ regval = vga_rseq(par->state.vgabase, 0x10);
info->screen_size = (1 << (regval >> 6)) << 20;
info->fix.smem_len = info->screen_size;
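The arkfb changes above stop relying on the implicit NULL register base (legacy VGA port I/O) and instead thread par->state.vgabase, derived via pcibios_bus_to_resource(), through every vga_*/svga_* helper, which is what lets the driver reach the VGA registers through a relocated I/O window. As a reading aid, a call such as svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20) boils down to roughly the read-modify-write below; this is a sketch assuming the vga_rseq()/vga_wseq() helpers from <video/vga.h>, not code from the patch:

#include <linux/types.h>
#include <video/vga.h>

/* Set the bits in 'set' selected by 'mask' in sequencer register 'reg',
 * going through the per-device base instead of hard-coded VGA ports. */
static void example_wseq_mask(void __iomem *vgabase, u8 reg, u8 set, u8 mask)
{
	u8 val = vga_rseq(vgabase, reg);	/* NULL base still means port I/O */

	vga_wseq(vgabase, reg, (val & ~mask) | (set & mask));
}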
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index bac163450216..ccecf9974587 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -68,7 +68,7 @@ static void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
}
#endif
-static const u32 contrast_ctr = ATMEL_LCDC_PS_DIV8
+static u32 contrast_ctr = ATMEL_LCDC_PS_DIV8
| ATMEL_LCDC_POL_POSITIVE
| ATMEL_LCDC_ENA_PWMENABLE;
@@ -127,6 +127,7 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
return;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 0xff;
bl = backlight_device_register("backlight", &sinfo->pdev->dev, sinfo,
&atmel_lcdc_bl_ops, &props);
@@ -163,6 +164,10 @@ static void exit_backlight(struct atmel_lcdfb_info *sinfo)
static void init_contrast(struct atmel_lcdfb_info *sinfo)
{
+ /* contrast pwm can be 'inverted' */
+ if (sinfo->lcdcon_pol_negative)
+ contrast_ctr &= ~(ATMEL_LCDC_POL_POSITIVE);
+
/* have some default contrast/backlight settings */
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, ATMEL_LCDC_CVAL_DEFAULT);
@@ -710,11 +715,35 @@ static int atmel_lcdfb_pan_display(struct fb_var_screeninfo *var,
return 0;
}
+static int atmel_lcdfb_blank(int blank_mode, struct fb_info *info)
+{
+ struct atmel_lcdfb_info *sinfo = info->par;
+
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK:
+ case FB_BLANK_NORMAL:
+ atmel_lcdfb_start(sinfo);
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ break;
+ case FB_BLANK_POWERDOWN:
+ atmel_lcdfb_stop(sinfo);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* let fbcon do a soft blank for us */
+ return ((blank_mode == FB_BLANK_NORMAL) ? 1 : 0);
+}
+
static struct fb_ops atmel_lcdfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = atmel_lcdfb_check_var,
.fb_set_par = atmel_lcdfb_set_par,
.fb_setcolreg = atmel_lcdfb_setcolreg,
+ .fb_blank = atmel_lcdfb_blank,
.fb_pan_display = atmel_lcdfb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
@@ -816,6 +845,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
sinfo->guard_time = pdata_sinfo->guard_time;
sinfo->smem_len = pdata_sinfo->smem_len;
sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
+ sinfo->lcdcon_pol_negative = pdata_sinfo->lcdcon_pol_negative;
sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
} else {
dev_err(dev, "cannot get default configuration\n");
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 4cb6a576c567..b0b2ac335347 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1818,6 +1818,7 @@ static void aty128_bl_init(struct aty128fb_par *par)
snprintf(name, sizeof(name), "aty128bl%d", info->node);
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, info->dev, par, &aty128_bl_data,
&props);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 94e293fce1d2..d437b3daf1f5 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2241,6 +2241,7 @@ static void aty_bl_init(struct atyfb_par *par)
snprintf(name, sizeof(name), "atybl%d", info->node);
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, info->dev, par, &aty_bl_data,
&props);
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 9b811ddbce83..db572df7e1ef 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -158,6 +158,7 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo)
snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node);
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, rinfo->info->dev, pdata,
&radeon_bl_data, &props);
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 3c1e13ed1cba..32f8cf6200a7 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -1248,7 +1248,7 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
/* Workaround from XFree */
if (rinfo->is_mobility) {
- /* A temporal workaround for the occational blanking on certain laptop
+ /* A temporal workaround for the occasional blanking on certain laptop
* panels. This appears to related to the PLL divider registers
* (fail to lock?). It occurs even when all dividers are the same
* with their old settings. In this case we really don't need to
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/aty/radeon_i2c.c
index 78d1f4cd1fe0..ab1d0fd76316 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/aty/radeon_i2c.c
@@ -100,6 +100,9 @@ void radeon_create_i2c_busses(struct radeonfb_info *rinfo)
{
rinfo->i2c[0].rinfo = rinfo;
rinfo->i2c[0].ddc_reg = GPIO_MONID;
+#ifndef CONFIG_PPC
+ rinfo->i2c[0].adapter.class = I2C_CLASS_HWMON;
+#endif
radeon_setup_i2c_bus(&rinfo->i2c[0], "monid");
rinfo->i2c[1].rinfo = rinfo;
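Setting adapter.class to I2C_CLASS_HWMON above opts the first radeon DDC bus into I2C device auto-detection on non-PPC systems: the i2c core only runs a client driver's detect() callback on adapters whose class matches the driver's. A hedged sketch of the consuming side; the driver below is illustrative and abbreviated (probe and id_table omitted), only the i2c_driver fields are the real API:

#include <linux/i2c.h>

static int example_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	/* Probe the chip at client->addr and fill info->type on success. */
	return -ENODEV;
}

static const unsigned short example_addrs[] = { 0x2d, I2C_CLIENT_END };

static struct i2c_driver example_hwmon_driver = {
	.driver		= { .name = "example-hwmon" },
	.class		= I2C_CLASS_HWMON,	/* only scan hwmon-class adapters */
	.detect		= example_detect,
	.address_list	= example_addrs,
};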
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index b224396b86d5..c8b520e9a11a 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -12,11 +12,12 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/backlight.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>
-#include <linux/slab.h>
#define MAX_BRIGHTNESS (0xFF)
#define MIN_BRIGHTNESS (0)
@@ -161,32 +162,13 @@ static const struct backlight_ops pm860x_backlight_ops = {
.get_brightness = pm860x_backlight_get_brightness,
};
-static int __check_device(struct pm860x_backlight_pdata *pdata, char *name)
-{
- struct pm860x_backlight_pdata *p = pdata;
- int ret = -EINVAL;
-
- while (p && p->id) {
- if ((p->id != PM8606_ID_BACKLIGHT) || (p->flags < 0))
- break;
-
- if (!strncmp(name, pm860x_backlight_name[p->flags],
- MFD_NAME_SIZE)) {
- ret = (int)p->flags;
- break;
- }
- p++;
- }
- return ret;
-}
-
static int pm860x_backlight_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_platform_data *pm860x_pdata;
struct pm860x_backlight_pdata *pdata = NULL;
struct pm860x_backlight_data *data;
struct backlight_device *bl;
+ struct mfd_cell *cell;
struct resource *res;
struct backlight_properties props;
unsigned char value;
@@ -199,10 +181,10 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (pdev->dev.parent->platform_data) {
- pm860x_pdata = pdev->dev.parent->platform_data;
- pdata = pm860x_pdata->backlight;
- }
+ cell = pdev->dev.platform_data;
+ if (cell == NULL)
+ return -ENODEV;
+ pdata = cell->mfd_data;
if (pdata == NULL) {
dev_err(&pdev->dev, "platform data isn't assigned to "
"backlight\n");
@@ -219,7 +201,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
data->current_brightness = MAX_BRIGHTNESS;
data->pwm = pdata->pwm;
data->iset = pdata->iset;
- data->port = __check_device(pdata, name);
+ data->port = pdata->flags;
if (data->port < 0) {
dev_err(&pdev->dev, "wrong platform data is assigned");
kfree(data);
@@ -227,6 +209,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
}
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHTNESS;
bl = backlight_device_register(name, &pdev->dev, data,
&pm860x_backlight_ops, &props);
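The probe change above is part of the mfd_cell conversion: instead of digging the backlight platform data out of the parent's pm860x_platform_data, the child now expects its platform_data to be the struct mfd_cell that created it and takes the sub-device data from cell->mfd_data. A hedged sketch of the producing side in the parent MFD driver; the cell name and pdata values are illustrative, while the mfd_cell/mfd_add_devices usage follows <linux/mfd/core.h> of this kernel generation:

#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>

static struct pm860x_backlight_pdata example_bk_pdata = {
	.pwm	= 0,
	.iset	= 0,
	.flags	= 0,		/* selects which backlight output to drive */
};

static struct mfd_cell example_bk_cell = {
	.name		= "88pm860x-backlight",	/* must match the platform driver */
	.mfd_data	= &example_bk_pdata,
};

/* In the parent's probe:
 *	mfd_add_devices(chip->dev, 0, &example_bk_cell, 1, NULL, 0);
 * creates the platform device that pm860x_backlight_probe() binds to,
 * with example_bk_cell ending up in pdev->dev.platform_data. */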
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index e54a337227ea..0c9373bedd1f 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -109,6 +109,14 @@ config LCD_S6E63M0
If you have an S6E63M0 LCD Panel, say Y to enable its
LCD control driver.
+config LCD_LD9040
+ tristate "LD9040 AMOLED LCD Driver"
+ depends on SPI && BACKLIGHT_CLASS_DEVICE
+ default n
+ help
+ If you have an LD9040 Panel, say Y to enable its
+ control driver.
+
endif # LCD_CLASS_DEVICE
#
@@ -236,12 +244,12 @@ config BACKLIGHT_MAX8925
If you have a LCD backlight connected to the WLED output of MAX8925
WLED output, say Y here to enable this driver.
-config BACKLIGHT_MBP_NVIDIA
- tristate "MacBook Pro Nvidia Backlight Driver"
- depends on X86
+config BACKLIGHT_APPLE
+ tristate "Apple Backlight Driver"
+ depends on X86 && ACPI
help
- If you have an Apple Macbook Pro with Nvidia graphics hardware say Y
- to enable a driver for its backlight
+	  If you have an Intel-based Apple, say Y to enable a driver for its
+ backlight.
config BACKLIGHT_TOSA
tristate "Sharp SL-6000 Backlight Driver"
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 44c0f81ad85d..b9ca8490df87 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
+obj-$(CONFIG_LCD_LD9040) += ld9040.o
obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
@@ -26,7 +27,7 @@ obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
-obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o
+obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 9f436e014f85..af3119707dbf 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -303,6 +303,7 @@ static int __devinit adp5520_bl_probe(struct platform_device *pdev)
mutex_init(&data->lock);
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = ADP5020_MAX_BRIGHTNESS;
bl = backlight_device_register(pdev->name, data->master, data,
&adp5520_bl_ops, &props);
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 734c650a47c4..d2a96a421ffd 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -709,6 +709,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = ADP8860_MAX_BRIGHTNESS;
mutex_init(&data->lock);
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index fe9af129c5dd..c861c41af442 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -104,6 +104,7 @@ static int __devinit adx_backlight_probe(struct platform_device *pdev)
}
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 0xff;
bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
bl, &adx_backlight_ops, &props);
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
new file mode 100644
index 000000000000..be98d152b7fd
--- /dev/null
+++ b/drivers/video/backlight/apple_bl.c
@@ -0,0 +1,241 @@
+/*
+ * Backlight Driver for Intel-based Apples
+ *
+ * Copyright (c) Red Hat <mjg@redhat.com>
+ * Based on code from Pommed:
+ * Copyright (C) 2006 Nicolas Boichat <nicolas @boichat.ch>
+ * Copyright (C) 2006 Felipe Alfaro Solana <felipe_alfaro @linuxmail.org>
+ * Copyright (C) 2007 Julien BLACHE <jb@jblache.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver triggers SMIs which cause the firmware to change the
+ * backlight brightness. This is icky in many ways, but it's impractical to
+ * get at the firmware code in order to figure out what it's actually doing.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+
+static struct backlight_device *apple_backlight_device;
+
+struct hw_data {
+ /* I/O resource to allocate. */
+ unsigned long iostart;
+ unsigned long iolen;
+ /* Backlight operations structure. */
+ const struct backlight_ops backlight_ops;
+ void (*set_brightness)(int);
+};
+
+static const struct hw_data *hw_data;
+
+#define DRIVER "apple_backlight: "
+
+/* Module parameters. */
+static int debug;
+module_param_named(debug, debug, int, 0644);
+MODULE_PARM_DESC(debug, "Set to one to enable debugging messages.");
+
+/*
+ * Implementation for machines with Intel chipset.
+ */
+static void intel_chipset_set_brightness(int intensity)
+{
+ outb(0x04 | (intensity << 4), 0xb3);
+ outb(0xbf, 0xb2);
+}
+
+static int intel_chipset_send_intensity(struct backlight_device *bd)
+{
+ int intensity = bd->props.brightness;
+
+ if (debug)
+ printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
+ intensity);
+
+ intel_chipset_set_brightness(intensity);
+ return 0;
+}
+
+static int intel_chipset_get_intensity(struct backlight_device *bd)
+{
+ int intensity;
+
+ outb(0x03, 0xb3);
+ outb(0xbf, 0xb2);
+ intensity = inb(0xb3) >> 4;
+
+ if (debug)
+ printk(KERN_DEBUG DRIVER "read brightness of %d\n",
+ intensity);
+
+ return intensity;
+}
+
+static const struct hw_data intel_chipset_data = {
+ .iostart = 0xb2,
+ .iolen = 2,
+ .backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = intel_chipset_get_intensity,
+ .update_status = intel_chipset_send_intensity,
+ },
+ .set_brightness = intel_chipset_set_brightness,
+};
+
+/*
+ * Implementation for machines with Nvidia chipset.
+ */
+static void nvidia_chipset_set_brightness(int intensity)
+{
+ outb(0x04 | (intensity << 4), 0x52f);
+ outb(0xbf, 0x52e);
+}
+
+static int nvidia_chipset_send_intensity(struct backlight_device *bd)
+{
+ int intensity = bd->props.brightness;
+
+ if (debug)
+ printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
+ intensity);
+
+ nvidia_chipset_set_brightness(intensity);
+ return 0;
+}
+
+static int nvidia_chipset_get_intensity(struct backlight_device *bd)
+{
+ int intensity;
+
+ outb(0x03, 0x52f);
+ outb(0xbf, 0x52e);
+ intensity = inb(0x52f) >> 4;
+
+ if (debug)
+ printk(KERN_DEBUG DRIVER "read brightness of %d\n",
+ intensity);
+
+ return intensity;
+}
+
+static const struct hw_data nvidia_chipset_data = {
+ .iostart = 0x52e,
+ .iolen = 2,
+ .backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = nvidia_chipset_get_intensity,
+ .update_status = nvidia_chipset_send_intensity
+ },
+ .set_brightness = nvidia_chipset_set_brightness,
+};
+
+static int __devinit apple_bl_add(struct acpi_device *dev)
+{
+ struct backlight_properties props;
+ struct pci_dev *host;
+ int intensity;
+
+ host = pci_get_bus_and_slot(0, 0);
+
+ if (!host) {
+ printk(KERN_ERR DRIVER "unable to find PCI host\n");
+ return -ENODEV;
+ }
+
+ if (host->vendor == PCI_VENDOR_ID_INTEL)
+ hw_data = &intel_chipset_data;
+ else if (host->vendor == PCI_VENDOR_ID_NVIDIA)
+ hw_data = &nvidia_chipset_data;
+
+ pci_dev_put(host);
+
+ if (!hw_data) {
+ printk(KERN_ERR DRIVER "unknown hardware\n");
+ return -ENODEV;
+ }
+
+ /* Check that the hardware responds - this may not work under EFI */
+
+ intensity = hw_data->backlight_ops.get_brightness(NULL);
+
+ if (!intensity) {
+ hw_data->set_brightness(1);
+ if (!hw_data->backlight_ops.get_brightness(NULL))
+ return -ENODEV;
+
+ hw_data->set_brightness(0);
+ }
+
+ if (!request_region(hw_data->iostart, hw_data->iolen,
+ "Apple backlight"))
+ return -ENXIO;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = 15;
+ apple_backlight_device = backlight_device_register("apple_backlight",
+ NULL, NULL, &hw_data->backlight_ops, &props);
+
+ if (IS_ERR(apple_backlight_device)) {
+ release_region(hw_data->iostart, hw_data->iolen);
+ return PTR_ERR(apple_backlight_device);
+ }
+
+ apple_backlight_device->props.brightness =
+ hw_data->backlight_ops.get_brightness(apple_backlight_device);
+ backlight_update_status(apple_backlight_device);
+
+ return 0;
+}
+
+static int __devexit apple_bl_remove(struct acpi_device *dev, int type)
+{
+ backlight_device_unregister(apple_backlight_device);
+
+ release_region(hw_data->iostart, hw_data->iolen);
+ hw_data = NULL;
+ return 0;
+}
+
+static const struct acpi_device_id apple_bl_ids[] = {
+ {"APP0002", 0},
+ {"", 0},
+};
+
+static struct acpi_driver apple_bl_driver = {
+ .name = "Apple backlight",
+ .ids = apple_bl_ids,
+ .ops = {
+ .add = apple_bl_add,
+ .remove = apple_bl_remove,
+ },
+};
+
+static int __init apple_bl_init(void)
+{
+ return acpi_bus_register_driver(&apple_bl_driver);
+}
+
+static void __exit apple_bl_exit(void)
+{
+ acpi_bus_unregister_driver(&apple_bl_driver);
+}
+
+module_init(apple_bl_init);
+module_exit(apple_bl_exit);
+
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_DESCRIPTION("Apple Backlight Driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(acpi, apple_bl_ids);
+MODULE_ALIAS("mbp_nvidia_bl");
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index e6a66dab088c..0443a4f71858 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -168,6 +168,7 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
}
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
&atmel_pwm_bl_ops, &props);
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 08703299ef61..80d292fb92d8 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -19,6 +19,12 @@
#include <asm/backlight.h>
#endif
+static const char * const backlight_types[] = {
+ [BACKLIGHT_RAW] = "raw",
+ [BACKLIGHT_PLATFORM] = "platform",
+ [BACKLIGHT_FIRMWARE] = "firmware",
+};
+
#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
/* This callback gets called when something important happens inside a
@@ -169,6 +175,14 @@ static ssize_t backlight_store_brightness(struct device *dev,
return rc;
}
+static ssize_t backlight_show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct backlight_device *bd = to_backlight_device(dev);
+
+ return sprintf(buf, "%s\n", backlight_types[bd->props.type]);
+}
+
static ssize_t backlight_show_max_brightness(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -234,6 +248,7 @@ static struct device_attribute bl_device_attributes[] = {
__ATTR(actual_brightness, 0444, backlight_show_actual_brightness,
NULL),
__ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL),
+ __ATTR(type, 0444, backlight_show_type, NULL),
__ATTR_NULL,
};
@@ -292,9 +307,16 @@ struct backlight_device *backlight_device_register(const char *name,
dev_set_drvdata(&new_bd->dev, devdata);
/* Set default properties */
- if (props)
+ if (props) {
memcpy(&new_bd->props, props,
sizeof(struct backlight_properties));
+ if (props->type <= 0 || props->type >= BACKLIGHT_TYPE_MAX) {
+ WARN(1, "%s: invalid backlight type", name);
+ new_bd->props.type = BACKLIGHT_RAW;
+ }
+ } else {
+ new_bd->props.type = BACKLIGHT_RAW;
+ }
rc = device_register(&new_bd->dev);
if (rc) {
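The core change above gives every backlight device a declared control type (raw register access, platform hook, or firmware call), validates it at registration time, and exposes it to user space as /sys/class/backlight/<name>/type, which is why the other hunks in this series all set props.type before registering. A minimal registration sketch, with everything except the backlight API names being illustrative:

#include <linux/backlight.h>
#include <linux/string.h>

static struct backlight_device *example_register(struct device *parent,
						 const struct backlight_ops *ops)
{
	struct backlight_properties props;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_PLATFORM;	/* reads back as "platform" in sysfs */
	props.max_brightness = 100;

	return backlight_device_register("example-bl", parent, NULL, ops, &props);
}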
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 1e71c35083bb..af6098396fe6 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -562,6 +562,7 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = pdata->max_intensity;
lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd,
&corgi_bl_ops, &props);
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 397d15eb1ea8..6c8c54041fae 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -193,6 +193,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
}
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL,
&cr_backlight_ops, &props);
if (IS_ERR(bdp)) {
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 87659ed79bd7..62043f12a5a4 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -136,6 +136,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
DA9034_WLED_ISET(pdata->output_current));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = max_brightness;
bl = backlight_device_register(pdev->name, data->da903x_dev, data,
&da903x_backlight_ops, &props);
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index b0cc49184803..9f1e389d51d2 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -87,6 +87,7 @@ static int __init ep93xxbl_probe(struct platform_device *dev)
ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = EP93XX_MAX_BRIGHT;
bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl,
&ep93xxbl_ops, &props);
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 312ca619735d..8c6befd65a33 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -91,6 +91,7 @@ static int genericbl_probe(struct platform_device *pdev)
name = machinfo->name;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = machinfo->max_intensity;
bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops,
&props);
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 267d23f8d645..38aa00272141 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -109,6 +109,7 @@ static int __devinit hp680bl_probe(struct platform_device *pdev)
struct backlight_device *bd;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = HP680_MAX_INTENSITY;
bd = backlight_device_register("hp680-bl", &pdev->dev, NULL,
&hp680bl_ops, &props);
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 2f177b3a4885..de65d80159be 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -106,6 +106,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
struct backlight_device *bd;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = BL_MAX_BRIGHT;
bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL,
&jornada_bl_ops, &props);
@@ -146,12 +147,12 @@ static struct platform_driver jornada_bl_driver = {
},
};
-int __init jornada_bl_init(void)
+static int __init jornada_bl_init(void)
{
return platform_driver_register(&jornada_bl_driver);
}
-void __exit jornada_bl_exit(void)
+static void __exit jornada_bl_exit(void)
{
platform_driver_unregister(&jornada_bl_driver);
}
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index cbbb167fd268..d2ff658b4144 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -135,12 +135,12 @@ static struct platform_driver jornada_lcd_driver = {
},
};
-int __init jornada_lcd_init(void)
+static int __init jornada_lcd_init(void)
{
return platform_driver_register(&jornada_lcd_driver);
}
-void __exit jornada_lcd_exit(void)
+static void __exit jornada_lcd_exit(void)
{
platform_driver_unregister(&jornada_lcd_driver);
}
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index f439a8632287..72dd5556a35b 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -149,6 +149,7 @@ static int kb3886bl_probe(struct platform_device *pdev)
machinfo->limit_mask = -1;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = machinfo->max_intensity;
kb3886_backlight_device = backlight_device_register("kb3886-bl",
&pdev->dev, NULL,
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
new file mode 100644
index 000000000000..7281b2506a67
--- /dev/null
+++ b/drivers/video/backlight/ld9040.c
@@ -0,0 +1,819 @@
+/*
+ * ld9040 AMOLED LCD panel driver.
+ *
+ * Copyright (c) 2011 Samsung Electronics
+ * Author: Donghwa Lee <dh09.lee@samsung.com>
+ * Derived from drivers/video/backlight/s6e63m0.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/wait.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/lcd.h>
+#include <linux/backlight.h>
+
+#include "ld9040_gamma.h"
+
+#define SLEEPMSEC 0x1000
+#define ENDDEF 0x2000
+#define DEFMASK 0xFF00
+#define COMMAND_ONLY 0xFE
+#define DATA_ONLY 0xFF
+
+#define MIN_BRIGHTNESS 0
+#define MAX_BRIGHTNESS 24
+#define power_is_on(pwr) ((pwr) <= FB_BLANK_NORMAL)
+
+struct ld9040 {
+ struct device *dev;
+ struct spi_device *spi;
+ unsigned int power;
+ unsigned int current_brightness;
+
+ struct lcd_device *ld;
+ struct backlight_device *bd;
+ struct lcd_platform_data *lcd_pd;
+};
+
+static const unsigned short seq_swreset[] = {
+ 0x01, COMMAND_ONLY,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_user_setting[] = {
+ 0xF0, 0x5A,
+
+ DATA_ONLY, 0x5A,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_elvss_on[] = {
+ 0xB1, 0x0D,
+
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x16,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_gtcon[] = {
+ 0xF7, 0x09,
+
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_panel_condition[] = {
+ 0xF8, 0x05,
+
+ DATA_ONLY, 0x65,
+ DATA_ONLY, 0x96,
+ DATA_ONLY, 0x71,
+ DATA_ONLY, 0x7D,
+ DATA_ONLY, 0x19,
+ DATA_ONLY, 0x3B,
+ DATA_ONLY, 0x0D,
+ DATA_ONLY, 0x19,
+ DATA_ONLY, 0x7E,
+ DATA_ONLY, 0x0D,
+ DATA_ONLY, 0xE2,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x7E,
+ DATA_ONLY, 0x7D,
+ DATA_ONLY, 0x07,
+ DATA_ONLY, 0x07,
+ DATA_ONLY, 0x20,
+ DATA_ONLY, 0x20,
+ DATA_ONLY, 0x20,
+ DATA_ONLY, 0x02,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_gamma_set1[] = {
+ 0xF9, 0x00,
+
+ DATA_ONLY, 0xA7,
+ DATA_ONLY, 0xB4,
+ DATA_ONLY, 0xAE,
+ DATA_ONLY, 0xBF,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x91,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0xB2,
+ DATA_ONLY, 0xB4,
+ DATA_ONLY, 0xAA,
+ DATA_ONLY, 0xBB,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0xAC,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0xB3,
+ DATA_ONLY, 0xB1,
+ DATA_ONLY, 0xAA,
+ DATA_ONLY, 0xBC,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0xB3,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_gamma_ctrl[] = {
+ 0xFB, 0x02,
+
+ DATA_ONLY, 0x5A,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_gamma_start[] = {
+ 0xF9, COMMAND_ONLY,
+
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_apon[] = {
+ 0xF3, 0x00,
+
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x0A,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_display_ctrl[] = {
+ 0xF2, 0x02,
+
+ DATA_ONLY, 0x08,
+ DATA_ONLY, 0x08,
+ DATA_ONLY, 0x10,
+ DATA_ONLY, 0x10,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_manual_pwr[] = {
+ 0xB0, 0x04,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_pwr_ctrl[] = {
+ 0xF4, 0x0A,
+
+ DATA_ONLY, 0x87,
+ DATA_ONLY, 0x25,
+ DATA_ONLY, 0x6A,
+ DATA_ONLY, 0x44,
+ DATA_ONLY, 0x02,
+ DATA_ONLY, 0x88,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_sleep_out[] = {
+ 0x11, COMMAND_ONLY,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_sleep_in[] = {
+ 0x10, COMMAND_ONLY,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_display_on[] = {
+ 0x29, COMMAND_ONLY,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_display_off[] = {
+ 0x28, COMMAND_ONLY,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vci1_1st_en[] = {
+ 0xF3, 0x10,
+
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vl1_en[] = {
+ 0xF3, 0x11,
+
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vl2_en[] = {
+ 0xF3, 0x13,
+
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vci1_2nd_en[] = {
+ 0xF3, 0x33,
+
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vl3_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vreg1_amp_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0x01,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vgh_amp_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0x11,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vgl_amp_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0x31,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x02,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vmos_amp_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0xB1,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vint_amp_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0xF1,
+ /* DATA_ONLY, 0x71, VMOS/VBL/VBH not used */
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vbh_amp_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0xF9,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_vbl_amp_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0xFD,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_gam_amp_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0xFF,
+ /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_sd_amp_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0xFF,
+ /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
+ DATA_ONLY, 0x80,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_gls_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0xFF,
+ /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
+ DATA_ONLY, 0x81,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_els_en[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0xFF,
+ /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
+ DATA_ONLY, 0x83,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
+ ENDDEF, 0x00
+};
+
+static const unsigned short seq_el_on[] = {
+ 0xF3, 0x37,
+
+ DATA_ONLY, 0xFF,
+ /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
+ DATA_ONLY, 0x87,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
+ ENDDEF, 0x00
+};
+
+static int ld9040_spi_write_byte(struct ld9040 *lcd, int addr, int data)
+{
+ u16 buf[1];
+ struct spi_message msg;
+
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ };
+
+ buf[0] = (addr << 8) | data;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(lcd->spi, &msg);
+}
+
+static int ld9040_spi_write(struct ld9040 *lcd, unsigned char address,
+ unsigned char command)
+{
+ int ret = 0;
+
+ if (address != DATA_ONLY)
+ ret = ld9040_spi_write_byte(lcd, 0x0, address);
+ if (command != COMMAND_ONLY)
+ ret = ld9040_spi_write_byte(lcd, 0x1, command);
+
+ return ret;
+}
+
+static int ld9040_panel_send_sequence(struct ld9040 *lcd,
+ const unsigned short *wbuf)
+{
+ int ret = 0, i = 0;
+
+ while ((wbuf[i] & DEFMASK) != ENDDEF) {
+ if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
+ ret = ld9040_spi_write(lcd, wbuf[i], wbuf[i+1]);
+ if (ret)
+ break;
+ } else
+ udelay(wbuf[i+1]*1000);
+ i += 2;
+ }
+
+ return ret;
+}
+
+static int _ld9040_gamma_ctl(struct ld9040 *lcd, const unsigned int *gamma)
+{
+ unsigned int i = 0;
+ int ret = 0;
+
+ /* start gamma table updating. */
+ ret = ld9040_panel_send_sequence(lcd, seq_gamma_start);
+ if (ret) {
+ dev_err(lcd->dev, "failed to disable gamma table updating.\n");
+ goto gamma_err;
+ }
+
+ for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
+ ret = ld9040_spi_write(lcd, DATA_ONLY, gamma[i]);
+ if (ret) {
+ dev_err(lcd->dev, "failed to set gamma table.\n");
+ goto gamma_err;
+ }
+ }
+
+ /* update gamma table. */
+ ret = ld9040_panel_send_sequence(lcd, seq_gamma_ctrl);
+ if (ret)
+ dev_err(lcd->dev, "failed to update gamma table.\n");
+
+gamma_err:
+ return ret;
+}
+
+static int ld9040_gamma_ctl(struct ld9040 *lcd, int gamma)
+{
+ int ret = 0;
+
+ ret = _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
+
+ return ret;
+}
+
+
+static int ld9040_ldi_init(struct ld9040 *lcd)
+{
+ int ret, i;
+ static const unsigned short *init_seq[] = {
+ seq_user_setting,
+ seq_panel_condition,
+ seq_display_ctrl,
+ seq_manual_pwr,
+ seq_elvss_on,
+ seq_gtcon,
+ seq_gamma_set1,
+ seq_gamma_ctrl,
+ seq_sleep_out,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
+ ret = ld9040_panel_send_sequence(lcd, init_seq[i]);
+ /* workaround: minimum delay time for transferring CMD */
+ udelay(300);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int ld9040_ldi_enable(struct ld9040 *lcd)
+{
+ int ret = 0;
+
+ ret = ld9040_panel_send_sequence(lcd, seq_display_on);
+
+ return ret;
+}
+
+static int ld9040_ldi_disable(struct ld9040 *lcd)
+{
+ int ret;
+
+ ret = ld9040_panel_send_sequence(lcd, seq_display_off);
+ ret = ld9040_panel_send_sequence(lcd, seq_sleep_in);
+
+ return ret;
+}
+
+static int ld9040_power_on(struct ld9040 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd = NULL;
+ pd = lcd->lcd_pd;
+ if (!pd) {
+ dev_err(lcd->dev, "platform data is NULL.\n");
+ return -EFAULT;
+ }
+
+ if (!pd->power_on) {
+ dev_err(lcd->dev, "power_on is NULL.\n");
+ return -EFAULT;
+ } else {
+ pd->power_on(lcd->ld, 1);
+ mdelay(pd->power_on_delay);
+ }
+
+ if (!pd->reset) {
+ dev_err(lcd->dev, "reset is NULL.\n");
+ return -EFAULT;
+ } else {
+ pd->reset(lcd->ld);
+ mdelay(pd->reset_delay);
+ }
+
+ ret = ld9040_ldi_init(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to initialize ldi.\n");
+ return ret;
+ }
+
+ ret = ld9040_ldi_enable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to enable ldi.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ld9040_power_off(struct ld9040 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd = NULL;
+
+ pd = lcd->lcd_pd;
+ if (!pd) {
+ dev_err(lcd->dev, "platform data is NULL.\n");
+ return -EFAULT;
+ }
+
+ ret = ld9040_ldi_disable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "lcd setting failed.\n");
+ return -EIO;
+ }
+
+ mdelay(pd->power_off_delay);
+
+ if (!pd->power_on) {
+ dev_err(lcd->dev, "power_on is NULL.\n");
+ return -EFAULT;
+ } else
+ pd->power_on(lcd->ld, 0);
+
+ return 0;
+}
+
+static int ld9040_power(struct ld9040 *lcd, int power)
+{
+ int ret = 0;
+
+ if (power_is_on(power) && !power_is_on(lcd->power))
+ ret = ld9040_power_on(lcd);
+ else if (!power_is_on(power) && power_is_on(lcd->power))
+ ret = ld9040_power_off(lcd);
+
+ if (!ret)
+ lcd->power = power;
+
+ return ret;
+}
+
+static int ld9040_set_power(struct lcd_device *ld, int power)
+{
+ struct ld9040 *lcd = lcd_get_data(ld);
+
+ if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
+ power != FB_BLANK_NORMAL) {
+ dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
+ return -EINVAL;
+ }
+
+ return ld9040_power(lcd, power);
+}
+
+static int ld9040_get_power(struct lcd_device *ld)
+{
+ struct ld9040 *lcd = lcd_get_data(ld);
+
+ return lcd->power;
+}
+
+static int ld9040_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static int ld9040_set_brightness(struct backlight_device *bd)
+{
+ int ret = 0, brightness = bd->props.brightness;
+ struct ld9040 *lcd = bl_get_data(bd);
+
+ if (brightness < MIN_BRIGHTNESS ||
+ brightness > bd->props.max_brightness) {
+ dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
+ MIN_BRIGHTNESS, MAX_BRIGHTNESS);
+ return -EINVAL;
+ }
+
+ ret = ld9040_gamma_ctl(lcd, bd->props.brightness);
+ if (ret) {
+ dev_err(&bd->dev, "lcd brightness setting failed.\n");
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static struct lcd_ops ld9040_lcd_ops = {
+ .set_power = ld9040_set_power,
+ .get_power = ld9040_get_power,
+};
+
+static const struct backlight_ops ld9040_backlight_ops = {
+ .get_brightness = ld9040_get_brightness,
+ .update_status = ld9040_set_brightness,
+};
+
+
+static int ld9040_probe(struct spi_device *spi)
+{
+ int ret = 0;
+ struct ld9040 *lcd = NULL;
+ struct lcd_device *ld = NULL;
+ struct backlight_device *bd = NULL;
+
+ lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ /* ld9040 lcd panel uses 3-wire 9bits SPI Mode. */
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi setup failed.\n");
+ goto out_free_lcd;
+ }
+
+ lcd->spi = spi;
+ lcd->dev = &spi->dev;
+
+ lcd->lcd_pd = spi->dev.platform_data;
+ if (!lcd->lcd_pd) {
+ dev_err(&spi->dev, "platform data is NULL.\n");
+ goto out_free_lcd;
+ }
+
+ ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
+ if (IS_ERR(ld)) {
+ ret = PTR_ERR(ld);
+ goto out_free_lcd;
+ }
+
+ lcd->ld = ld;
+
+ bd = backlight_device_register("ld9040-bl", &spi->dev,
+ lcd, &ld9040_backlight_ops, NULL);
+	if (IS_ERR(bd)) {
+		ret = PTR_ERR(bd);
+ goto out_free_lcd;
+ }
+
+ bd->props.max_brightness = MAX_BRIGHTNESS;
+ bd->props.brightness = MAX_BRIGHTNESS;
+ lcd->bd = bd;
+
+ /*
+	 * if the lcd panel was already enabled by a bootloader such as
+	 * u-boot, do not power it on again.
+ */
+ if (!lcd->lcd_pd->lcd_enabled) {
+ /*
+		 * the lcd panel was off at boot, so record the powerdown
+		 * state and then power the panel on.
+ */
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ ld9040_power(lcd, FB_BLANK_UNBLANK);
+ } else
+ lcd->power = FB_BLANK_UNBLANK;
+
+ dev_set_drvdata(&spi->dev, lcd);
+
+ dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
+ return 0;
+
+out_unregister_lcd:
+	lcd_device_unregister(lcd->ld);
+out_free_lcd:
+ kfree(lcd);
+ return ret;
+}
+
+static int __devexit ld9040_remove(struct spi_device *spi)
+{
+ struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+
+ ld9040_power(lcd, FB_BLANK_POWERDOWN);
+	backlight_device_unregister(lcd->bd);
+	lcd_device_unregister(lcd->ld);
+ kfree(lcd);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+static int ld9040_suspend(struct spi_device *spi, pm_message_t mesg)
+{
+ int ret = 0;
+ struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+
+ dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
+
+ /*
+	 * When the LCD panel is suspended, it is powered off
+	 * regardless of its current state.
+ */
+ ret = ld9040_power(lcd, FB_BLANK_POWERDOWN);
+
+ return ret;
+}
+
+static int ld9040_resume(struct spi_device *spi)
+{
+ int ret = 0;
+ struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ ret = ld9040_power(lcd, FB_BLANK_UNBLANK);
+
+ return ret;
+}
+#else
+#define ld9040_suspend NULL
+#define ld9040_resume NULL
+#endif
+
+/* Power down all displays on reboot, poweroff or halt. */
+static void ld9040_shutdown(struct spi_device *spi)
+{
+ struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+
+ ld9040_power(lcd, FB_BLANK_POWERDOWN);
+}
+
+static struct spi_driver ld9040_driver = {
+ .driver = {
+ .name = "ld9040",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ld9040_probe,
+ .remove = __devexit_p(ld9040_remove),
+ .shutdown = ld9040_shutdown,
+ .suspend = ld9040_suspend,
+ .resume = ld9040_resume,
+};
+
+static int __init ld9040_init(void)
+{
+ return spi_register_driver(&ld9040_driver);
+}
+
+static void __exit ld9040_exit(void)
+{
+ spi_unregister_driver(&ld9040_driver);
+}
+
+module_init(ld9040_init);
+module_exit(ld9040_exit);
+
+MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
+MODULE_DESCRIPTION("ld9040 LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ld9040_gamma.h b/drivers/video/backlight/ld9040_gamma.h
new file mode 100644
index 000000000000..038d9c86ec03
--- /dev/null
+++ b/drivers/video/backlight/ld9040_gamma.h
@@ -0,0 +1,200 @@
+/*
+ * Gamma level definitions.
+ *
+ * Copyright (c) 2011 Samsung Electronics
+ * InKi Dae <inki.dae@samsung.com>
+ * Donghwa Lee <dh09.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _LD9040_BRIGHTNESS_H
+#define _LD9040_BRIGHTNESS_H
+
+#define MAX_GAMMA_LEVEL 25
+#define GAMMA_TABLE_COUNT 21
+
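+/*
+ * Each table below holds GAMMA_TABLE_COUNT (21) register values for one
+ * brightness level; the MAX_GAMMA_LEVEL (25) tables, ld9040_22_50 through
+ * ld9040_22_300, are collected in gamma_table at the end of this file,
+ * ordered from dimmest to brightest.
+ */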
+/* gamma value: 2.2 */
+static const unsigned int ld9040_22_300[] = {
+ 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91,
+ 0x00, 0xb2, 0xb4, 0xaa, 0xbb, 0x00, 0xac,
+ 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3
+};
+
+static const unsigned int ld9040_22_290[] = {
+ 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89,
+ 0x00, 0xb7, 0xb6, 0xa8, 0xba, 0x00, 0xa4,
+ 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa
+};
+
+static const unsigned int ld9040_22_280[] = {
+ 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86,
+ 0x00, 0xb8, 0xb5, 0xa8, 0xbc, 0x00, 0xa0,
+ 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7
+};
+
+static const unsigned int ld9040_22_270[] = {
+ 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84,
+ 0x00, 0xb9, 0xb7, 0xa8, 0xbc, 0x00, 0x9d,
+ 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4
+};
+
+static const unsigned int ld9040_22_260[] = {
+ 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80,
+ 0x00, 0xb8, 0xb6, 0xaa, 0xbc, 0x00, 0x9a,
+ 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0
+};
+
+static const unsigned int ld9040_22_250[] = {
+ 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d,
+ 0x00, 0xb9, 0xb6, 0xaa, 0xbb, 0x00, 0x97,
+ 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d
+};
+
+static const unsigned int ld9040_22_240[] = {
+ 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a,
+ 0x00, 0xb9, 0xb7, 0xaa, 0xbd, 0x00, 0x94,
+ 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a
+};
+
+static const unsigned int ld9040_22_230[] = {
+ 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77,
+ 0x00, 0xb9, 0xb7, 0xab, 0xbe, 0x00, 0x90,
+ 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97
+};
+
+static const unsigned int ld9040_22_220[] = {
+ 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75,
+ 0x00, 0xb9, 0xb8, 0xab, 0xbe, 0x00, 0x8e,
+ 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94
+};
+
+static const unsigned int ld9040_22_210[] = {
+ 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72,
+ 0x00, 0xb8, 0xb8, 0xac, 0xbf, 0x00, 0x8a,
+ 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91
+};
+
+static const unsigned int ld9040_22_200[] = {
+ 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f,
+ 0x00, 0xb8, 0xb8, 0xad, 0xc0, 0x00, 0x86,
+ 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d
+};
+
+static const unsigned int ld9040_22_190[] = {
+ 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c,
+ 0x00, 0xb8, 0xb8, 0xae, 0xc1, 0x00, 0x82,
+ 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89
+};
+
+static const unsigned int ld9040_22_180[] = {
+ 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69,
+ 0x00, 0xb8, 0xb9, 0xae, 0xc1, 0x00, 0x7f,
+ 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85
+};
+
+static const unsigned int ld9040_22_170[] = {
+ 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65,
+ 0x00, 0xb7, 0xb8, 0xaf, 0xc3, 0x00, 0x7a,
+ 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81
+};
+
+static const unsigned int ld9040_22_160[] = {
+ 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62,
+ 0x00, 0xb6, 0xba, 0xaf, 0xc3, 0x00, 0x76,
+ 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e
+};
+
+static const unsigned int ld9040_22_150[] = {
+ 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f,
+ 0x00, 0xb5, 0xba, 0xb0, 0xc3, 0x00, 0x72,
+ 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a
+};
+
+static const unsigned int ld9040_22_140[] = {
+ 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b,
+ 0x00, 0xb5, 0xba, 0xb1, 0xc4, 0x00, 0x6e,
+ 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75
+};
+
+static const unsigned int ld9040_22_130[] = {
+ 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57,
+ 0x00, 0xb5, 0xbb, 0xb0, 0xc5, 0x00, 0x6a,
+ 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70
+};
+
+static const unsigned int ld9040_22_120[] = {
+ 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53,
+ 0x00, 0xb5, 0xbb, 0xb3, 0xc6, 0x00, 0x65,
+ 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c
+};
+
+static const unsigned int ld9040_22_110[] = {
+ 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f,
+ 0x00, 0xb4, 0xbb, 0xb3, 0xc7, 0x00, 0x60,
+ 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67
+};
+
+static const unsigned int ld9040_22_100[] = {
+ 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b,
+ 0x00, 0xb3, 0xbc, 0xb4, 0xc7, 0x00, 0x5c,
+ 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62
+};
+
+static const unsigned int ld9040_22_90[] = {
+ 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46,
+ 0x00, 0xb1, 0xbc, 0xb5, 0xc8, 0x00, 0x56,
+ 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d
+};
+
+static const unsigned int ld9040_22_80[] = {
+ 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41,
+ 0x00, 0xb0, 0xbe, 0xb5, 0xc9, 0x00, 0x51,
+ 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57
+};
+
+static const unsigned int ld9040_22_70[] = {
+ 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c,
+ 0x00, 0xaf, 0xbf, 0xb6, 0xcb, 0x00, 0x4b,
+ 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52
+};
+
+static const unsigned int ld9040_22_50[] = {
+ 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30,
+ 0x00, 0xaf, 0xc0, 0xb8, 0xcd, 0x00, 0x3d,
+ 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44
+};
+
+struct ld9040_gamma {
+ unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
+} gamma_table = {
+ .gamma_22_table[0] = (unsigned int *)&ld9040_22_50,
+ .gamma_22_table[1] = (unsigned int *)&ld9040_22_70,
+ .gamma_22_table[2] = (unsigned int *)&ld9040_22_80,
+ .gamma_22_table[3] = (unsigned int *)&ld9040_22_90,
+ .gamma_22_table[4] = (unsigned int *)&ld9040_22_100,
+ .gamma_22_table[5] = (unsigned int *)&ld9040_22_110,
+ .gamma_22_table[6] = (unsigned int *)&ld9040_22_120,
+ .gamma_22_table[7] = (unsigned int *)&ld9040_22_130,
+ .gamma_22_table[8] = (unsigned int *)&ld9040_22_140,
+ .gamma_22_table[9] = (unsigned int *)&ld9040_22_150,
+ .gamma_22_table[10] = (unsigned int *)&ld9040_22_160,
+ .gamma_22_table[11] = (unsigned int *)&ld9040_22_170,
+ .gamma_22_table[12] = (unsigned int *)&ld9040_22_180,
+ .gamma_22_table[13] = (unsigned int *)&ld9040_22_190,
+ .gamma_22_table[14] = (unsigned int *)&ld9040_22_200,
+ .gamma_22_table[15] = (unsigned int *)&ld9040_22_210,
+ .gamma_22_table[16] = (unsigned int *)&ld9040_22_220,
+ .gamma_22_table[17] = (unsigned int *)&ld9040_22_230,
+ .gamma_22_table[18] = (unsigned int *)&ld9040_22_240,
+ .gamma_22_table[19] = (unsigned int *)&ld9040_22_250,
+ .gamma_22_table[20] = (unsigned int *)&ld9040_22_260,
+ .gamma_22_table[21] = (unsigned int *)&ld9040_22_270,
+ .gamma_22_table[22] = (unsigned int *)&ld9040_22_280,
+ .gamma_22_table[23] = (unsigned int *)&ld9040_22_290,
+ .gamma_22_table[24] = (unsigned int *)&ld9040_22_300,
+};
+
+#endif
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index d2f59015d517..bbca3127071e 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -184,6 +184,7 @@ static int locomolcd_probe(struct locomo_dev *ldev)
local_irq_restore(flags);
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 4;
locomolcd_bl_device = backlight_device_register("locomo-bl",
&ldev->dev, NULL,
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 209acc105cbc..07e8e273ced0 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -136,6 +136,7 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev)
data->current_brightness = 0;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHTNESS;
bl = backlight_device_register(name, &pdev->dev, data,
&max8925_backlight_ops, &props);
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
deleted file mode 100644
index 1485f7345f49..000000000000
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Backlight Driver for Nvidia 8600 in Macbook Pro
- *
- * Copyright (c) Red Hat <mjg@redhat.com>
- * Based on code from Pommed:
- * Copyright (C) 2006 Nicolas Boichat <nicolas @boichat.ch>
- * Copyright (C) 2006 Felipe Alfaro Solana <felipe_alfaro @linuxmail.org>
- * Copyright (C) 2007 Julien BLACHE <jb@jblache.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This driver triggers SMIs which cause the firmware to change the
- * backlight brightness. This is icky in many ways, but it's impractical to
- * get at the firmware code in order to figure out what it's actually doing.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/backlight.h>
-#include <linux/err.h>
-#include <linux/dmi.h>
-#include <linux/io.h>
-
-static struct backlight_device *mbp_backlight_device;
-
-/* Structure to be passed to the DMI_MATCH function. */
-struct dmi_match_data {
- /* I/O resource to allocate. */
- unsigned long iostart;
- unsigned long iolen;
- /* Backlight operations structure. */
- const struct backlight_ops backlight_ops;
-};
-
-/* Module parameters. */
-static int debug;
-module_param_named(debug, debug, int, 0644);
-MODULE_PARM_DESC(debug, "Set to one to enable debugging messages.");
-
-/*
- * Implementation for MacBooks with Intel chipset.
- */
-static int intel_chipset_send_intensity(struct backlight_device *bd)
-{
- int intensity = bd->props.brightness;
-
- if (debug)
- printk(KERN_DEBUG "mbp_nvidia_bl: setting brightness to %d\n",
- intensity);
-
- outb(0x04 | (intensity << 4), 0xb3);
- outb(0xbf, 0xb2);
- return 0;
-}
-
-static int intel_chipset_get_intensity(struct backlight_device *bd)
-{
- int intensity;
-
- outb(0x03, 0xb3);
- outb(0xbf, 0xb2);
- intensity = inb(0xb3) >> 4;
-
- if (debug)
- printk(KERN_DEBUG "mbp_nvidia_bl: read brightness of %d\n",
- intensity);
-
- return intensity;
-}
-
-static const struct dmi_match_data intel_chipset_data = {
- .iostart = 0xb2,
- .iolen = 2,
- .backlight_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .get_brightness = intel_chipset_get_intensity,
- .update_status = intel_chipset_send_intensity,
- }
-};
-
-/*
- * Implementation for MacBooks with Nvidia chipset.
- */
-static int nvidia_chipset_send_intensity(struct backlight_device *bd)
-{
- int intensity = bd->props.brightness;
-
- if (debug)
- printk(KERN_DEBUG "mbp_nvidia_bl: setting brightness to %d\n",
- intensity);
-
- outb(0x04 | (intensity << 4), 0x52f);
- outb(0xbf, 0x52e);
- return 0;
-}
-
-static int nvidia_chipset_get_intensity(struct backlight_device *bd)
-{
- int intensity;
-
- outb(0x03, 0x52f);
- outb(0xbf, 0x52e);
- intensity = inb(0x52f) >> 4;
-
- if (debug)
- printk(KERN_DEBUG "mbp_nvidia_bl: read brightness of %d\n",
- intensity);
-
- return intensity;
-}
-
-static const struct dmi_match_data nvidia_chipset_data = {
- .iostart = 0x52e,
- .iolen = 2,
- .backlight_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .get_brightness = nvidia_chipset_get_intensity,
- .update_status = nvidia_chipset_send_intensity
- }
-};
-
-/*
- * DMI matching.
- */
-static /* const */ struct dmi_match_data *driver_data;
-
-static int mbp_dmi_match(const struct dmi_system_id *id)
-{
- driver_data = id->driver_data;
-
- printk(KERN_INFO "mbp_nvidia_bl: %s detected\n", id->ident);
- return 1;
-}
-
-static const struct dmi_system_id __initdata mbp_device_table[] = {
- {
- .callback = mbp_dmi_match,
- .ident = "MacBook 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBook 2,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBook 3,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBook 4,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBook 4,2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 1,2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,2"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 2,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,1"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 2,2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 3,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 3,2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,2"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 4,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4,1"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookAir 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir1,1"),
- },
- .driver_data = (void *)&intel_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBook 5,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,1"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBook 5,2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBook 6,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookAir 2,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir2,1"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 5,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 5,2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,2"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 5,3",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 5,4",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookPro 5,5",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,5"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookAir 3,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookAir 3,2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- { }
-};
-
-static int __init mbp_init(void)
-{
- struct backlight_properties props;
- if (!dmi_check_system(mbp_device_table))
- return -ENODEV;
-
- if (!request_region(driver_data->iostart, driver_data->iolen,
- "Macbook Pro backlight"))
- return -ENXIO;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 15;
- mbp_backlight_device = backlight_device_register("mbp_backlight", NULL,
- NULL,
- &driver_data->backlight_ops,
- &props);
- if (IS_ERR(mbp_backlight_device)) {
- release_region(driver_data->iostart, driver_data->iolen);
- return PTR_ERR(mbp_backlight_device);
- }
-
- mbp_backlight_device->props.brightness =
- driver_data->backlight_ops.get_brightness(mbp_backlight_device);
- backlight_update_status(mbp_backlight_device);
-
- return 0;
-}
-
-static void __exit mbp_exit(void)
-{
- backlight_device_unregister(mbp_backlight_device);
-
- release_region(driver_data->iostart, driver_data->iolen);
-}
-
-module_init(mbp_init);
-module_exit(mbp_exit);
-
-MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_DESCRIPTION("Nvidia-based Macbook Pro Backlight Driver");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(dmi, mbp_device_table);
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index d3bc56296c8d..08d26a72394c 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -146,6 +146,7 @@ static int omapbl_probe(struct platform_device *pdev)
return -ENOMEM;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = OMAPBL_MAX_INTENSITY;
dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops,
&props);
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index 3c424f7efdcc..ef5628d60563 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -112,6 +112,7 @@ static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
if (!pcf_bl)
return -ENOMEM;
+ bl_props.type = BACKLIGHT_RAW;
bl_props.max_brightness = 0x3f;
bl_props.power = FB_BLANK_UNBLANK;
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 809278c90738..6af183d6465e 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -84,6 +84,7 @@ static int progearbl_probe(struct platform_device *pdev)
pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20);
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN;
progear_backlight_device = backlight_device_register("progear-bl",
&pdev->dev, NULL,
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 21866ec69656..b8f38ec6eb18 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,6 +28,7 @@ struct pwm_bl_data {
unsigned int lth_brightness;
int (*notify)(struct device *,
int brightness);
+ int (*check_fb)(struct device *, struct fb_info *);
};
static int pwm_backlight_update_status(struct backlight_device *bl)
@@ -62,9 +63,18 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
return bl->props.brightness;
}
+static int pwm_backlight_check_fb(struct backlight_device *bl,
+ struct fb_info *info)
+{
+ struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+
+ return !pb->check_fb || pb->check_fb(pb->dev, info);
+}
+
static const struct backlight_ops pwm_backlight_ops = {
.update_status = pwm_backlight_update_status,
.get_brightness = pwm_backlight_get_brightness,
+ .check_fb = pwm_backlight_check_fb,
};
static int pwm_backlight_probe(struct platform_device *pdev)
@@ -95,6 +105,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->period = data->pwm_period_ns;
pb->notify = data->notify;
+ pb->check_fb = data->check_fb;
pb->lth_brightness = data->lth_brightness *
(data->pwm_period_ns / data->max_brightness);
pb->dev = &pdev->dev;
@@ -108,6 +119,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "got pwm for backlight\n");
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = data->max_brightness;
bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb,
&pwm_backlight_ops, &props);
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 5927db0da999..322040f686c2 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -778,6 +778,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
bd->props.max_brightness = MAX_BRIGHTNESS;
bd->props.brightness = MAX_BRIGHTNESS;
+ bd->props.type = BACKLIGHT_RAW;
lcd->bd = bd;
/*
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 2a04b382ec48..425a7365470b 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -102,6 +102,7 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
data->i2c = client;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 512 - 1;
data->bl = backlight_device_register("tosa-bl", &client->dev, data,
&bl_ops, &props);
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 08fd87f3aecc..d4c6eb248ff9 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -193,6 +193,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
data->current_brightness = 0;
data->isink_reg = isink_reg;
+ props.type = BACKLIGHT_RAW;
props.max_brightness = max_isel;
bl = backlight_device_register("wm831x", &pdev->dev, data,
&wm831x_backlight_ops, &props);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index e7d0f525041e..2464b910b590 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -649,6 +649,7 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
}
#ifndef NO_BL_SUPPORT
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 255;
bl_dev = backlight_device_register("bf54x-bl", NULL, NULL,
&bfin_lq043fb_bl_ops, &props);
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 3cf77676947c..d8de29f0dd8d 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -545,6 +545,7 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
}
#ifndef NO_BL_SUPPORT
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 255;
bl_dev = backlight_device_register("bf52x-bl", NULL, NULL,
&bfin_lq043fb_bl_ops, &props);
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index e2c85b0db632..f18895006627 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -565,6 +565,7 @@ out_dealloc_cmap:
out_unmap_regs:
cg14_unmap_regs(op, info, par);
+ framebuffer_release(info);
out_err:
return err;
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 4ffad90bde42..179e96cdb323 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -821,6 +821,7 @@ out_dealloc_cmap:
out_unmap_regs:
cg6_unmap_regs(op, info, par);
+ framebuffer_release(info);
out_err:
return err;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 9c092b8d64e6..c58393402da2 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -823,10 +823,10 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (oldidx == newidx)
return 0;
- if (!info || fbcon_has_exited)
+ if (!info)
return -EINVAL;
- if (!err && !search_for_mapped_con()) {
+ if (!search_for_mapped_con() || !con_is_bound(&fb_con)) {
info_idx = newidx;
return fbcon_takeover(0);
}
diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c
index 0056a41e5c35..15e8e1a89c45 100644
--- a/drivers/video/console/tileblit.c
+++ b/drivers/video/console/tileblit.c
@@ -83,7 +83,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int softback_lines, int fg, int bg)
{
struct fb_tilecursor cursor;
- int use_sw = (vc->vc_cursor_type & 0x01);
+ int use_sw = (vc->vc_cursor_type & 0x10);
cursor.sx = vc->vc_x;
cursor.sy = vc->vc_y;
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 0c1afd13ddd3..850380795b05 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -47,6 +47,8 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -61,10 +63,10 @@ struct cfb_info {
struct fb_info fb;
struct display_switch *dispsw;
struct display *display;
- struct pci_dev *dev;
unsigned char __iomem *region;
unsigned char __iomem *regs;
u_int id;
+ u_int irq;
int func_use_count;
u_long ref_ps;
@@ -88,6 +90,19 @@ struct cfb_info {
u_char ramdac_powerdown;
u32 pseudo_palette[16];
+
+ spinlock_t reg_b0_lock;
+
+#ifdef CONFIG_FB_CYBER2000_DDC
+ bool ddc_registered;
+ struct i2c_adapter ddc_adapter;
+ struct i2c_algo_bit_data ddc_algo;
+#endif
+
+#ifdef CONFIG_FB_CYBER2000_I2C
+ struct i2c_adapter i2c_adapter;
+ struct i2c_algo_bit_data i2c_algo;
+#endif
};
static char *default_font = "Acorn8x8";
@@ -494,6 +509,7 @@ static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)
cyber2000_attrw(0x14, 0x00, cfb);
/* PLL registers */
+ spin_lock(&cfb->reg_b0_lock);
cyber2000_grphw(EXT_DCLK_MULT, hw->clock_mult, cfb);
cyber2000_grphw(EXT_DCLK_DIV, hw->clock_div, cfb);
cyber2000_grphw(EXT_MCLK_MULT, cfb->mclk_mult, cfb);
@@ -501,6 +517,7 @@ static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)
cyber2000_grphw(0x90, 0x01, cfb);
cyber2000_grphw(0xb9, 0x80, cfb);
cyber2000_grphw(0xb9, 0x00, cfb);
+ spin_unlock(&cfb->reg_b0_lock);
cfb->ramdac_ctrl = hw->ramdac;
cyber2000fb_write_ramdac_ctrl(cfb);
@@ -681,9 +698,9 @@ cyber2000fb_decode_clock(struct par_info *hw, struct cfb_info *cfb,
* pll_ps_calc = best_div1 / (ref_ps * best_mult)
*/
best_diff = 0x7fffffff;
- best_mult = 32;
- best_div1 = 255;
- for (t_div1 = 32; t_div1 > 1; t_div1 -= 1) {
+ best_mult = 2;
+ best_div1 = 32;
+ for (t_div1 = 2; t_div1 < 32; t_div1 += 1) {
u_int rr, t_mult, t_pll_ps;
int diff;
@@ -1105,24 +1122,22 @@ void cyber2000fb_disable_extregs(struct cfb_info *cfb)
}
EXPORT_SYMBOL(cyber2000fb_disable_extregs);
-void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var)
-{
- memcpy(var, &cfb->fb.var, sizeof(struct fb_var_screeninfo));
-}
-EXPORT_SYMBOL(cyber2000fb_get_fb_var);
-
/*
* Attach a capture/tv driver to the core CyberX0X0 driver.
*/
int cyber2000fb_attach(struct cyberpro_info *info, int idx)
{
if (int_cfb_info != NULL) {
- info->dev = int_cfb_info->dev;
+ info->dev = int_cfb_info->fb.device;
+#ifdef CONFIG_FB_CYBER2000_I2C
+ info->i2c = &int_cfb_info->i2c_adapter;
+#else
+ info->i2c = NULL;
+#endif
info->regs = int_cfb_info->regs;
+ info->irq = int_cfb_info->irq;
info->fb = int_cfb_info->fb.screen_base;
info->fb_size = int_cfb_info->fb.fix.smem_len;
- info->enable_extregs = cyber2000fb_enable_extregs;
- info->disable_extregs = cyber2000fb_disable_extregs;
info->info = int_cfb_info;
strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
@@ -1141,6 +1156,183 @@ void cyber2000fb_detach(int idx)
}
EXPORT_SYMBOL(cyber2000fb_detach);
+#ifdef CONFIG_FB_CYBER2000_DDC
+
+#define DDC_REG 0xb0
+#define DDC_SCL_OUT (1 << 0)
+#define DDC_SDA_OUT (1 << 4)
+#define DDC_SCL_IN (1 << 2)
+#define DDC_SDA_IN (1 << 6)
+
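+/*
+ * Bit-banged DDC access, serialized against the PLL register setup via
+ * reg_b0_lock (taken in the enable/disable helpers below). Note that the
+ * *_OUT bits are inverted with respect to the logical bus level: setting
+ * a bit corresponds to the line being driven low.
+ */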
+static void cyber2000fb_enable_ddc(struct cfb_info *cfb)
+{
+ spin_lock(&cfb->reg_b0_lock);
+ cyber2000fb_writew(0x1bf, 0x3ce, cfb);
+}
+
+static void cyber2000fb_disable_ddc(struct cfb_info *cfb)
+{
+ cyber2000fb_writew(0x0bf, 0x3ce, cfb);
+ spin_unlock(&cfb->reg_b0_lock);
+}
+
+
+static void cyber2000fb_ddc_setscl(void *data, int val)
+{
+ struct cfb_info *cfb = data;
+ unsigned char reg;
+
+ cyber2000fb_enable_ddc(cfb);
+ reg = cyber2000_grphr(DDC_REG, cfb);
+ if (!val) /* bit is inverted */
+ reg |= DDC_SCL_OUT;
+ else
+ reg &= ~DDC_SCL_OUT;
+ cyber2000_grphw(DDC_REG, reg, cfb);
+ cyber2000fb_disable_ddc(cfb);
+}
+
+static void cyber2000fb_ddc_setsda(void *data, int val)
+{
+ struct cfb_info *cfb = data;
+ unsigned char reg;
+
+ cyber2000fb_enable_ddc(cfb);
+ reg = cyber2000_grphr(DDC_REG, cfb);
+ if (!val) /* bit is inverted */
+ reg |= DDC_SDA_OUT;
+ else
+ reg &= ~DDC_SDA_OUT;
+ cyber2000_grphw(DDC_REG, reg, cfb);
+ cyber2000fb_disable_ddc(cfb);
+}
+
+static int cyber2000fb_ddc_getscl(void *data)
+{
+ struct cfb_info *cfb = data;
+ int retval;
+
+ cyber2000fb_enable_ddc(cfb);
+ retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SCL_IN);
+ cyber2000fb_disable_ddc(cfb);
+
+ return retval;
+}
+
+static int cyber2000fb_ddc_getsda(void *data)
+{
+ struct cfb_info *cfb = data;
+ int retval;
+
+ cyber2000fb_enable_ddc(cfb);
+ retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SDA_IN);
+ cyber2000fb_disable_ddc(cfb);
+
+ return retval;
+}
+
+static int __devinit cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
+{
+ strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
+ sizeof(cfb->ddc_adapter.name));
+ cfb->ddc_adapter.owner = THIS_MODULE;
+ cfb->ddc_adapter.class = I2C_CLASS_DDC;
+ cfb->ddc_adapter.algo_data = &cfb->ddc_algo;
+ cfb->ddc_adapter.dev.parent = cfb->fb.device;
+ cfb->ddc_algo.setsda = cyber2000fb_ddc_setsda;
+ cfb->ddc_algo.setscl = cyber2000fb_ddc_setscl;
+ cfb->ddc_algo.getsda = cyber2000fb_ddc_getsda;
+ cfb->ddc_algo.getscl = cyber2000fb_ddc_getscl;
+ cfb->ddc_algo.udelay = 10;
+ cfb->ddc_algo.timeout = 20;
+ cfb->ddc_algo.data = cfb;
+
+ i2c_set_adapdata(&cfb->ddc_adapter, cfb);
+
+ return i2c_bit_add_bus(&cfb->ddc_adapter);
+}
+#endif /* CONFIG_FB_CYBER2000_DDC */
+
+#ifdef CONFIG_FB_CYBER2000_I2C
+static void cyber2000fb_i2c_setsda(void *data, int state)
+{
+ struct cfb_info *cfb = data;
+ unsigned int latch2;
+
+ spin_lock(&cfb->reg_b0_lock);
+ latch2 = cyber2000_grphr(EXT_LATCH2, cfb);
+ latch2 &= EXT_LATCH2_I2C_CLKEN;
+ if (state)
+ latch2 |= EXT_LATCH2_I2C_DATEN;
+ cyber2000_grphw(EXT_LATCH2, latch2, cfb);
+ spin_unlock(&cfb->reg_b0_lock);
+}
+
+static void cyber2000fb_i2c_setscl(void *data, int state)
+{
+ struct cfb_info *cfb = data;
+ unsigned int latch2;
+
+ spin_lock(&cfb->reg_b0_lock);
+ latch2 = cyber2000_grphr(EXT_LATCH2, cfb);
+ latch2 &= EXT_LATCH2_I2C_DATEN;
+ if (state)
+ latch2 |= EXT_LATCH2_I2C_CLKEN;
+ cyber2000_grphw(EXT_LATCH2, latch2, cfb);
+ spin_unlock(&cfb->reg_b0_lock);
+}
+
+static int cyber2000fb_i2c_getsda(void *data)
+{
+ struct cfb_info *cfb = data;
+ int ret;
+
+ spin_lock(&cfb->reg_b0_lock);
+ ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_DAT);
+ spin_unlock(&cfb->reg_b0_lock);
+
+ return ret;
+}
+
+static int cyber2000fb_i2c_getscl(void *data)
+{
+ struct cfb_info *cfb = data;
+ int ret;
+
+ spin_lock(&cfb->reg_b0_lock);
+ ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_CLK);
+ spin_unlock(&cfb->reg_b0_lock);
+
+ return ret;
+}
+
+static int __devinit cyber2000fb_i2c_register(struct cfb_info *cfb)
+{
+ strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
+ sizeof(cfb->i2c_adapter.name));
+ cfb->i2c_adapter.owner = THIS_MODULE;
+ cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
+ cfb->i2c_adapter.dev.parent = cfb->fb.device;
+ cfb->i2c_algo.setsda = cyber2000fb_i2c_setsda;
+ cfb->i2c_algo.setscl = cyber2000fb_i2c_setscl;
+ cfb->i2c_algo.getsda = cyber2000fb_i2c_getsda;
+ cfb->i2c_algo.getscl = cyber2000fb_i2c_getscl;
+ cfb->i2c_algo.udelay = 5;
+ cfb->i2c_algo.timeout = msecs_to_jiffies(100);
+ cfb->i2c_algo.data = cfb;
+
+ return i2c_bit_add_bus(&cfb->i2c_adapter);
+}
+
+static void cyber2000fb_i2c_unregister(struct cfb_info *cfb)
+{
+ i2c_del_adapter(&cfb->i2c_adapter);
+}
+#else
+#define cyber2000fb_i2c_register(cfb) (0)
+#define cyber2000fb_i2c_unregister(cfb) do { } while (0)
+#endif
+
/*
* These parameters give
* 640x480, hsync 31.5kHz, vsync 60Hz
@@ -1275,6 +1467,8 @@ static struct cfb_info __devinit *cyberpro_alloc_fb_info(unsigned int id,
cfb->fb.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
cfb->fb.pseudo_palette = cfb->pseudo_palette;
+ spin_lock_init(&cfb->reg_b0_lock);
+
fb_alloc_cmap(&cfb->fb.cmap, NR_PALETTE, 0);
return cfb;
@@ -1369,6 +1563,11 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
cfb->fb.fix.mmio_len = MMIO_SIZE;
cfb->fb.screen_base = cfb->region;
+#ifdef CONFIG_FB_CYBER2000_DDC
+ if (cyber2000fb_setup_ddc_bus(cfb) == 0)
+ cfb->ddc_registered = true;
+#endif
+
err = -EINVAL;
if (!fb_find_mode(&cfb->fb.var, &cfb->fb, NULL, NULL, 0,
&cyber2000fb_default_mode, 8)) {
@@ -1401,14 +1600,32 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
cfb->fb.var.xres, cfb->fb.var.yres,
h_sync / 1000, h_sync % 1000, v_sync);
- if (cfb->dev)
- cfb->fb.device = &cfb->dev->dev;
+ err = cyber2000fb_i2c_register(cfb);
+ if (err)
+ goto failed;
+
err = register_framebuffer(&cfb->fb);
+ if (err)
+ cyber2000fb_i2c_unregister(cfb);
failed:
+#ifdef CONFIG_FB_CYBER2000_DDC
+ if (err && cfb->ddc_registered)
+ i2c_del_adapter(&cfb->ddc_adapter);
+#endif
return err;
}
+static void __devexit cyberpro_common_remove(struct cfb_info *cfb)
+{
+ unregister_framebuffer(&cfb->fb);
+#ifdef CONFIG_FB_CYBER2000_DDC
+ if (cfb->ddc_registered)
+ i2c_del_adapter(&cfb->ddc_adapter);
+#endif
+ cyber2000fb_i2c_unregister(cfb);
+}
+
static void cyberpro_common_resume(struct cfb_info *cfb)
{
cyberpro_init_hw(cfb);
@@ -1442,12 +1659,13 @@ static int __devinit cyberpro_vl_probe(void)
if (!cfb)
goto failed_release;
- cfb->dev = NULL;
+ cfb->irq = -1;
cfb->region = ioremap(FB_START, FB_SIZE);
if (!cfb->region)
goto failed_ioremap;
cfb->regs = cfb->region + MMIO_OFFSET;
+ cfb->fb.device = NULL;
cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET;
cfb->fb.fix.smem_start = FB_START;
@@ -1585,12 +1803,13 @@ cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto failed_regions;
- cfb->dev = dev;
+ cfb->irq = dev->irq;
cfb->region = pci_ioremap_bar(dev, 0);
if (!cfb->region)
goto failed_ioremap;
cfb->regs = cfb->region + MMIO_OFFSET;
+ cfb->fb.device = &dev->dev;
cfb->fb.fix.mmio_start = pci_resource_start(dev, 0) + MMIO_OFFSET;
cfb->fb.fix.smem_start = pci_resource_start(dev, 0);
@@ -1648,15 +1867,7 @@ static void __devexit cyberpro_pci_remove(struct pci_dev *dev)
struct cfb_info *cfb = pci_get_drvdata(dev);
if (cfb) {
- /*
- * If unregister_framebuffer fails, then
- * we will be leaving hooks that could cause
- * oopsen laying around.
- */
- if (unregister_framebuffer(&cfb->fb))
- printk(KERN_WARNING "%s: danger Will Robinson, "
- "danger danger! Oopsen imminent!\n",
- cfb->fb.fix.id);
+ cyberpro_common_remove(cfb);
iounmap(cfb->region);
cyberpro_free_fb_info(cfb);
diff --git a/drivers/video/cyber2000fb.h b/drivers/video/cyber2000fb.h
index de4fc43e51c1..bad69102e774 100644
--- a/drivers/video/cyber2000fb.h
+++ b/drivers/video/cyber2000fb.h
@@ -464,12 +464,14 @@ static void debug_printf(char *fmt, ...)
struct cfb_info;
struct cyberpro_info {
- struct pci_dev *dev;
+ struct device *dev;
+ struct i2c_adapter *i2c;
unsigned char __iomem *regs;
char __iomem *fb;
char dev_name[32];
unsigned int fb_size;
unsigned int chip_id;
+ unsigned int irq;
/*
* The following is a pointer to be passed into the
@@ -478,15 +480,6 @@ struct cyberpro_info {
* is within this structure.
*/
struct cfb_info *info;
-
- /*
- * Use these to enable the BM or TV registers. In an SMP
- * environment, these two function pointers should only be
- * called from the module_init() or module_exit()
- * functions.
- */
- void (*enable_extregs)(struct cfb_info *);
- void (*disable_extregs)(struct cfb_info *);
};
#define ID_IGA_1682 0
@@ -494,8 +487,6 @@ struct cyberpro_info {
#define ID_CYBERPRO_2010 2
#define ID_CYBERPRO_5000 3
-struct fb_var_screeninfo;
-
/*
* Note! Writing to the Cyber20x0 registers from an interrupt
* routine is definitely a bad idea atm.
@@ -504,4 +495,3 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx);
void cyber2000fb_detach(int idx);
void cyber2000fb_enable_extregs(struct cfb_info *cfb);
void cyber2000fb_disable_extregs(struct cfb_info *cfb);
-void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var);
diff --git a/drivers/video/edid.h b/drivers/video/edid.h
index bd89fb3be8c2..d03a232d90b2 100644
--- a/drivers/video/edid.h
+++ b/drivers/video/edid.h
@@ -101,8 +101,8 @@
#define V_SYNC_WIDTH COMBINE_HI_4LO( V_SYNC_WIDTH_HI, V_SYNC_WIDTH_LO )
#define V_SYNC_OFFSET COMBINE_HI_4LO( V_SYNC_OFFSET_HI, V_SYNC_OFFSET_LO )
-#define H_SYNC_WIDTH COMBINE_HI_4LO( H_SYNC_WIDTH_HI, H_SYNC_WIDTH_LO )
-#define H_SYNC_OFFSET COMBINE_HI_4LO( H_SYNC_OFFSET_HI, H_SYNC_OFFSET_LO )
+#define H_SYNC_WIDTH COMBINE_HI_8LO( H_SYNC_WIDTH_HI, H_SYNC_WIDTH_LO )
+#define H_SYNC_OFFSET COMBINE_HI_8LO( H_SYNC_OFFSET_HI, H_SYNC_OFFSET_LO )
#define H_SIZE_LO (unsigned)block[ 12 ]
#define V_SIZE_LO (unsigned)block[ 13 ]
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 910c5e6f6702..14102a3f70f5 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -1010,7 +1010,7 @@ out_dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
out_unmap_dac:
- of_iounmap(&op->resource[2], par->fbc, sizeof(struct ffb_fbc));
+ of_iounmap(&op->resource[1], par->dac, sizeof(struct ffb_dac));
out_unmap_fbc:
of_iounmap(&op->resource[2], par->fbc, sizeof(struct ffb_fbc));
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index c77bcc6ab463..1b94643ecbcf 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -299,7 +299,7 @@ static int __devexit hecubafb_remove(struct platform_device *dev)
static struct platform_driver hecubafb_driver = {
.probe = hecubafb_probe,
- .remove = hecubafb_remove,
+ .remove = __devexit_p(hecubafb_remove),
.driver = {
.owner = THIS_MODULE,
.name = "hecubafb",
diff --git a/drivers/video/hpfb.c b/drivers/video/hpfb.c
index c8e280f1bb0b..ebf8495ff198 100644
--- a/drivers/video/hpfb.c
+++ b/drivers/video/hpfb.c
@@ -321,11 +321,11 @@ static int __devinit hpfb_dio_probe(struct dio_dev * d, const struct dio_device_
unsigned long paddr, vaddr;
paddr = d->resource.start;
- if (!request_mem_region(d->resource.start, d->resource.end - d->resource.start, d->name))
+ if (!request_mem_region(d->resource.start, resource_size(&d->resource), d->name))
return -EBUSY;
if (d->scode >= DIOII_SCBASE) {
- vaddr = (unsigned long)ioremap(paddr, d->resource.end - d->resource.start);
+ vaddr = (unsigned long)ioremap(paddr, resource_size(&d->resource));
} else {
vaddr = paddr + DIO_VIRADDRBASE;
}
@@ -344,7 +344,7 @@ static void __devexit hpfb_remove_one(struct dio_dev *d)
unregister_framebuffer(&fb_info);
if (d->scode >= DIOII_SCBASE)
iounmap((void *)fb_regs);
- release_mem_region(d->resource.start, d->resource.end - d->resource.start);
+ release_mem_region(d->resource.start, resource_size(&d->resource));
}
static struct dio_device_id hpfb_dio_tbl[] = {
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 69bd4a581d4a..ef72cb483834 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -499,6 +499,7 @@ static void imxfb_init_backlight(struct imxfb_info *fbi)
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = 0xff;
+ props.type = BACKLIGHT_RAW;
writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
bl = backlight_device_register("imxfb-bl", &fbi->pdev->dev, fbi,
diff --git a/drivers/video/intelfb/Makefile b/drivers/video/intelfb/Makefile
index 6c782d3ae1be..f7d631ebee8e 100644
--- a/drivers/video/intelfb/Makefile
+++ b/drivers/video/intelfb/Makefile
@@ -4,7 +4,4 @@ intelfb-y := intelfbdrv.o intelfbhw.o
intelfb-$(CONFIG_FB_INTEL_I2C) += intelfb_i2c.o
intelfb-objs := $(intelfb-y)
-ifdef CONFIG_FB_INTEL_DEBUG
-#EXTRA_CFLAGS += -DDEBUG -DVERBOSE -DREGDUMP
-EXTRA_CFLAGS += -DDEBUG -DREGDUMP
-endif
+ccflags-$(CONFIG_FB_INTEL_DEBUG) := -DDEBUG -DREGDUMP
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index a082debe824b..a74439affce9 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1461,13 +1461,6 @@ static struct board {
MGA_G100,
&vbG100,
"MGA-G100 (AGP)"},
- {PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200EV_PCI, 0xFF,
- 0, 0,
- DEVF_G200,
- 230000,
- MGA_G200,
- &vbG200,
- "MGA-G200eV (PCI)"},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_PCI, 0xFF,
0, 0,
DEVF_G200,
@@ -2119,8 +2112,6 @@ static struct pci_device_id matroxfb_devices[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G100_AGP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200EV_PCI,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_PCI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_AGP,
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index 63ed3b72b01c..ed64edfd2c43 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -765,7 +765,7 @@ static int __devexit metronomefb_remove(struct platform_device *dev)
static struct platform_driver metronomefb_driver = {
.probe = metronomefb_probe,
- .remove = metronomefb_remove,
+ .remove = __devexit_p(metronomefb_remove),
.driver = {
.owner = THIS_MODULE,
.name = "metronomefb",
diff --git a/drivers/video/msm/mdp_hw.h b/drivers/video/msm/mdp_hw.h
index 4e3deb4e592b..d80477415caa 100644
--- a/drivers/video/msm/mdp_hw.h
+++ b/drivers/video/msm/mdp_hw.h
@@ -449,6 +449,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
#define PPP_CFG_MDP_XRGB_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
#define PPP_CFG_MDP_RGBA_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
#define PPP_CFG_MDP_BGRA_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
+#define PPP_CFG_MDP_RGBX_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
#define PPP_CFG_MDP_Y_CBCR_H2V2(dir) (PPP_##dir##_C2R_8BIT | \
PPP_##dir##_C0G_8BIT | \
@@ -488,12 +489,14 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8)
#define PPP_PACK_PATTERN_MDP_RGB_888 PPP_PACK_PATTERN_MDP_RGB_565
#define PPP_PACK_PATTERN_MDP_XRGB_8888 \
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B, 8)
+ MDP_GET_PACK_PATTERN(CLR_B, CLR_G, CLR_R, CLR_ALPHA, 8)
#define PPP_PACK_PATTERN_MDP_ARGB_8888 PPP_PACK_PATTERN_MDP_XRGB_8888
#define PPP_PACK_PATTERN_MDP_RGBA_8888 \
MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R, 8)
#define PPP_PACK_PATTERN_MDP_BGRA_8888 \
MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B, 8)
+#define PPP_PACK_PATTERN_MDP_RGBX_8888 \
+ MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R, 8)
#define PPP_PACK_PATTERN_MDP_Y_CBCR_H2V1 \
MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8)
#define PPP_PACK_PATTERN_MDP_Y_CBCR_H2V2 PPP_PACK_PATTERN_MDP_Y_CBCR_H2V1
@@ -509,6 +512,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
#define PPP_CHROMA_SAMP_MDP_ARGB_8888(dir) PPP_OP_##dir##_CHROMA_RGB
#define PPP_CHROMA_SAMP_MDP_RGBA_8888(dir) PPP_OP_##dir##_CHROMA_RGB
#define PPP_CHROMA_SAMP_MDP_BGRA_8888(dir) PPP_OP_##dir##_CHROMA_RGB
+#define PPP_CHROMA_SAMP_MDP_RGBX_8888(dir) PPP_OP_##dir##_CHROMA_RGB
#define PPP_CHROMA_SAMP_MDP_Y_CBCR_H2V1(dir) PPP_OP_##dir##_CHROMA_H2V1
#define PPP_CHROMA_SAMP_MDP_Y_CBCR_H2V2(dir) PPP_OP_##dir##_CHROMA_420
#define PPP_CHROMA_SAMP_MDP_Y_CRCB_H2V1(dir) PPP_OP_##dir##_CHROMA_H2V1
@@ -523,6 +527,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
[MDP_ARGB_8888] = PPP_##name##_MDP_ARGB_8888,\
[MDP_RGBA_8888] = PPP_##name##_MDP_RGBA_8888,\
[MDP_BGRA_8888] = PPP_##name##_MDP_BGRA_8888,\
+ [MDP_RGBX_8888] = PPP_##name##_MDP_RGBX_8888,\
[MDP_Y_CBCR_H2V1] = PPP_##name##_MDP_Y_CBCR_H2V1,\
[MDP_Y_CBCR_H2V2] = PPP_##name##_MDP_Y_CBCR_H2V2,\
[MDP_Y_CRCB_H2V1] = PPP_##name##_MDP_Y_CRCB_H2V1,\
@@ -536,6 +541,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
[MDP_ARGB_8888] = PPP_##name##_MDP_ARGB_8888(dir),\
[MDP_RGBA_8888] = PPP_##name##_MDP_RGBA_8888(dir),\
[MDP_BGRA_8888] = PPP_##name##_MDP_BGRA_8888(dir),\
+ [MDP_RGBX_8888] = PPP_##name##_MDP_RGBX_8888(dir),\
[MDP_Y_CBCR_H2V1] = PPP_##name##_MDP_Y_CBCR_H2V1(dir),\
[MDP_Y_CBCR_H2V2] = PPP_##name##_MDP_Y_CBCR_H2V2(dir),\
[MDP_Y_CRCB_H2V1] = PPP_##name##_MDP_Y_CRCB_H2V1(dir),\
@@ -547,7 +553,8 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
(img == MDP_YCRYCB_H2V1))
#define IS_RGB(img) ((img == MDP_RGB_565) | (img == MDP_RGB_888) | \
(img == MDP_ARGB_8888) | (img == MDP_RGBA_8888) | \
- (img == MDP_XRGB_8888) | (img == MDP_BGRA_8888))
+ (img == MDP_XRGB_8888) | (img == MDP_BGRA_8888) | \
+ (img == MDP_RGBX_8888))
#define HAS_ALPHA(img) ((img == MDP_ARGB_8888) | (img == MDP_RGBA_8888) | \
(img == MDP_BGRA_8888))
diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c
index 4ff001f4cbbd..2b6564e8bfea 100644
--- a/drivers/video/msm/mdp_ppp.c
+++ b/drivers/video/msm/mdp_ppp.c
@@ -69,6 +69,7 @@ static uint32_t bytes_per_pixel[] = {
[MDP_ARGB_8888] = 4,
[MDP_RGBA_8888] = 4,
[MDP_BGRA_8888] = 4,
+ [MDP_RGBX_8888] = 4,
[MDP_Y_CBCR_H2V1] = 1,
[MDP_Y_CBCR_H2V2] = 1,
[MDP_Y_CRCB_H2V1] = 1,
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index debe5933fd2e..ec351309e607 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -81,7 +81,6 @@ struct msmfb_info {
spinlock_t update_lock;
struct mutex panel_init_lock;
wait_queue_head_t frame_wq;
- struct workqueue_struct *resume_workqueue;
struct work_struct resume_work;
struct msmfb_callback dma_callback;
struct msmfb_callback vsync_callback;
@@ -111,7 +110,7 @@ static void msmfb_handle_dma_interrupt(struct msmfb_callback *callback)
if (msmfb->sleeping == UPDATING &&
msmfb->frame_done == msmfb->update_frame) {
DLOG(SUSPEND_RESUME, "full update completed\n");
- queue_work(msmfb->resume_workqueue, &msmfb->resume_work);
+ schedule_work(&msmfb->resume_work);
}
spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
wake_up(&msmfb->frame_wq);
@@ -220,8 +219,8 @@ restart:
sleeping = msmfb->sleeping;
/* on a full update, if the last frame has not completed, wait for it */
- if (pan_display && (msmfb->frame_requested != msmfb->frame_done ||
- sleeping == UPDATING)) {
+ if ((pan_display && msmfb->frame_requested != msmfb->frame_done) ||
+ sleeping == UPDATING) {
int ret;
spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
ret = wait_event_interruptible_timeout(msmfb->frame_wq,
@@ -470,6 +469,18 @@ static void setup_fb_info(struct msmfb_info *msmfb)
fb_info->var.yoffset = 0;
if (msmfb->panel->caps & MSMFB_CAP_PARTIAL_UPDATES) {
+ /*
+		 * Set the marker in the fixed screen info, which userspace
+		 * cannot change; it is used to detect the partial-update
+		 * capability.
+ */
+ fb_info->fix.reserved[0] = 0x5444;
+ fb_info->fix.reserved[1] = 0x5055;
+
+ /*
+ * This preloads the value so that if userspace doesn't
+ * change it, it will be a full update
+ */
fb_info->var.reserved[0] = 0x54445055;
fb_info->var.reserved[1] = 0;
fb_info->var.reserved[2] = (uint16_t)msmfb->xres |
@@ -559,12 +570,6 @@ static int msmfb_probe(struct platform_device *pdev)
spin_lock_init(&msmfb->update_lock);
mutex_init(&msmfb->panel_init_lock);
init_waitqueue_head(&msmfb->frame_wq);
- msmfb->resume_workqueue = create_workqueue("panel_on");
- if (msmfb->resume_workqueue == NULL) {
- printk(KERN_ERR "failed to create panel_on workqueue\n");
- ret = -ENOMEM;
- goto error_create_workqueue;
- }
INIT_WORK(&msmfb->resume_work, power_on_panel);
msmfb->black = kzalloc(msmfb->fb->var.bits_per_pixel*msmfb->xres,
GFP_KERNEL);
@@ -589,8 +594,6 @@ static int msmfb_probe(struct platform_device *pdev)
return 0;
error_register_framebuffer:
- destroy_workqueue(msmfb->resume_workqueue);
-error_create_workqueue:
iounmap(fb->screen_base);
error_setup_fbmem:
framebuffer_release(msmfb->fb);
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
new file mode 100644
index 000000000000..7d0284882984
--- /dev/null
+++ b/drivers/video/mxsfb.c
@@ -0,0 +1,919 @@
+/*
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ *
+ * This code is based on:
+ * Author: Vitaly Wool <vital@embeddedalley.com>
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define DRIVER_NAME "mxsfb"
+
+/**
+ * @file
+ * @brief LCDIF driver for i.MX23 and i.MX28
+ *
+ * The LCDIF supports four modes of operation:
+ * - MPU interface (to drive smart displays) -> not supported yet
+ * - VSYNC interface (like MPU interface plus Vsync) -> not supported yet
+ * - Dotclock interface (to drive LC displays with RGB data and sync signals)
+ * - DVI (to drive ITU-R BT656) -> not supported yet
+ *
+ * This driver depends on a correct setup of the pins used for this purpose
+ * (platform specific).
+ *
+ * For the developer: don't forget to set the data bus width of the display
+ * in the imx_fb_videomode structure, otherwise you will end up with ugly
+ * colours. If you are fighting jitter, you can vary the clock delay; this is
+ * a feature of the i.MX28 and it can be varied between 2 ns and 8 ns in
+ * 2 ns steps. Give the required value in the imx_fb_videomode structure.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <mach/mxsfb.h>
+
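+/*
+ * Most LCDIF registers have companion "set" and "clear" addresses at the
+ * offsets below; writing a mask there sets or clears the corresponding
+ * bits without a read-modify-write cycle (see the REG_SET writes to
+ * LCDC_CTRL further down).
+ */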
+#define REG_SET 4
+#define REG_CLR 8
+
+#define LCDC_CTRL 0x00
+#define LCDC_CTRL1 0x10
+#define LCDC_V4_CTRL2 0x20
+#define LCDC_V3_TRANSFER_COUNT 0x20
+#define LCDC_V4_TRANSFER_COUNT 0x30
+#define LCDC_V4_CUR_BUF 0x40
+#define LCDC_V4_NEXT_BUF 0x50
+#define LCDC_V3_CUR_BUF 0x30
+#define LCDC_V3_NEXT_BUF 0x40
+#define LCDC_TIMING 0x60
+#define LCDC_VDCTRL0 0x70
+#define LCDC_VDCTRL1 0x80
+#define LCDC_VDCTRL2 0x90
+#define LCDC_VDCTRL3 0xa0
+#define LCDC_VDCTRL4 0xb0
+#define LCDC_DVICTRL0 0xc0
+#define LCDC_DVICTRL1 0xd0
+#define LCDC_DVICTRL2 0xe0
+#define LCDC_DVICTRL3 0xf0
+#define LCDC_DVICTRL4 0x100
+#define LCDC_V4_DATA 0x180
+#define LCDC_V3_DATA 0x1b0
+#define LCDC_V4_DEBUG0 0x1d0
+#define LCDC_V3_DEBUG0 0x1f0
+
+#define CTRL_SFTRST (1 << 31)
+#define CTRL_CLKGATE (1 << 30)
+#define CTRL_BYPASS_COUNT (1 << 19)
+#define CTRL_VSYNC_MODE (1 << 18)
+#define CTRL_DOTCLK_MODE (1 << 17)
+#define CTRL_DATA_SELECT (1 << 16)
+#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
+#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
+#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
+#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
+#define CTRL_MASTER (1 << 5)
+#define CTRL_DF16 (1 << 3)
+#define CTRL_DF18 (1 << 2)
+#define CTRL_DF24 (1 << 1)
+#define CTRL_RUN (1 << 0)
+
+#define CTRL1_FIFO_CLEAR (1 << 21)
+#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
+#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
+
+#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
+#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
+#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
+#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff)
+
+
+#define VDCTRL0_ENABLE_PRESENT (1 << 28)
+#define VDCTRL0_VSYNC_ACT_HIGH (1 << 27)
+#define VDCTRL0_HSYNC_ACT_HIGH (1 << 26)
+#define VDCTRL0_DOTCLK_ACT_FAILING (1 << 25)
+#define VDCTRL0_ENABLE_ACT_HIGH (1 << 24)
+#define VDCTRL0_VSYNC_PERIOD_UNIT (1 << 21)
+#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT (1 << 20)
+#define VDCTRL0_HALF_LINE (1 << 19)
+#define VDCTRL0_HALF_LINE_MODE (1 << 18)
+#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+
+#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+
+#define VDCTRL3_MUX_SYNC_SIGNALS (1 << 29)
+#define VDCTRL3_VSYNC_ONLY (1 << 28)
+#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16)
+#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff)
+#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+#define GET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+
+#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */
+#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */
+#define VDCTRL4_SYNC_SIGNALS_ON (1 << 18)
+#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff)
+
+#define DEBUG0_HSYNC (1 << 26)
+#define DEBUG0_VSYNC (1 << 25)
+
+#define MIN_XRES 120
+#define MIN_YRES 120
+
+#define RED 0
+#define GREEN 1
+#define BLUE 2
+#define TRANSP 3
+
+enum mxsfb_devtype {
+ MXSFB_V3,
+ MXSFB_V4,
+};
+
+/* CPU dependent register offsets */
+struct mxsfb_devdata {
+ unsigned transfer_count;
+ unsigned cur_buf;
+ unsigned next_buf;
+ unsigned debug0;
+ unsigned hs_wdth_mask;
+ unsigned hs_wdth_shift;
+ unsigned ipversion;
+};
+
+struct mxsfb_info {
+ struct fb_info fb_info;
+ struct platform_device *pdev;
+ struct clk *clk;
+ void __iomem *base; /* registers */
+ unsigned allocated_size;
+ int enabled;
+ unsigned ld_intf_width;
+ unsigned dotclk_delay;
+ const struct mxsfb_devdata *devdata;
+ int mapped;
+};
+
+#define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
+#define mxsfb_is_v4(host) (host->devdata->ipversion == 4)
+
+static const struct mxsfb_devdata mxsfb_devdata[] = {
+ [MXSFB_V3] = {
+ .transfer_count = LCDC_V3_TRANSFER_COUNT,
+ .cur_buf = LCDC_V3_CUR_BUF,
+ .next_buf = LCDC_V3_NEXT_BUF,
+ .debug0 = LCDC_V3_DEBUG0,
+ .hs_wdth_mask = 0xff,
+ .hs_wdth_shift = 24,
+ .ipversion = 3,
+ },
+ [MXSFB_V4] = {
+ .transfer_count = LCDC_V4_TRANSFER_COUNT,
+ .cur_buf = LCDC_V4_CUR_BUF,
+ .next_buf = LCDC_V4_NEXT_BUF,
+ .debug0 = LCDC_V4_DEBUG0,
+ .hs_wdth_mask = 0x3fff,
+ .hs_wdth_shift = 18,
+ .ipversion = 4,
+ },
+};
+
+#define to_imxfb_host(x) (container_of(x, struct mxsfb_info, fb_info))
+
+/* mask and shift depend on the architecture */
+static inline u32 set_hsync_pulse_width(struct mxsfb_info *host, unsigned val)
+{
+ return (val & host->devdata->hs_wdth_mask) <<
+ host->devdata->hs_wdth_shift;
+}
+
+static inline u32 get_hsync_pulse_width(struct mxsfb_info *host, unsigned val)
+{
+ return (val >> host->devdata->hs_wdth_shift) &
+ host->devdata->hs_wdth_mask;
+}
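+
+/*
+ * Illustrative example: with the MXSFB_V3 devdata above (hs_wdth_mask =
+ * 0xff, hs_wdth_shift = 24), set_hsync_pulse_width(host, 5) returns
+ * 5 << 24, and get_hsync_pulse_width() shifts and masks the register
+ * value back down to recover that 5.
+ */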
+
+static const struct fb_bitfield def_rgb565[] = {
+ [RED] = {
+ .offset = 11,
+ .length = 5,
+ },
+ [GREEN] = {
+ .offset = 5,
+ .length = 6,
+ },
+ [BLUE] = {
+ .offset = 0,
+ .length = 5,
+ },
+ [TRANSP] = { /* no support for transparency */
+ .length = 0,
+ }
+};
+
+static const struct fb_bitfield def_rgb666[] = {
+ [RED] = {
+ .offset = 16,
+ .length = 6,
+ },
+ [GREEN] = {
+ .offset = 8,
+ .length = 6,
+ },
+ [BLUE] = {
+ .offset = 0,
+ .length = 6,
+ },
+ [TRANSP] = { /* no support for transparency */
+ .length = 0,
+ }
+};
+
+static const struct fb_bitfield def_rgb888[] = {
+ [RED] = {
+ .offset = 16,
+ .length = 8,
+ },
+ [GREEN] = {
+ .offset = 8,
+ .length = 8,
+ },
+ [BLUE] = {
+ .offset = 0,
+ .length = 8,
+ },
+ [TRANSP] = { /* no support for transparency */
+ .length = 0,
+ }
+};
+
+static inline unsigned chan_to_field(unsigned chan, struct fb_bitfield *bf)
+{
+ chan &= 0xffff;
+ chan >>= 16 - bf->length;
+ return chan << bf->offset;
+}
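+
+/*
+ * Illustrative example: for the 6-bit green field of def_rgb565
+ * (offset 5, length 6), chan_to_field(0xffff, &fb_info->var.green)
+ * evaluates to (0xffff >> 10) << 5 == 0x07e0.
+ */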
+
+static int mxsfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ const struct fb_bitfield *rgb = NULL;
+
+ if (var->xres < MIN_XRES)
+ var->xres = MIN_XRES;
+ if (var->yres < MIN_YRES)
+ var->yres = MIN_YRES;
+
+ var->xres_virtual = var->xres;
+
+ var->yres_virtual = var->yres;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ /* always expect RGB 565 */
+ rgb = def_rgb565;
+ break;
+ case 32:
+ switch (host->ld_intf_width) {
+ case STMLCDIF_8BIT:
+ pr_debug("Unsupported LCD bus width mapping\n");
+ break;
+ case STMLCDIF_16BIT:
+ case STMLCDIF_18BIT:
+ /* 24 bit to 18 bit mapping */
+ rgb = def_rgb666;
+ break;
+ case STMLCDIF_24BIT:
+ /* real 24 bit */
+ rgb = def_rgb888;
+ break;
+ }
+ break;
+ default:
+ pr_debug("Unsupported colour depth: %u\n", var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ /*
+ * Copy the RGB parameters for this display
+ * from the machine specific parameters.
+ */
+ var->red = rgb[RED];
+ var->green = rgb[GREEN];
+ var->blue = rgb[BLUE];
+ var->transp = rgb[TRANSP];
+
+ return 0;
+}
+
+static void mxsfb_enable_controller(struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ u32 reg;
+
+ dev_dbg(&host->pdev->dev, "%s\n", __func__);
+
+ clk_enable(host->clk);
+ clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
+
+ /* if it was disabled, re-enable the dotclk mode */
+ writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_SET);
+
+ /* enable the SYNC signals first, then the DMA engine */
+ reg = readl(host->base + LCDC_VDCTRL4);
+ reg |= VDCTRL4_SYNC_SIGNALS_ON;
+ writel(reg, host->base + LCDC_VDCTRL4);
+
+ writel(CTRL_RUN, host->base + LCDC_CTRL + REG_SET);
+
+ host->enabled = 1;
+}
+
+static void mxsfb_disable_controller(struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ unsigned loop;
+ u32 reg;
+
+ dev_dbg(&host->pdev->dev, "%s\n", __func__);
+
+ /*
+ * Even if we disable the controller here, it will still continue
+ * until its FIFOs run out of data.
+ */
+ writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_CLR);
+
+ loop = 1000;
+ while (loop) {
+ reg = readl(host->base + LCDC_CTRL);
+ if (!(reg & CTRL_RUN))
+ break;
+ loop--;
+ }
+
+ writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
+
+ clk_disable(host->clk);
+
+ host->enabled = 0;
+}
+
+static int mxsfb_set_par(struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ u32 ctrl, vdctrl0, vdctrl4;
+ int line_size, fb_size;
+ int reenable = 0;
+
+ line_size = fb_info->var.xres * (fb_info->var.bits_per_pixel >> 3);
+ fb_size = fb_info->var.yres_virtual * line_size;
+
+ if (fb_size > fb_info->fix.smem_len)
+ return -ENOMEM;
+
+ fb_info->fix.line_length = line_size;
+
+ /*
+ * It seems you cannot re-program the controller while it is still
+ * running; doing so may lead to shifted pictures (FIFO issue?).
+ * So, first stop the controller and drain its FIFOs.
+ */
+ if (host->enabled) {
+ reenable = 1;
+ mxsfb_disable_controller(fb_info);
+ }
+
+ /* clear the FIFOs */
+ writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET);
+
+ ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER |
+ CTRL_SET_BUS_WIDTH(host->ld_intf_width);
+
+ switch (fb_info->var.bits_per_pixel) {
+ case 16:
+ dev_dbg(&host->pdev->dev, "Setting up RGB565 mode\n");
+ ctrl |= CTRL_SET_WORD_LENGTH(0);
+ writel(CTRL1_SET_BYTE_PACKAGING(0xf), host->base + LCDC_CTRL1);
+ break;
+ case 32:
+ dev_dbg(&host->pdev->dev, "Setting up RGB888/666 mode\n");
+ ctrl |= CTRL_SET_WORD_LENGTH(3);
+ switch (host->ld_intf_width) {
+ case STMLCDIF_8BIT:
+ dev_dbg(&host->pdev->dev,
+ "Unsupported LCD bus width mapping\n");
+ return -EINVAL;
+ case STMLCDIF_16BIT:
+ case STMLCDIF_18BIT:
+ /* 24 bit to 18 bit mapping */
+ ctrl |= CTRL_DF24; /* ignore the upper 2 bits in
+ * each colour component
+ */
+ break;
+ case STMLCDIF_24BIT:
+ /* real 24 bit */
+ break;
+ }
+ /* do not use packed pixels: one pixel per 32-bit word instead */
+ writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1);
+ break;
+ default:
+ dev_dbg(&host->pdev->dev, "Unhandled color depth of %u\n",
+ fb_info->var.bits_per_pixel);
+ return -EINVAL;
+ }
+
+ writel(ctrl, host->base + LCDC_CTRL);
+
+ writel(TRANSFER_COUNT_SET_VCOUNT(fb_info->var.yres) |
+ TRANSFER_COUNT_SET_HCOUNT(fb_info->var.xres),
+ host->base + host->devdata->transfer_count);
+
+ vdctrl0 = VDCTRL0_ENABLE_PRESENT | /* always in DOTCLOCK mode */
+ VDCTRL0_VSYNC_PERIOD_UNIT |
+ VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
+ VDCTRL0_SET_VSYNC_PULSE_WIDTH(fb_info->var.vsync_len);
+ if (fb_info->var.sync & FB_SYNC_HOR_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT)
+ vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
+
+ writel(vdctrl0, host->base + LCDC_VDCTRL0);
+
+ /* frame length in lines */
+ writel(fb_info->var.upper_margin + fb_info->var.vsync_len +
+ fb_info->var.lower_margin + fb_info->var.yres,
+ host->base + LCDC_VDCTRL1);
+
+ /* line length in units of clocks or pixels */
+ writel(set_hsync_pulse_width(host, fb_info->var.hsync_len) |
+ VDCTRL2_SET_HSYNC_PERIOD(fb_info->var.left_margin +
+ fb_info->var.hsync_len + fb_info->var.right_margin +
+ fb_info->var.xres),
+ host->base + LCDC_VDCTRL2);
+
+ writel(SET_HOR_WAIT_CNT(fb_info->var.left_margin +
+ fb_info->var.hsync_len) |
+ SET_VERT_WAIT_CNT(fb_info->var.upper_margin +
+ fb_info->var.vsync_len),
+ host->base + LCDC_VDCTRL3);
+
+ vdctrl4 = SET_DOTCLK_H_VALID_DATA_CNT(fb_info->var.xres);
+ if (mxsfb_is_v4(host))
+ vdctrl4 |= VDCTRL4_SET_DOTCLK_DLY(host->dotclk_delay);
+ writel(vdctrl4, host->base + LCDC_VDCTRL4);
+
+ writel(fb_info->fix.smem_start +
+ fb_info->fix.line_length * fb_info->var.yoffset,
+ host->base + host->devdata->next_buf);
+
+ if (reenable)
+ mxsfb_enable_controller(fb_info);
+
+ return 0;
+}
+
+static int mxsfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *fb_info)
+{
+ unsigned int val;
+ int ret = -EINVAL;
+
+ /*
+ * If greyscale is true, then we convert the RGB value
+ * to greyscale no matter what visual we are using.
+ */
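+ /*
+ * (The weights below are the ITU-R BT.601 luma coefficients scaled
+ * by 65536: 19595 + 38470 + 7471 == 65536, i.e. roughly 0.299 R,
+ * 0.587 G and 0.114 B.)
+ */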
+ if (fb_info->var.grayscale)
+ red = green = blue = (19595 * red + 38470 * green +
+ 7471 * blue) >> 16;
+
+ switch (fb_info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ /*
+ * 12 or 16-bit True Colour. We encode the RGB value
+ * according to the RGB bitfield information.
+ */
+ if (regno < 16) {
+ u32 *pal = fb_info->pseudo_palette;
+
+ val = chan_to_field(red, &fb_info->var.red);
+ val |= chan_to_field(green, &fb_info->var.green);
+ val |= chan_to_field(blue, &fb_info->var.blue);
+
+ pal[regno] = val;
+ ret = 0;
+ }
+ break;
+
+ case FB_VISUAL_STATIC_PSEUDOCOLOR:
+ case FB_VISUAL_PSEUDOCOLOR:
+ break;
+ }
+
+ return ret;
+}
+
+static int mxsfb_blank(int blank, struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+
+ switch (blank) {
+ case FB_BLANK_POWERDOWN:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_NORMAL:
+ if (host->enabled)
+ mxsfb_disable_controller(fb_info);
+ break;
+
+ case FB_BLANK_UNBLANK:
+ if (!host->enabled)
+ mxsfb_enable_controller(fb_info);
+ break;
+ }
+ return 0;
+}
+
+static int mxsfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ unsigned offset;
+
+ if (var->xoffset != 0)
+ return -EINVAL;
+
+ offset = fb_info->fix.line_length * var->yoffset;
+
+ /* update on next VSYNC */
+ writel(fb_info->fix.smem_start + offset,
+ host->base + host->devdata->next_buf);
+
+ return 0;
+}
+
+static struct fb_ops mxsfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = mxsfb_check_var,
+ .fb_set_par = mxsfb_set_par,
+ .fb_setcolreg = mxsfb_setcolreg,
+ .fb_blank = mxsfb_blank,
+ .fb_pan_display = mxsfb_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int __devinit mxsfb_restore_mode(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+ unsigned line_count;
+ unsigned period;
+ unsigned long pa, fbsize;
+ int bits_per_pixel, ofs;
+ u32 transfer_count, vdctrl0, vdctrl2, vdctrl3, vdctrl4, ctrl;
+ struct fb_videomode vmode;
+
+ /* Only restore the mode when the controller is running */
+ ctrl = readl(host->base + LCDC_CTRL);
+ if (!(ctrl & CTRL_RUN))
+ return -EINVAL;
+
+ vdctrl0 = readl(host->base + LCDC_VDCTRL0);
+ vdctrl2 = readl(host->base + LCDC_VDCTRL2);
+ vdctrl3 = readl(host->base + LCDC_VDCTRL3);
+ vdctrl4 = readl(host->base + LCDC_VDCTRL4);
+
+ transfer_count = readl(host->base + host->devdata->transfer_count);
+
+ vmode.xres = TRANSFER_COUNT_GET_HCOUNT(transfer_count);
+ vmode.yres = TRANSFER_COUNT_GET_VCOUNT(transfer_count);
+
+ switch (CTRL_GET_WORD_LENGTH(ctrl)) {
+ case 0:
+ bits_per_pixel = 16;
+ break;
+ case 3:
+ bits_per_pixel = 32;
+ break;
+ case 1:
+ default:
+ return -EINVAL;
+
+ fb_info->var.bits_per_pixel = bits_per_pixel;
+
+ vmode.pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U);
+ vmode.hsync_len = get_hsync_pulse_width(host, vdctrl2);
+ vmode.left_margin = GET_HOR_WAIT_CNT(vdctrl3) - vmode.hsync_len;
+ vmode.right_margin = VDCTRL2_GET_HSYNC_PERIOD(vdctrl2) - vmode.hsync_len -
+ vmode.left_margin - vmode.xres;
+ vmode.vsync_len = VDCTRL0_GET_VSYNC_PULSE_WIDTH(vdctrl0);
+ period = readl(host->base + LCDC_VDCTRL1);
+ vmode.upper_margin = GET_VERT_WAIT_CNT(vdctrl3) - vmode.vsync_len;
+ vmode.lower_margin = period - vmode.vsync_len - vmode.upper_margin - vmode.yres;
+
+ vmode.vmode = FB_VMODE_NONINTERLACED;
+
+ vmode.sync = 0;
+ if (vdctrl0 & VDCTRL0_HSYNC_ACT_HIGH)
+ vmode.sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (vdctrl0 & VDCTRL0_VSYNC_ACT_HIGH)
+ vmode.sync |= FB_SYNC_VERT_HIGH_ACT;
+
+ pr_debug("Reconstructed video mode:\n");
+ pr_debug("%dx%d, hsync: %u left: %u, right: %u, vsync: %u, upper: %u, lower: %u\n",
+ vmode.xres, vmode.yres,
+ vmode.hsync_len, vmode.left_margin, vmode.right_margin,
+ vmode.vsync_len, vmode.upper_margin, vmode.lower_margin);
+ pr_debug("pixclk: %ldkHz\n", PICOS2KHZ(vmode.pixclock));
+
+ fb_add_videomode(&vmode, &fb_info->modelist);
+
+ host->ld_intf_width = CTRL_GET_BUS_WIDTH(ctrl);
+ host->dotclk_delay = VDCTRL4_GET_DOTCLK_DLY(vdctrl4);
+
+ fb_info->fix.line_length = vmode.xres * (bits_per_pixel >> 3);
+
+ pa = readl(host->base + host->devdata->cur_buf);
+ fbsize = fb_info->fix.line_length * vmode.yres;
+ if (pa < fb_info->fix.smem_start)
+ return -EINVAL;
+ if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len)
+ return -EINVAL;
+ ofs = pa - fb_info->fix.smem_start;
+ if (ofs) {
+ memmove(fb_info->screen_base, fb_info->screen_base + ofs, fbsize);
+ writel(fb_info->fix.smem_start, host->base + host->devdata->next_buf);
+ }
+
+ line_count = fb_info->fix.smem_len / fb_info->fix.line_length;
+ fb_info->fix.ypanstep = 1;
+
+ clk_enable(host->clk);
+ host->enabled = 1;
+
+ return 0;
+}
+
+static int __devinit mxsfb_init_fbinfo(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+ struct fb_var_screeninfo *var = &fb_info->var;
+ struct mxsfb_platform_data *pdata = host->pdev->dev.platform_data;
+ dma_addr_t fb_phys;
+ void *fb_virt;
+ unsigned fb_size = pdata->fb_size;
+
+ fb_info->fbops = &mxsfb_ops;
+ fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST;
+ strlcpy(fb_info->fix.id, "mxs", sizeof(fb_info->fix.id));
+ fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
+ fb_info->fix.ypanstep = 1;
+ fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
+ fb_info->fix.accel = FB_ACCEL_NONE;
+
+ var->bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
+ var->nonstd = 0;
+ var->activate = FB_ACTIVATE_NOW;
+ var->accel_flags = 0;
+ var->vmode = FB_VMODE_NONINTERLACED;
+
+ host->dotclk_delay = pdata->dotclk_delay;
+ host->ld_intf_width = pdata->ld_intf_width;
+
+ /* Memory allocation for framebuffer */
+ if (pdata->fb_phys) {
+ if (!fb_size)
+ return -EINVAL;
+
+ fb_phys = pdata->fb_phys;
+
+ if (!request_mem_region(fb_phys, fb_size, host->pdev->name))
+ return -ENOMEM;
+
+ fb_virt = ioremap(fb_phys, fb_size);
+ if (!fb_virt) {
+ release_mem_region(fb_phys, fb_size);
+ return -ENOMEM;
+ }
+ host->mapped = 1;
+ } else {
+ if (!fb_size)
+ fb_size = SZ_2M; /* default */
+ fb_virt = alloc_pages_exact(fb_size, GFP_DMA);
+ if (!fb_virt)
+ return -ENOMEM;
+
+ fb_phys = virt_to_phys(fb_virt);
+ }
+
+ fb_info->fix.smem_start = fb_phys;
+ fb_info->screen_base = fb_virt;
+ fb_info->screen_size = fb_info->fix.smem_len = fb_size;
+
+ if (mxsfb_restore_mode(host))
+ memset(fb_virt, 0, fb_size);
+
+ return 0;
+}
+
+static void __devexit mxsfb_free_videomem(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+
+ if (host->mapped) {
+ iounmap(fb_info->screen_base);
+ release_mem_region(fb_info->fix.smem_start,
+ fb_info->screen_size);
+ } else {
+ free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
+ }
+}
+
+static int __devinit mxsfb_probe(struct platform_device *pdev)
+{
+ struct mxsfb_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+ struct mxsfb_info *host;
+ struct fb_info *fb_info;
+ struct fb_modelist *modelist;
+ int i, ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platformdata. Giving up\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get memory IO resource\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name))
+ return -EBUSY;
+
+ fb_info = framebuffer_alloc(sizeof(struct mxsfb_info), &pdev->dev);
+ if (!fb_info) {
+ dev_err(&pdev->dev, "Failed to allocate fbdev\n");
+ ret = -ENOMEM;
+ goto error_alloc_info;
+ }
+
+ host = to_imxfb_host(fb_info);
+
+ host->base = ioremap(res->start, resource_size(res));
+ if (!host->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto error_ioremap;
+ }
+
+ host->pdev = pdev;
+ platform_set_drvdata(pdev, host);
+
+ host->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
+
+ host->clk = clk_get(&host->pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto error_getclock;
+ }
+
+ fb_info->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+ if (!fb_info->pseudo_palette) {
+ ret = -ENOMEM;
+ goto error_pseudo_palette;
+ }
+
+ INIT_LIST_HEAD(&fb_info->modelist);
+
+ ret = mxsfb_init_fbinfo(host);
+ if (ret != 0)
+ goto error_init_fb;
+
+ for (i = 0; i < pdata->mode_count; i++)
+ fb_add_videomode(&pdata->mode_list[i], &fb_info->modelist);
+
+ modelist = list_first_entry(&fb_info->modelist,
+ struct fb_modelist, list);
+ fb_videomode_to_var(&fb_info->var, &modelist->mode);
+
+ /* init the color fields */
+ mxsfb_check_var(&fb_info->var, fb_info);
+
+ platform_set_drvdata(pdev, fb_info);
+
+ ret = register_framebuffer(fb_info);
+ if (ret != 0) {
+ dev_err(&pdev->dev,"Failed to register framebuffer\n");
+ goto error_register;
+ }
+
+ if (!host->enabled) {
+ writel(0, host->base + LCDC_CTRL);
+ mxsfb_set_par(fb_info);
+ mxsfb_enable_controller(fb_info);
+ }
+
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+
+error_register:
+ if (host->enabled)
+ clk_disable(host->clk);
+ fb_destroy_modelist(&fb_info->modelist);
+error_init_fb:
+ kfree(fb_info->pseudo_palette);
+error_pseudo_palette:
+ clk_put(host->clk);
+error_getclock:
+ iounmap(host->base);
+error_ioremap:
+ framebuffer_release(fb_info);
+error_alloc_info:
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+static int __devexit mxsfb_remove(struct platform_device *pdev)
+{
+ struct fb_info *fb_info = platform_get_drvdata(pdev);
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (host->enabled)
+ mxsfb_disable_controller(fb_info);
+
+ unregister_framebuffer(fb_info);
+ kfree(fb_info->pseudo_palette);
+ mxsfb_free_videomem(host);
+ iounmap(host->base);
+ clk_put(host->clk);
+
+ framebuffer_release(fb_info);
+ release_mem_region(res->start, resource_size(res));
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_device_id mxsfb_devtype[] = {
+ {
+ .name = "imx23-fb",
+ .driver_data = MXSFB_V3,
+ }, {
+ .name = "imx28-fb",
+ .driver_data = MXSFB_V4,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
+
+static struct platform_driver mxsfb_driver = {
+ .probe = mxsfb_probe,
+ .remove = __devexit_p(mxsfb_remove),
+ .id_table = mxsfb_devtype,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init mxsfb_init(void)
+{
+ return platform_driver_register(&mxsfb_driver);
+}
+
+static void __exit mxsfb_exit(void)
+{
+ platform_driver_unregister(&mxsfb_driver);
+}
+
+module_init(mxsfb_init);
+module_exit(mxsfb_exit);
+
+MODULE_DESCRIPTION("Freescale mxs framebuffer driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 6aac6d1b937b..8471008aa6ff 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -111,6 +111,7 @@ void nvidia_bl_init(struct nvidia_par *par)
snprintf(name, sizeof(name), "nvidiabl%d", info->node);
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops,
&props);
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 083c8fe53e24..15e7f1912af9 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -5,13 +5,18 @@ config FB_OMAP
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select TWL4030_CORE if MACH_OMAP_2430SDP
help
Frame buffer driver for OMAP based boards.
config FB_OMAP_LCD_VGA
bool "Use LCD in VGA mode"
depends on MACH_OMAP_3430SDP || MACH_OMAP_LDP
-
+ help
+ Set the LCD resolution to VGA (640 x 480).
+ The default resolution without this option is QVGA (320 x 240).
+ See the drivers/video/omap/lcd_ldp.c file for the
+ LCD driver code.
choice
depends on FB_OMAP && MACH_OVERO
prompt "Screen resolution"
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
index 87785c215a52..c0504a8a5079 100644
--- a/drivers/video/omap/blizzard.c
+++ b/drivers/video/omap/blizzard.c
@@ -397,8 +397,7 @@ static inline void free_req(struct blizzard_request *req)
spin_lock_irqsave(&blizzard.req_lock, flags);
- list_del(&req->entry);
- list_add(&req->entry, &blizzard.free_req_list);
+ list_move(&req->entry, &blizzard.free_req_list);
if (!(req->flags & REQ_FROM_IRQ_POOL))
up(&blizzard.req_sema);
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index 0016f77cd13f..084aa0ac562b 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -269,8 +269,7 @@ static inline void free_req(struct hwa742_request *req)
spin_lock_irqsave(&hwa742.req_lock, flags);
- list_del(&req->entry);
- list_add(&req->entry, &hwa742.free_req_list);
+ list_move(&req->entry, &hwa742.free_req_list);
if (!(req->flags & REQ_FROM_IRQ_POOL))
up(&hwa742.req_sema);
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index 940cab394c2e..d18ad6b2372a 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -9,6 +9,12 @@ config PANEL_GENERIC_DPI
Supports LCD Panel used in TI SDP3430 and EVM boards,
OMAP3517 EVM boards and CM-T35.
+config PANEL_LGPHILIPS_LB035Q02
+ tristate "LG.Philips LB035Q02 LCD Panel"
+ depends on OMAP2_DSS && SPI
+ help
+ LCD Panel used on the Gumstix Overo Palo35
+
config PANEL_SHARP_LS037V7DW01
tristate "Sharp LS037V7DW01 LCD Panel"
depends on OMAP2_DSS
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index 861f0255ec6b..0f601ab3abf4 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_PANEL_GENERIC_DPI) += panel-generic-dpi.o
+obj-$(CONFIG_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_PANEL_NEC_NL8048HL11_01B) += panel-nec-nl8048hl11-01b.o
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index e77310653207..7e04c921aa2a 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -534,6 +534,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
props.fb_blank = FB_BLANK_UNBLANK;
props.power = FB_BLANK_UNBLANK;
+ props.type = BACKLIGHT_RAW;
bldev = backlight_device_register("acx565akm", &md->spi->dev,
md, &acx565akm_bl_ops, &props);
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 07eb30ee59c8..4a9b9ff59467 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -156,6 +156,31 @@ static struct panel_config generic_dpi_panels[] = {
.power_off_delay = 0,
.name = "toppoly_tdo35s",
},
+
+ /* Samsung LTE430WQ-F0C */
+ {
+ {
+ .x_res = 480,
+ .y_res = 272,
+
+ .pixel_clock = 9200,
+
+ .hfp = 8,
+ .hsw = 41,
+ .hbp = 45 - 41,
+
+ .vfp = 4,
+ .vsw = 10,
+ .vbp = 12 - 10,
+ },
+ .acbi = 0x0,
+ .acb = 0x0,
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS,
+ .power_on_delay = 0,
+ .power_off_delay = 0,
+ .name = "samsung_lte430wq_f0c",
+ },
};
struct panel_drv_data {
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
new file mode 100644
index 000000000000..271324db2436
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
@@ -0,0 +1,279 @@
+/*
+ * LCD panel driver for LG.Philips LB035Q02
+ *
+ * Author: Steve Sakoman <steve@sakoman.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/mutex.h>
+
+#include <plat/display.h>
+
+struct lb035q02_data {
+ struct mutex lock;
+};
+
+static struct omap_video_timings lb035q02_timings = {
+ .x_res = 320,
+ .y_res = 240,
+
+ .pixel_clock = 6500,
+
+ .hsw = 2,
+ .hfp = 20,
+ .hbp = 68,
+
+ .vsw = 2,
+ .vfp = 4,
+ .vbp = 18,
+};
+
+static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto err1;
+ }
+
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
+ return r;
+}
+
+static void lb035q02_panel_power_off(struct omap_dss_device *dssdev)
+{
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int lb035q02_panel_probe(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld;
+ int r;
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS;
+ dssdev->panel.timings = lb035q02_timings;
+
+ ld = kzalloc(sizeof(*ld), GFP_KERNEL);
+ if (!ld) {
+ r = -ENOMEM;
+ goto err;
+ }
+ mutex_init(&ld->lock);
+ dev_set_drvdata(&dssdev->dev, ld);
+ return 0;
+err:
+ return r;
+}
+
+static void lb035q02_panel_remove(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+
+ kfree(ld);
+}
+
+static int lb035q02_panel_enable(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ mutex_lock(&ld->lock);
+
+ r = lb035q02_panel_power_on(dssdev);
+ if (r)
+ goto err;
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&ld->lock);
+ return 0;
+err:
+ mutex_unlock(&ld->lock);
+ return r;
+}
+
+static void lb035q02_panel_disable(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&ld->lock);
+
+ lb035q02_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&ld->lock);
+}
+
+static int lb035q02_panel_suspend(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&ld->lock);
+
+ lb035q02_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ mutex_unlock(&ld->lock);
+ return 0;
+}
+
+static int lb035q02_panel_resume(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ mutex_lock(&ld->lock);
+
+ r = lb035q02_panel_power_on(dssdev);
+ if (r)
+ goto err;
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&ld->lock);
+ return 0;
+err:
+ mutex_unlock(&ld->lock);
+ return r;
+}
+
+static struct omap_dss_driver lb035q02_driver = {
+ .probe = lb035q02_panel_probe,
+ .remove = lb035q02_panel_remove,
+
+ .enable = lb035q02_panel_enable,
+ .disable = lb035q02_panel_disable,
+ .suspend = lb035q02_panel_suspend,
+ .resume = lb035q02_panel_resume,
+
+ .driver = {
+ .name = "lgphilips_lb035q02_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int lb035q02_write_reg(struct spi_device *spi, u8 reg, u16 val)
+{
+ struct spi_message msg;
+ struct spi_transfer index_xfer = {
+ .len = 3,
+ .cs_change = 1,
+ };
+ struct spi_transfer value_xfer = {
+ .len = 3,
+ };
+ u8 buffer[16];
+
+ spi_message_init(&msg);
+
+ /* register index */
+ buffer[0] = 0x70;
+ buffer[1] = 0x00;
+ buffer[2] = reg & 0x7f;
+ index_xfer.tx_buf = buffer;
+ spi_message_add_tail(&index_xfer, &msg);
+
+ /* register value */
+ buffer[4] = 0x72;
+ buffer[5] = val >> 8;
+ buffer[6] = val;
+ value_xfer.tx_buf = buffer + 4;
+ spi_message_add_tail(&value_xfer, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static void init_lb035q02_panel(struct spi_device *spi)
+{
+ /* Init sequence from page 28 of the lb035q02 spec */
+ lb035q02_write_reg(spi, 0x01, 0x6300);
+ lb035q02_write_reg(spi, 0x02, 0x0200);
+ lb035q02_write_reg(spi, 0x03, 0x0177);
+ lb035q02_write_reg(spi, 0x04, 0x04c7);
+ lb035q02_write_reg(spi, 0x05, 0xffc0);
+ lb035q02_write_reg(spi, 0x06, 0xe806);
+ lb035q02_write_reg(spi, 0x0a, 0x4008);
+ lb035q02_write_reg(spi, 0x0b, 0x0000);
+ lb035q02_write_reg(spi, 0x0d, 0x0030);
+ lb035q02_write_reg(spi, 0x0e, 0x2800);
+ lb035q02_write_reg(spi, 0x0f, 0x0000);
+ lb035q02_write_reg(spi, 0x16, 0x9f80);
+ lb035q02_write_reg(spi, 0x17, 0x0a0f);
+ lb035q02_write_reg(spi, 0x1e, 0x00c1);
+ lb035q02_write_reg(spi, 0x30, 0x0300);
+ lb035q02_write_reg(spi, 0x31, 0x0007);
+ lb035q02_write_reg(spi, 0x32, 0x0000);
+ lb035q02_write_reg(spi, 0x33, 0x0000);
+ lb035q02_write_reg(spi, 0x34, 0x0707);
+ lb035q02_write_reg(spi, 0x35, 0x0004);
+ lb035q02_write_reg(spi, 0x36, 0x0302);
+ lb035q02_write_reg(spi, 0x37, 0x0202);
+ lb035q02_write_reg(spi, 0x3a, 0x0a0d);
+ lb035q02_write_reg(spi, 0x3b, 0x0806);
+}
+
+static int __devinit lb035q02_panel_spi_probe(struct spi_device *spi)
+{
+ init_lb035q02_panel(spi);
+ return omap_dss_register_driver(&lb035q02_driver);
+}
+
+static int __devexit lb035q02_panel_spi_remove(struct spi_device *spi)
+{
+ omap_dss_unregister_driver(&lb035q02_driver);
+ return 0;
+}
+
+static struct spi_driver lb035q02_spi_driver = {
+ .driver = {
+ .name = "lgphilips_lb035q02_panel-spi",
+ .owner = THIS_MODULE,
+ },
+ .probe = lb035q02_panel_spi_probe,
+ .remove = __devexit_p(lb035q02_panel_spi_remove),
+};
+
+static int __init lb035q02_panel_drv_init(void)
+{
+ return spi_register_driver(&lb035q02_spi_driver);
+}
+
+static void __exit lb035q02_panel_drv_exit(void)
+{
+ spi_unregister_driver(&lb035q02_spi_driver);
+}
+
+module_init(lb035q02_panel_drv_init);
+module_exit(lb035q02_panel_drv_exit);
+MODULE_LICENSE("GPL");
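As a usage note, binding this panel driver from board code only needs an SPI board-info entry whose modalias matches the spi_driver name registered above; the bus number, chip select, clock rate and init-call wiring below are placeholders, so treat this as a sketch:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static struct spi_board_info board_lcd_spi[] __initdata = {
	{
		/* must match lb035q02_spi_driver.driver.name */
		.modalias	= "lgphilips_lb035q02_panel-spi",
		.bus_num	= 1,		/* board specific */
		.chip_select	= 1,		/* board specific */
		.max_speed_hz	= 500000,	/* board specific */
	},
};

static int __init board_lcd_spi_init(void)
{
	/* normally called from the machine/board init code */
	return spi_register_board_info(board_lcd_spi,
				       ARRAY_SIZE(board_lcd_spi));
}
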
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index 9a138f650e05..d2b35d2df2a6 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -99,6 +99,7 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = dssdev->max_backlight_level;
+ props.type = BACKLIGHT_RAW;
bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
&sharp_ls_bl_ops, &props);
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 61026f96ad20..adc9900458e1 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -218,6 +218,8 @@ struct taal_data {
u16 w;
u16 h;
} update_region;
+ int channel;
+
struct delayed_work te_timeout_work;
bool use_dsi_bl;
@@ -257,12 +259,12 @@ static void hw_guard_wait(struct taal_data *td)
}
}
-static int taal_dcs_read_1(u8 dcs_cmd, u8 *data)
+static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data)
{
int r;
u8 buf[1];
- r = dsi_vc_dcs_read(TCH, dcs_cmd, buf, 1);
+ r = dsi_vc_dcs_read(td->channel, dcs_cmd, buf, 1);
if (r < 0)
return r;
@@ -272,17 +274,17 @@ static int taal_dcs_read_1(u8 dcs_cmd, u8 *data)
return 0;
}
-static int taal_dcs_write_0(u8 dcs_cmd)
+static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd)
{
- return dsi_vc_dcs_write(TCH, &dcs_cmd, 1);
+ return dsi_vc_dcs_write(td->channel, &dcs_cmd, 1);
}
-static int taal_dcs_write_1(u8 dcs_cmd, u8 param)
+static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param)
{
u8 buf[2];
buf[0] = dcs_cmd;
buf[1] = param;
- return dsi_vc_dcs_write(TCH, buf, 2);
+ return dsi_vc_dcs_write(td->channel, buf, 2);
}
static int taal_sleep_in(struct taal_data *td)
@@ -294,7 +296,7 @@ static int taal_sleep_in(struct taal_data *td)
hw_guard_wait(td);
cmd = DCS_SLEEP_IN;
- r = dsi_vc_dcs_write_nosync(TCH, &cmd, 1);
+ r = dsi_vc_dcs_write_nosync(td->channel, &cmd, 1);
if (r)
return r;
@@ -312,7 +314,7 @@ static int taal_sleep_out(struct taal_data *td)
hw_guard_wait(td);
- r = taal_dcs_write_0(DCS_SLEEP_OUT);
+ r = taal_dcs_write_0(td, DCS_SLEEP_OUT);
if (r)
return r;
@@ -324,30 +326,30 @@ static int taal_sleep_out(struct taal_data *td)
return 0;
}
-static int taal_get_id(u8 *id1, u8 *id2, u8 *id3)
+static int taal_get_id(struct taal_data *td, u8 *id1, u8 *id2, u8 *id3)
{
int r;
- r = taal_dcs_read_1(DCS_GET_ID1, id1);
+ r = taal_dcs_read_1(td, DCS_GET_ID1, id1);
if (r)
return r;
- r = taal_dcs_read_1(DCS_GET_ID2, id2);
+ r = taal_dcs_read_1(td, DCS_GET_ID2, id2);
if (r)
return r;
- r = taal_dcs_read_1(DCS_GET_ID3, id3);
+ r = taal_dcs_read_1(td, DCS_GET_ID3, id3);
if (r)
return r;
return 0;
}
-static int taal_set_addr_mode(u8 rotate, bool mirror)
+static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror)
{
int r;
u8 mode;
int b5, b6, b7;
- r = taal_dcs_read_1(DCS_READ_MADCTL, &mode);
+ r = taal_dcs_read_1(td, DCS_READ_MADCTL, &mode);
if (r)
return r;
@@ -381,10 +383,11 @@ static int taal_set_addr_mode(u8 rotate, bool mirror)
mode &= ~((1<<7) | (1<<6) | (1<<5));
mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
- return taal_dcs_write_1(DCS_MEM_ACC_CTRL, mode);
+ return taal_dcs_write_1(td, DCS_MEM_ACC_CTRL, mode);
}
-static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
+static int taal_set_update_window(struct taal_data *td,
+ u16 x, u16 y, u16 w, u16 h)
{
int r;
u16 x1 = x;
@@ -399,7 +402,7 @@ static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
buf[3] = (x2 >> 8) & 0xff;
buf[4] = (x2 >> 0) & 0xff;
- r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+ r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf));
if (r)
return r;
@@ -409,11 +412,11 @@ static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
buf[3] = (y2 >> 8) & 0xff;
buf[4] = (y2 >> 0) & 0xff;
- r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+ r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf));
if (r)
return r;
- dsi_vc_send_bta_sync(TCH);
+ dsi_vc_send_bta_sync(td->channel);
return r;
}
@@ -439,7 +442,7 @@ static int taal_bl_update_status(struct backlight_device *dev)
if (td->use_dsi_bl) {
if (td->enabled) {
dsi_bus_lock();
- r = taal_dcs_write_1(DCS_BRIGHTNESS, level);
+ r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level);
dsi_bus_unlock();
} else {
r = 0;
@@ -502,7 +505,7 @@ static ssize_t taal_num_errors_show(struct device *dev,
if (td->enabled) {
dsi_bus_lock();
- r = taal_dcs_read_1(DCS_READ_NUM_ERRORS, &errors);
+ r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors);
dsi_bus_unlock();
} else {
r = -ENODEV;
@@ -528,7 +531,7 @@ static ssize_t taal_hw_revision_show(struct device *dev,
if (td->enabled) {
dsi_bus_lock();
- r = taal_get_id(&id1, &id2, &id3);
+ r = taal_get_id(td, &id1, &id2, &id3);
dsi_bus_unlock();
} else {
r = -ENODEV;
@@ -590,7 +593,7 @@ static ssize_t store_cabc_mode(struct device *dev,
if (td->enabled) {
dsi_bus_lock();
if (!td->cabc_broken)
- taal_dcs_write_1(DCS_WRITE_CABC, i);
+ taal_dcs_write_1(td, DCS_WRITE_CABC, i);
dsi_bus_unlock();
}
@@ -729,6 +732,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
props.max_brightness = 255;
else
props.max_brightness = 127;
+
+ props.type = BACKLIGHT_RAW;
bldev = backlight_device_register("taal", &dssdev->dev, dssdev,
&taal_bl_ops, &props);
if (IS_ERR(bldev)) {
@@ -774,14 +779,29 @@ static int taal_probe(struct omap_dss_device *dssdev)
dev_dbg(&dssdev->dev, "Using GPIO TE\n");
}
+ r = omap_dsi_request_vc(dssdev, &td->channel);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to get virtual channel\n");
+ goto err_req_vc;
+ }
+
+ r = omap_dsi_set_vc_id(dssdev, td->channel, TCH);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to set VC_ID\n");
+ goto err_vc_id;
+ }
+
r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group);
if (r) {
dev_err(&dssdev->dev, "failed to create sysfs files\n");
- goto err_sysfs;
+ goto err_vc_id;
}
return 0;
-err_sysfs:
+
+err_vc_id:
+ omap_dsi_release_vc(dssdev, td->channel);
+err_req_vc:
if (panel_data->use_ext_te)
free_irq(gpio_to_irq(panel_data->ext_te_gpio), dssdev);
err_irq:
@@ -808,6 +828,7 @@ static void taal_remove(struct omap_dss_device *dssdev)
dev_dbg(&dssdev->dev, "remove\n");
sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group);
+ omap_dsi_release_vc(dssdev, td->channel);
if (panel_data->use_ext_te) {
int gpio = panel_data->ext_te_gpio;
@@ -846,13 +867,13 @@ static int taal_power_on(struct omap_dss_device *dssdev)
taal_hw_reset(dssdev);
- omapdss_dsi_vc_enable_hs(TCH, false);
+ omapdss_dsi_vc_enable_hs(td->channel, false);
r = taal_sleep_out(td);
if (r)
goto err;
- r = taal_get_id(&id1, &id2, &id3);
+ r = taal_get_id(td, &id1, &id2, &id3);
if (r)
goto err;
@@ -861,30 +882,30 @@ static int taal_power_on(struct omap_dss_device *dssdev)
(id2 == 0x00 || id2 == 0xff || id2 == 0x81))
td->cabc_broken = true;
- r = taal_dcs_write_1(DCS_BRIGHTNESS, 0xff);
+ r = taal_dcs_write_1(td, DCS_BRIGHTNESS, 0xff);
if (r)
goto err;
- r = taal_dcs_write_1(DCS_CTRL_DISPLAY,
+ r = taal_dcs_write_1(td, DCS_CTRL_DISPLAY,
(1<<2) | (1<<5)); /* BL | BCTRL */
if (r)
goto err;
- r = taal_dcs_write_1(DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
+ r = taal_dcs_write_1(td, DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
if (r)
goto err;
- r = taal_set_addr_mode(td->rotate, td->mirror);
+ r = taal_set_addr_mode(td, td->rotate, td->mirror);
if (r)
goto err;
if (!td->cabc_broken) {
- r = taal_dcs_write_1(DCS_WRITE_CABC, td->cabc_mode);
+ r = taal_dcs_write_1(td, DCS_WRITE_CABC, td->cabc_mode);
if (r)
goto err;
}
- r = taal_dcs_write_0(DCS_DISPLAY_ON);
+ r = taal_dcs_write_0(td, DCS_DISPLAY_ON);
if (r)
goto err;
@@ -903,7 +924,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
td->intro_printed = true;
}
- omapdss_dsi_vc_enable_hs(TCH, true);
+ omapdss_dsi_vc_enable_hs(td->channel, true);
return 0;
err:
@@ -921,7 +942,7 @@ static void taal_power_off(struct omap_dss_device *dssdev)
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
- r = taal_dcs_write_0(DCS_DISPLAY_OFF);
+ r = taal_dcs_write_0(td, DCS_DISPLAY_OFF);
if (!r) {
r = taal_sleep_in(td);
/* HACK: wait a bit so that the message goes through */
@@ -1089,7 +1110,7 @@ static irqreturn_t taal_te_isr(int irq, void *data)
if (old) {
cancel_delayed_work(&td->te_timeout_work);
- r = omap_dsi_update(dssdev, TCH,
+ r = omap_dsi_update(dssdev, td->channel,
td->update_region.x,
td->update_region.y,
td->update_region.w,
@@ -1139,7 +1160,7 @@ static int taal_update(struct omap_dss_device *dssdev,
if (r)
goto err;
- r = taal_set_update_window(x, y, w, h);
+ r = taal_set_update_window(td, x, y, w, h);
if (r)
goto err;
@@ -1153,7 +1174,7 @@ static int taal_update(struct omap_dss_device *dssdev,
msecs_to_jiffies(250));
atomic_set(&td->do_update, 1);
} else {
- r = omap_dsi_update(dssdev, TCH, x, y, w, h,
+ r = omap_dsi_update(dssdev, td->channel, x, y, w, h,
taal_framedone_cb, dssdev);
if (r)
goto err;
@@ -1191,9 +1212,9 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
int r;
if (enable)
- r = taal_dcs_write_1(DCS_TEAR_ON, 0);
+ r = taal_dcs_write_1(td, DCS_TEAR_ON, 0);
else
- r = taal_dcs_write_0(DCS_TEAR_OFF);
+ r = taal_dcs_write_0(td, DCS_TEAR_OFF);
if (!panel_data->use_ext_te)
omapdss_dsi_enable_te(dssdev, enable);
@@ -1263,7 +1284,7 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
dsi_bus_lock();
if (td->enabled) {
- r = taal_set_addr_mode(rotate, td->mirror);
+ r = taal_set_addr_mode(td, rotate, td->mirror);
if (r)
goto err;
}
@@ -1306,7 +1327,7 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
dsi_bus_lock();
if (td->enabled) {
- r = taal_set_addr_mode(td->rotate, enable);
+ r = taal_set_addr_mode(td, td->rotate, enable);
if (r)
goto err;
}
@@ -1350,13 +1371,13 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
dsi_bus_lock();
- r = taal_dcs_read_1(DCS_GET_ID1, &id1);
+ r = taal_dcs_read_1(td, DCS_GET_ID1, &id1);
if (r)
goto err2;
- r = taal_dcs_read_1(DCS_GET_ID2, &id2);
+ r = taal_dcs_read_1(td, DCS_GET_ID2, &id2);
if (r)
goto err2;
- r = taal_dcs_read_1(DCS_GET_ID3, &id3);
+ r = taal_dcs_read_1(td, DCS_GET_ID3, &id3);
if (r)
goto err2;
@@ -1404,9 +1425,9 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
else
plen = 2;
- taal_set_update_window(x, y, w, h);
+ taal_set_update_window(td, x, y, w, h);
- r = dsi_vc_set_max_rx_packet_size(TCH, plen);
+ r = dsi_vc_set_max_rx_packet_size(td->channel, plen);
if (r)
goto err2;
@@ -1414,7 +1435,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
u8 dcs_cmd = first ? 0x2e : 0x3e;
first = 0;
- r = dsi_vc_dcs_read(TCH, dcs_cmd,
+ r = dsi_vc_dcs_read(td->channel, dcs_cmd,
buf + buf_used, size - buf_used);
if (r < 0) {
@@ -1440,7 +1461,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
r = buf_used;
err3:
- dsi_vc_set_max_rx_packet_size(TCH, 1);
+ dsi_vc_set_max_rx_packet_size(td->channel, 1);
err2:
dsi_bus_unlock();
err1:
@@ -1466,7 +1487,7 @@ static void taal_esd_work(struct work_struct *work)
dsi_bus_lock();
- r = taal_dcs_read_1(DCS_RDDSDR, &state1);
+ r = taal_dcs_read_1(td, DCS_RDDSDR, &state1);
if (r) {
dev_err(&dssdev->dev, "failed to read Taal status\n");
goto err;
@@ -1479,7 +1500,7 @@ static void taal_esd_work(struct work_struct *work)
goto err;
}
- r = taal_dcs_read_1(DCS_RDDSDR, &state2);
+ r = taal_dcs_read_1(td, DCS_RDDSDR, &state2);
if (r) {
dev_err(&dssdev->dev, "failed to read Taal status\n");
goto err;
@@ -1495,7 +1516,7 @@ static void taal_esd_work(struct work_struct *work)
/* Self-diagnostics result is also shown on TE GPIO line. We need
* to re-enable TE after self diagnostics */
if (td->te_enabled && panel_data->use_ext_te) {
- r = taal_dcs_write_1(DCS_TEAR_ON, 0);
+ r = taal_dcs_write_1(td, DCS_TEAR_ON, 0);
if (r)
goto err;
}
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 43b64403eaa4..bfc5da0e9700 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -1,8 +1,8 @@
menuconfig OMAP2_DSS
- tristate "OMAP2/3 Display Subsystem support (EXPERIMENTAL)"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ tristate "OMAP2+ Display Subsystem support (EXPERIMENTAL)"
+ depends on ARCH_OMAP2PLUS
help
- OMAP2/3 Display Subsystem support.
+ OMAP2+ Display Subsystem support.
if OMAP2_DSS
@@ -60,6 +60,14 @@ config OMAP2_DSS_VENC
help
OMAP Video Encoder support for S-Video and composite TV-out.
+config OMAP4_DSS_HDMI
+ bool "HDMI support"
+ depends on ARCH_OMAP4
+ default y
+ help
+ HDMI interface support. This adds the High-Definition Multimedia
+ Interface. See http://www.hdmi.org/ for the HDMI specification.
+
config OMAP2_DSS_SDI
bool "SDI support"
depends on ARCH_OMAP3
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 7db17b5e570c..10d9d3bb3e24 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -5,3 +5,5 @@ omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
+omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o \
+ hdmi_omap4_panel.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 8e89f6049280..1aa2ed1e786e 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -34,332 +34,26 @@
#include <linux/regulator/consumer.h>
#include <plat/display.h>
-#include <plat/clock.h>
#include "dss.h"
#include "dss_features.h"
static struct {
struct platform_device *pdev;
- int ctx_id;
-
- struct clk *dss_ick;
- struct clk *dss1_fck;
- struct clk *dss2_fck;
- struct clk *dss_54m_fck;
- struct clk *dss_96m_fck;
- unsigned num_clks_enabled;
struct regulator *vdds_dsi_reg;
struct regulator *vdds_sdi_reg;
- struct regulator *vdda_dac_reg;
} core;
-static void dss_clk_enable_all_no_ctx(void);
-static void dss_clk_disable_all_no_ctx(void);
-static void dss_clk_enable_no_ctx(enum dss_clock clks);
-static void dss_clk_disable_no_ctx(enum dss_clock clks);
-
static char *def_disp_name;
module_param_named(def_disp, def_disp_name, charp, 0);
-MODULE_PARM_DESC(def_disp_name, "default display name");
+MODULE_PARM_DESC(def_disp, "default display name");
#ifdef DEBUG
unsigned int dss_debug;
module_param_named(debug, dss_debug, bool, 0644);
#endif
-/* CONTEXT */
-static int dss_get_ctx_id(void)
-{
- struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
- int r;
-
- if (!pdata->get_last_off_on_transaction_id)
- return 0;
- r = pdata->get_last_off_on_transaction_id(&core.pdev->dev);
- if (r < 0) {
- dev_err(&core.pdev->dev, "getting transaction ID failed, "
- "will force context restore\n");
- r = -1;
- }
- return r;
-}
-
-int dss_need_ctx_restore(void)
-{
- int id = dss_get_ctx_id();
-
- if (id < 0 || id != core.ctx_id) {
- DSSDBG("ctx id %d -> id %d\n",
- core.ctx_id, id);
- core.ctx_id = id;
- return 1;
- } else {
- return 0;
- }
-}
-
-static void save_all_ctx(void)
-{
- DSSDBG("save context\n");
-
- dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
-
- dss_save_context();
- dispc_save_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_save_context();
-#endif
-
- dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
-}
-
-static void restore_all_ctx(void)
-{
- DSSDBG("restore context\n");
-
- dss_clk_enable_all_no_ctx();
-
- dss_restore_context();
- dispc_restore_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_restore_context();
-#endif
-
- dss_clk_disable_all_no_ctx();
-}
-
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-/* CLOCKS */
-static void core_dump_clocks(struct seq_file *s)
-{
- int i;
- struct clk *clocks[5] = {
- core.dss_ick,
- core.dss1_fck,
- core.dss2_fck,
- core.dss_54m_fck,
- core.dss_96m_fck
- };
-
- seq_printf(s, "- CORE -\n");
-
- seq_printf(s, "internal clk count\t\t%u\n", core.num_clks_enabled);
-
- for (i = 0; i < 5; i++) {
- if (!clocks[i])
- continue;
- seq_printf(s, "%-15s\t%lu\t%d\n",
- clocks[i]->name,
- clk_get_rate(clocks[i]),
- clocks[i]->usecount);
- }
-}
-#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
-
-static int dss_get_clock(struct clk **clock, const char *clk_name)
-{
- struct clk *clk;
-
- clk = clk_get(&core.pdev->dev, clk_name);
-
- if (IS_ERR(clk)) {
- DSSERR("can't get clock %s", clk_name);
- return PTR_ERR(clk);
- }
-
- *clock = clk;
-
- DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
-
- return 0;
-}
-
-static int dss_get_clocks(void)
-{
- int r;
-
- core.dss_ick = NULL;
- core.dss1_fck = NULL;
- core.dss2_fck = NULL;
- core.dss_54m_fck = NULL;
- core.dss_96m_fck = NULL;
-
- r = dss_get_clock(&core.dss_ick, "ick");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss1_fck, "dss1_fck");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss2_fck, "dss2_fck");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss_54m_fck, "tv_fck");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss_96m_fck, "video_fck");
- if (r)
- goto err;
-
- return 0;
-
-err:
- if (core.dss_ick)
- clk_put(core.dss_ick);
- if (core.dss1_fck)
- clk_put(core.dss1_fck);
- if (core.dss2_fck)
- clk_put(core.dss2_fck);
- if (core.dss_54m_fck)
- clk_put(core.dss_54m_fck);
- if (core.dss_96m_fck)
- clk_put(core.dss_96m_fck);
-
- return r;
-}
-
-static void dss_put_clocks(void)
-{
- if (core.dss_96m_fck)
- clk_put(core.dss_96m_fck);
- clk_put(core.dss_54m_fck);
- clk_put(core.dss1_fck);
- clk_put(core.dss2_fck);
- clk_put(core.dss_ick);
-}
-
-unsigned long dss_clk_get_rate(enum dss_clock clk)
-{
- switch (clk) {
- case DSS_CLK_ICK:
- return clk_get_rate(core.dss_ick);
- case DSS_CLK_FCK1:
- return clk_get_rate(core.dss1_fck);
- case DSS_CLK_FCK2:
- return clk_get_rate(core.dss2_fck);
- case DSS_CLK_54M:
- return clk_get_rate(core.dss_54m_fck);
- case DSS_CLK_96M:
- return clk_get_rate(core.dss_96m_fck);
- }
-
- BUG();
- return 0;
-}
-
-static unsigned count_clk_bits(enum dss_clock clks)
-{
- unsigned num_clks = 0;
-
- if (clks & DSS_CLK_ICK)
- ++num_clks;
- if (clks & DSS_CLK_FCK1)
- ++num_clks;
- if (clks & DSS_CLK_FCK2)
- ++num_clks;
- if (clks & DSS_CLK_54M)
- ++num_clks;
- if (clks & DSS_CLK_96M)
- ++num_clks;
-
- return num_clks;
-}
-
-static void dss_clk_enable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_enable(core.dss_ick);
- if (clks & DSS_CLK_FCK1)
- clk_enable(core.dss1_fck);
- if (clks & DSS_CLK_FCK2)
- clk_enable(core.dss2_fck);
- if (clks & DSS_CLK_54M)
- clk_enable(core.dss_54m_fck);
- if (clks & DSS_CLK_96M)
- clk_enable(core.dss_96m_fck);
-
- core.num_clks_enabled += num_clks;
-}
-
-void dss_clk_enable(enum dss_clock clks)
-{
- bool check_ctx = core.num_clks_enabled == 0;
-
- dss_clk_enable_no_ctx(clks);
-
- if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
- restore_all_ctx();
-}
-
-static void dss_clk_disable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_disable(core.dss_ick);
- if (clks & DSS_CLK_FCK1)
- clk_disable(core.dss1_fck);
- if (clks & DSS_CLK_FCK2)
- clk_disable(core.dss2_fck);
- if (clks & DSS_CLK_54M)
- clk_disable(core.dss_54m_fck);
- if (clks & DSS_CLK_96M)
- clk_disable(core.dss_96m_fck);
-
- core.num_clks_enabled -= num_clks;
-}
-
-void dss_clk_disable(enum dss_clock clks)
-{
- if (cpu_is_omap34xx()) {
- unsigned num_clks = count_clk_bits(clks);
-
- BUG_ON(core.num_clks_enabled < num_clks);
-
- if (core.num_clks_enabled == num_clks)
- save_all_ctx();
- }
-
- dss_clk_disable_no_ctx(clks);
-}
-
-static void dss_clk_enable_all_no_ctx(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_96M;
- dss_clk_enable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all_no_ctx(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_96M;
- dss_clk_disable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_96M;
- dss_clk_disable(clks);
-}
-
/* REGULATORS */
struct regulator *dss_get_vdds_dsi(void)
@@ -390,32 +84,7 @@ struct regulator *dss_get_vdds_sdi(void)
return reg;
}
-struct regulator *dss_get_vdda_dac(void)
-{
- struct regulator *reg;
-
- if (core.vdda_dac_reg != NULL)
- return core.vdda_dac_reg;
-
- reg = regulator_get(&core.pdev->dev, "vdda_dac");
- if (!IS_ERR(reg))
- core.vdda_dac_reg = reg;
-
- return reg;
-}
-
-/* DEBUGFS */
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-static void dss_debug_dump_clocks(struct seq_file *s)
-{
- core_dump_clocks(s);
- dss_dump_clocks(s);
- dispc_dump_clocks(s);
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_dump_clocks(s);
-#endif
-}
-
static int dss_debug_show(struct seq_file *s, void *unused)
{
void (*func)(struct seq_file *) = s->private;
@@ -497,7 +166,6 @@ static inline void dss_uninitialize_debugfs(void)
static int omap_dss_probe(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- int skip_init = 0;
int r;
int i;
@@ -508,63 +176,43 @@ static int omap_dss_probe(struct platform_device *pdev)
dss_init_overlay_managers(pdev);
dss_init_overlays(pdev);
- r = dss_get_clocks();
- if (r)
- goto err_clocks;
-
- dss_clk_enable_all_no_ctx();
-
- core.ctx_id = dss_get_ctx_id();
- DSSDBG("initial ctx id %u\n", core.ctx_id);
-
-#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
- /* DISPC_CONTROL */
- if (omap_readl(0x48050440) & 1) /* LCD enabled? */
- skip_init = 1;
-#endif
-
- r = dss_init(skip_init);
+ r = dss_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize DSS\n");
+ DSSERR("Failed to initialize DSS platform driver\n");
goto err_dss;
}
- r = rfbi_init();
- if (r) {
- DSSERR("Failed to initialize rfbi\n");
- goto err_rfbi;
- }
+ /* keep clocks enabled to prevent context saves/restores during init */
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- r = dpi_init(pdev);
+ r = rfbi_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize dpi\n");
- goto err_dpi;
+ DSSERR("Failed to initialize rfbi platform driver\n");
+ goto err_rfbi;
}
- r = dispc_init();
+ r = dispc_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize dispc\n");
+ DSSERR("Failed to initialize dispc platform driver\n");
goto err_dispc;
}
- r = venc_init(pdev);
+ r = venc_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize venc\n");
+ DSSERR("Failed to initialize venc platform driver\n");
goto err_venc;
}
- if (cpu_is_omap34xx()) {
- r = sdi_init(skip_init);
- if (r) {
- DSSERR("Failed to initialize SDI\n");
- goto err_sdi;
- }
+ r = dsi_init_platform_driver();
+ if (r) {
+ DSSERR("Failed to initialize DSI platform driver\n");
+ goto err_dsi;
+ }
- r = dsi_init(pdev);
- if (r) {
- DSSERR("Failed to initialize DSI\n");
- goto err_dsi;
- }
+ r = hdmi_init_platform_driver();
+ if (r) {
+ DSSERR("Failed to initialize hdmi\n");
+ goto err_hdmi;
}
r = dss_initialize_debugfs();
@@ -589,32 +237,25 @@ static int omap_dss_probe(struct platform_device *pdev)
pdata->default_device = dssdev;
}
- dss_clk_disable_all();
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
err_register:
dss_uninitialize_debugfs();
err_debugfs:
- if (cpu_is_omap34xx())
- dsi_exit();
+ hdmi_uninit_platform_driver();
+err_hdmi:
+ dsi_uninit_platform_driver();
err_dsi:
- if (cpu_is_omap34xx())
- sdi_exit();
-err_sdi:
- venc_exit();
+ venc_uninit_platform_driver();
err_venc:
- dispc_exit();
+ dispc_uninit_platform_driver();
err_dispc:
- dpi_exit();
-err_dpi:
- rfbi_exit();
+ rfbi_uninit_platform_driver();
err_rfbi:
- dss_exit();
+ dss_uninit_platform_driver();
err_dss:
- dss_clk_disable_all_no_ctx();
- dss_put_clocks();
-err_clocks:
return r;
}
@@ -623,61 +264,15 @@ static int omap_dss_remove(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
int i;
- int c;
dss_uninitialize_debugfs();
- venc_exit();
- dispc_exit();
- dpi_exit();
- rfbi_exit();
- if (cpu_is_omap34xx()) {
- dsi_exit();
- sdi_exit();
- }
-
- dss_exit();
-
- /* these should be removed at some point */
- c = core.dss_ick->usecount;
- if (c > 0) {
- DSSERR("warning: dss_ick usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss_ick);
- }
-
- c = core.dss1_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss1_fck usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss1_fck);
- }
-
- c = core.dss2_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss2_fck usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss2_fck);
- }
-
- c = core.dss_54m_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss_54m_fck usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss_54m_fck);
- }
-
- if (core.dss_96m_fck) {
- c = core.dss_96m_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss_96m_fck usecount %d, disabling\n",
- c);
- while (c-- > 0)
- clk_disable(core.dss_96m_fck);
- }
- }
-
- dss_put_clocks();
+ venc_uninit_platform_driver();
+ dispc_uninit_platform_driver();
+ rfbi_uninit_platform_driver();
+ dsi_uninit_platform_driver();
+ hdmi_uninit_platform_driver();
+ dss_uninit_platform_driver();
dss_uninit_overlays(pdev);
dss_uninit_overlay_managers(pdev);
@@ -965,11 +560,6 @@ static void __exit omap_dss_exit(void)
core.vdds_sdi_reg = NULL;
}
- if (core.vdda_dac_reg != NULL) {
- regulator_put(core.vdda_dac_reg);
- core.vdda_dac_reg = NULL;
- }
-
platform_driver_unregister(&omap_dss_driver);
omap_dss_bus_unregister();
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 9f8c69f16e61..7804779c9da1 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/hardirq.h>
+#include <linux/interrupt.h>
#include <plat/sram.h>
#include <plat/clock.h>
@@ -42,8 +43,6 @@
#include "dss_features.h"
/* DISPC */
-#define DISPC_BASE 0x48050400
-
#define DISPC_SZ_REGS SZ_4K
struct dispc_reg { u16 idx; };
@@ -74,7 +73,7 @@ struct dispc_reg { u16 idx; };
#define DISPC_TIMING_H(ch) DISPC_REG(ch != 2 ? 0x0064 : 0x0400)
#define DISPC_TIMING_V(ch) DISPC_REG(ch != 2 ? 0x0068 : 0x0404)
#define DISPC_POL_FREQ(ch) DISPC_REG(ch != 2 ? 0x006C : 0x0408)
-#define DISPC_DIVISOR(ch) DISPC_REG(ch != 2 ? 0x0070 : 0x040C)
+#define DISPC_DIVISORo(ch) DISPC_REG(ch != 2 ? 0x0070 : 0x040C)
#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074)
#define DISPC_SIZE_DIG DISPC_REG(0x0078)
#define DISPC_SIZE_LCD(ch) DISPC_REG(ch != 2 ? 0x007C : 0x03CC)
@@ -129,6 +128,7 @@ struct dispc_reg { u16 idx; };
#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04)
+#define DISPC_DIVISOR DISPC_REG(0x0804)
#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
DISPC_IRQ_OCP_ERR | \
@@ -178,7 +178,9 @@ struct dispc_irq_stats {
};
static struct {
+ struct platform_device *pdev;
void __iomem *base;
+ int irq;
u32 fifo_size[3];
@@ -230,7 +232,7 @@ void dispc_save_context(void)
SR(TIMING_H(0));
SR(TIMING_V(0));
SR(POL_FREQ(0));
- SR(DIVISOR(0));
+ SR(DIVISORo(0));
SR(GLOBAL_ALPHA);
SR(SIZE_DIG);
SR(SIZE_LCD(0));
@@ -242,7 +244,7 @@ void dispc_save_context(void)
SR(TIMING_H(2));
SR(TIMING_V(2));
SR(POL_FREQ(2));
- SR(DIVISOR(2));
+ SR(DIVISORo(2));
SR(CONFIG2);
}
@@ -373,6 +375,9 @@ void dispc_save_context(void)
SR(VID_FIR_COEF_V(1, 7));
SR(VID_PRELOAD(1));
+
+ if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ SR(DIVISOR);
}
void dispc_restore_context(void)
@@ -389,7 +394,7 @@ void dispc_restore_context(void)
RR(TIMING_H(0));
RR(TIMING_V(0));
RR(POL_FREQ(0));
- RR(DIVISOR(0));
+ RR(DIVISORo(0));
RR(GLOBAL_ALPHA);
RR(SIZE_DIG);
RR(SIZE_LCD(0));
@@ -400,7 +405,7 @@ void dispc_restore_context(void)
RR(TIMING_H(2));
RR(TIMING_V(2));
RR(POL_FREQ(2));
- RR(DIVISOR(2));
+ RR(DIVISORo(2));
RR(CONFIG2);
}
@@ -532,6 +537,9 @@ void dispc_restore_context(void)
RR(VID_PRELOAD(1));
+ if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ RR(DIVISOR);
+
/* enable last, because LCD & DIGIT enable are here */
RR(CONTROL);
if (dss_has_feature(FEAT_MGR_LCD2))
@@ -552,9 +560,9 @@ void dispc_restore_context(void)
static inline void enable_clocks(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
bool dispc_go_busy(enum omap_channel channel)
@@ -1000,6 +1008,20 @@ void dispc_set_burst_size(enum omap_plane plane,
enable_clocks(0);
}
+void dispc_enable_gamma_table(bool enable)
+{
+ /*
+ * This is partially implemented to support only disabling of
+ * the gamma table.
+ */
+ if (enable) {
+ DSSWARN("Gamma table enabling for TV not yet supported");
+ return;
+ }
+
+ REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
+}
+
static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
{
u32 val;
@@ -1129,10 +1151,16 @@ static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
u32 val;
const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0),
DISPC_VID_ACCU0(1) };
+ u8 hor_start, hor_end, vert_start, vert_end;
BUG_ON(plane == OMAP_DSS_GFX);
- val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+ dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+ dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+
+ val = FLD_VAL(vaccu, vert_start, vert_end) |
+ FLD_VAL(haccu, hor_start, hor_end);
+
dispc_write_reg(ac0_reg[plane-1], val);
}
@@ -1141,10 +1169,16 @@ static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
u32 val;
const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0),
DISPC_VID_ACCU1(1) };
+ u8 hor_start, hor_end, vert_start, vert_end;
BUG_ON(plane == OMAP_DSS_GFX);
- val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+ dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+ dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+
+ val = FLD_VAL(vaccu, vert_start, vert_end) |
+ FLD_VAL(haccu, hor_start, hor_end);
+
dispc_write_reg(ac1_reg[plane-1], val);
}
@@ -1182,16 +1216,25 @@ static void _dispc_set_scaling(enum omap_plane plane,
_dispc_set_fir(plane, fir_hinc, fir_vinc);
l = dispc_read_reg(dispc_reg_att[plane]);
- l &= ~((0x0f << 5) | (0x3 << 21));
+ /* RESIZEENABLE and VERTICALTAPS */
+ l &= ~((0x3 << 5) | (0x1 << 21));
l |= fir_hinc ? (1 << 5) : 0;
l |= fir_vinc ? (1 << 6) : 0;
+ l |= five_taps ? (1 << 21) : 0;
- l |= hscaleup ? 0 : (1 << 7);
- l |= vscaleup ? 0 : (1 << 8);
+ /* VRESIZECONF and HRESIZECONF */
+ if (dss_has_feature(FEAT_RESIZECONF)) {
+ l &= ~(0x3 << 7);
+ l |= hscaleup ? 0 : (1 << 7);
+ l |= vscaleup ? 0 : (1 << 8);
+ }
- l |= five_taps ? (1 << 21) : 0;
- l |= five_taps ? (1 << 22) : 0;
+ /* LINEBUFFERSPLIT */
+ if (dss_has_feature(FEAT_LINEBUFFERSPLIT)) {
+ l &= ~(0x1 << 22);
+ l |= five_taps ? (1 << 22) : 0;
+ }
dispc_write_reg(dispc_reg_att[plane], l);
@@ -1215,9 +1258,11 @@ static void _dispc_set_scaling(enum omap_plane plane,
static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
bool mirroring, enum omap_color_mode color_mode)
{
+ bool row_repeat = false;
+ int vidrot = 0;
+
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY) {
- int vidrot = 0;
if (mirroring) {
switch (rotation) {
@@ -1251,16 +1296,15 @@ static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
}
}
- REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12);
-
if (rotation == OMAP_DSS_ROT_90 || rotation == OMAP_DSS_ROT_270)
- REG_FLD_MOD(dispc_reg_att[plane], 0x1, 18, 18);
+ row_repeat = true;
else
- REG_FLD_MOD(dispc_reg_att[plane], 0x0, 18, 18);
- } else {
- REG_FLD_MOD(dispc_reg_att[plane], 0, 13, 12);
- REG_FLD_MOD(dispc_reg_att[plane], 0, 18, 18);
+ row_repeat = false;
}
+
+ REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12);
+ if (dss_has_feature(FEAT_ROWREPEATENABLE))
+ REG_FLD_MOD(dispc_reg_att[plane], row_repeat ? 1 : 0, 18, 18);
}
static int color_mode_to_bpp(enum omap_color_mode color_mode)
@@ -2293,7 +2337,7 @@ static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
BUG_ON(pck_div < 2);
enable_clocks(1);
- dispc_write_reg(DISPC_DIVISOR(channel),
+ dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
enable_clocks(0);
}
@@ -2302,7 +2346,7 @@ static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
int *pck_div)
{
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR(channel));
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
*lck_div = FLD_GET(l, 23, 16);
*pck_div = FLD_GET(l, 7, 0);
}
@@ -2311,14 +2355,17 @@ unsigned long dispc_fclk_rate(void)
{
unsigned long r = 0;
- if (dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK)
- r = dss_clk_get_rate(DSS_CLK_FCK1);
- else
-#ifdef CONFIG_OMAP2_DSS_DSI
- r = dsi_get_dsi1_pll_rate();
-#else
- BUG();
-#endif
+ switch (dss_get_dispc_clk_source()) {
+ case DSS_CLK_SRC_FCK:
+ r = dss_clk_get_rate(DSS_CLK_FCK);
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ r = dsi_get_pll_hsdiv_dispc_rate();
+ break;
+ default:
+ BUG();
+ }
+
return r;
}
@@ -2328,47 +2375,72 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
unsigned long r;
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR(channel));
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
lcd = FLD_GET(l, 23, 16);
- r = dispc_fclk_rate();
+ switch (dss_get_lcd_clk_source(channel)) {
+ case DSS_CLK_SRC_FCK:
+ r = dss_clk_get_rate(DSS_CLK_FCK);
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ r = dsi_get_pll_hsdiv_dispc_rate();
+ break;
+ default:
+ BUG();
+ }
return r / lcd;
}
unsigned long dispc_pclk_rate(enum omap_channel channel)
{
- int lcd, pcd;
+ int pcd;
unsigned long r;
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR(channel));
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
- lcd = FLD_GET(l, 23, 16);
pcd = FLD_GET(l, 7, 0);
- r = dispc_fclk_rate();
+ r = dispc_lclk_rate(channel);
- return r / lcd / pcd;
+ return r / pcd;
}
void dispc_dump_clocks(struct seq_file *s)
{
int lcd, pcd;
+ u32 l;
+ enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
+ enum dss_clk_source lcd_clk_src;
enable_clocks(1);
seq_printf(s, "- DISPC -\n");
- seq_printf(s, "dispc fclk source = %s\n",
- dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
- "dss1_alwon_fclk" : "dsi1_pll_fclk");
+ seq_printf(s, "dispc fclk source = %s (%s)\n",
+ dss_get_generic_clk_source_name(dispc_clk_src),
+ dss_feat_get_clk_source_name(dispc_clk_src));
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
+ if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
+ seq_printf(s, "- DISPC-CORE-CLK -\n");
+ l = dispc_read_reg(DISPC_DIVISOR);
+ lcd = FLD_GET(l, 23, 16);
+
+ seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
+ (dispc_fclk_rate()/lcd), lcd);
+ }
seq_printf(s, "- LCD1 -\n");
+ lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD);
+
+ seq_printf(s, "lcd1_clk source = %s (%s)\n",
+ dss_get_generic_clk_source_name(lcd_clk_src),
+ dss_feat_get_clk_source_name(lcd_clk_src));
+
dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
@@ -2378,6 +2450,12 @@ void dispc_dump_clocks(struct seq_file *s)
if (dss_has_feature(FEAT_MGR_LCD2)) {
seq_printf(s, "- LCD2 -\n");
+ lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2);
+
+ seq_printf(s, "lcd2_clk source = %s (%s)\n",
+ dss_get_generic_clk_source_name(lcd_clk_src),
+ dss_feat_get_clk_source_name(lcd_clk_src));
+
dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
@@ -2440,7 +2518,7 @@ void dispc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(DISPC_REVISION);
DUMPREG(DISPC_SYSCONFIG);
@@ -2459,7 +2537,7 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_TIMING_H(0));
DUMPREG(DISPC_TIMING_V(0));
DUMPREG(DISPC_POL_FREQ(0));
- DUMPREG(DISPC_DIVISOR(0));
+ DUMPREG(DISPC_DIVISORo(0));
DUMPREG(DISPC_GLOBAL_ALPHA);
DUMPREG(DISPC_SIZE_DIG);
DUMPREG(DISPC_SIZE_LCD(0));
@@ -2471,7 +2549,7 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_TIMING_H(2));
DUMPREG(DISPC_TIMING_V(2));
DUMPREG(DISPC_POL_FREQ(2));
- DUMPREG(DISPC_DIVISOR(2));
+ DUMPREG(DISPC_DIVISORo(2));
DUMPREG(DISPC_SIZE_LCD(2));
}
@@ -2597,7 +2675,7 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_VID_PRELOAD(0));
DUMPREG(DISPC_VID_PRELOAD(1));
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
@@ -2713,8 +2791,8 @@ int dispc_get_clock_div(enum omap_channel channel,
fck = dispc_fclk_rate();
- cinfo->lck_div = REG_GET(DISPC_DIVISOR(channel), 23, 16);
- cinfo->pck_div = REG_GET(DISPC_DIVISOR(channel), 7, 0);
+ cinfo->lck_div = REG_GET(DISPC_DIVISORo(channel), 23, 16);
+ cinfo->pck_div = REG_GET(DISPC_DIVISORo(channel), 7, 0);
cinfo->lck = fck / cinfo->lck_div;
cinfo->pck = cinfo->lck / cinfo->pck_div;
@@ -2791,6 +2869,9 @@ int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
break;
}
+ if (ret)
+ goto err;
+
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
@@ -2866,10 +2947,10 @@ static void print_irq_status(u32 status)
* but we presume they are on because we got an IRQ. However,
* an irq handler may turn the clocks off, so we may not have
* clock later in the function. */
-void dispc_irq_handler(void)
+static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
{
int i;
- u32 irqstatus;
+ u32 irqstatus, irqenable;
u32 handledirqs = 0;
u32 unhandled_errors;
struct omap_dispc_isr_data *isr_data;
@@ -2878,6 +2959,13 @@ void dispc_irq_handler(void)
spin_lock(&dispc.irq_lock);
irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
+ irqenable = dispc_read_reg(DISPC_IRQENABLE);
+
+ /* IRQ is not for us */
+ if (!(irqstatus & irqenable)) {
+ spin_unlock(&dispc.irq_lock);
+ return IRQ_NONE;
+ }
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spin_lock(&dispc.irq_stats_lock);
@@ -2929,6 +3017,8 @@ void dispc_irq_handler(void)
}
spin_unlock(&dispc.irq_lock);
+
+ return IRQ_HANDLED;
}
static void dispc_error_worker(struct work_struct *work)
@@ -3253,6 +3343,15 @@ static void _omap_dispc_initial_config(void)
l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
dispc_write_reg(DISPC_SYSCONFIG, l);
+ /* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
+ if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
+ l = dispc_read_reg(DISPC_DIVISOR);
+ /* Use DISPC_DIVISOR.LCD, instead of DISPC_DIVISOR1.LCD */
+ l = FLD_MOD(l, 1, 0, 0);
+ l = FLD_MOD(l, 1, 23, 16);
+ dispc_write_reg(DISPC_DIVISOR, l);
+ }
+
/* FUNCGATED */
if (dss_has_feature(FEAT_FUNCGATED))
REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
@@ -3269,47 +3368,6 @@ static void _omap_dispc_initial_config(void)
dispc_read_plane_fifo_sizes();
}
-int dispc_init(void)
-{
- u32 rev;
-
- spin_lock_init(&dispc.irq_lock);
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_lock_init(&dispc.irq_stats_lock);
- dispc.irq_stats.last_reset = jiffies;
-#endif
-
- INIT_WORK(&dispc.error_work, dispc_error_worker);
-
- dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
- if (!dispc.base) {
- DSSERR("can't ioremap DISPC\n");
- return -ENOMEM;
- }
-
- enable_clocks(1);
-
- _omap_dispc_initial_config();
-
- _omap_dispc_initialize_irq();
-
- dispc_save_context();
-
- rev = dispc_read_reg(DISPC_REVISION);
- printk(KERN_INFO "OMAP DISPC rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- enable_clocks(0);
-
- return 0;
-}
-
-void dispc_exit(void)
-{
- iounmap(dispc.base);
-}
-
int dispc_enable_plane(enum omap_plane plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -3359,3 +3417,94 @@ int dispc_setup_plane(enum omap_plane plane,
return r;
}
+
+/* DISPC HW IP initialisation */
+static int omap_dispchw_probe(struct platform_device *pdev)
+{
+ u32 rev;
+ int r = 0;
+ struct resource *dispc_mem;
+
+ dispc.pdev = pdev;
+
+ spin_lock_init(&dispc.irq_lock);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock_init(&dispc.irq_stats_lock);
+ dispc.irq_stats.last_reset = jiffies;
+#endif
+
+ INIT_WORK(&dispc.error_work, dispc_error_worker);
+
+ dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
+ if (!dispc_mem) {
+ DSSERR("can't get IORESOURCE_MEM DISPC\n");
+ r = -EINVAL;
+ goto fail0;
+ }
+ dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem));
+ if (!dispc.base) {
+ DSSERR("can't ioremap DISPC\n");
+ r = -ENOMEM;
+ goto fail0;
+ }
+ dispc.irq = platform_get_irq(dispc.pdev, 0);
+ if (dispc.irq < 0) {
+ DSSERR("platform_get_irq failed\n");
+ r = -ENODEV;
+ goto fail1;
+ }
+
+ r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED,
+ "OMAP DISPC", dispc.pdev);
+ if (r < 0) {
+ DSSERR("request_irq failed\n");
+ goto fail1;
+ }
+
+ enable_clocks(1);
+
+ _omap_dispc_initial_config();
+
+ _omap_dispc_initialize_irq();
+
+ dispc_save_context();
+
+ rev = dispc_read_reg(DISPC_REVISION);
+ dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ enable_clocks(0);
+
+ return 0;
+fail1:
+ iounmap(dispc.base);
+fail0:
+ return r;
+}
+
+static int omap_dispchw_remove(struct platform_device *pdev)
+{
+ free_irq(dispc.irq, dispc.pdev);
+ iounmap(dispc.base);
+ return 0;
+}
+
+static struct platform_driver omap_dispchw_driver = {
+ .probe = omap_dispchw_probe,
+ .remove = omap_dispchw_remove,
+ .driver = {
+ .name = "omapdss_dispc",
+ .owner = THIS_MODULE,
+ },
+};
+
+int dispc_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_dispchw_driver);
+}
+
+void dispc_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_dispchw_driver);
+}
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 22dd7a474f79..a85a6f38b40c 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -25,14 +25,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
-#include <linux/list.h>
#include <linux/platform_device.h>
#include <plat/display.h>
#include "dss.h"
-static LIST_HEAD(display_list);
-
static ssize_t display_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -345,6 +342,7 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
return 16;
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_SDI:
+ case OMAP_DISPLAY_TYPE_HDMI:
return 24;
default:
BUG();
@@ -371,6 +369,7 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
case OMAP_DISPLAY_TYPE_DPI:
bpp = dssdev->phy.dpi.data_lines;
break;
+ case OMAP_DISPLAY_TYPE_HDMI:
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_SDI:
bpp = 24;
@@ -396,29 +395,6 @@ void dss_init_device(struct platform_device *pdev,
switch (dssdev->type) {
#ifdef CONFIG_OMAP2_DSS_DPI
case OMAP_DISPLAY_TYPE_DPI:
-#endif
-#ifdef CONFIG_OMAP2_DSS_RFBI
- case OMAP_DISPLAY_TYPE_DBI:
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
- case OMAP_DISPLAY_TYPE_SDI:
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- case OMAP_DISPLAY_TYPE_DSI:
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- case OMAP_DISPLAY_TYPE_VENC:
-#endif
- break;
- default:
- DSSERR("Support for display '%s' not compiled in.\n",
- dssdev->name);
- return;
- }
-
- switch (dssdev->type) {
-#ifdef CONFIG_OMAP2_DSS_DPI
- case OMAP_DISPLAY_TYPE_DPI:
r = dpi_init_display(dssdev);
break;
#endif
@@ -442,8 +418,13 @@ void dss_init_device(struct platform_device *pdev,
r = dsi_init_display(dssdev);
break;
#endif
+ case OMAP_DISPLAY_TYPE_HDMI:
+ r = hdmi_init_display(dssdev);
+ break;
default:
- BUG();
+ DSSERR("Support for display '%s' not compiled in.\n",
+ dssdev->name);
+ return;
}
if (r) {
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 75fb0a515430..2d3ca4ca4a05 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -57,13 +57,13 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
if (r)
return r;
- dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC);
r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
return r;
- *fck = dsi_cinfo.dsi1_pll_fclk;
+ *fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
*lck_div = dispc_cinfo.lck_div;
*pck_div = dispc_cinfo.pck_div;
@@ -107,7 +107,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
bool is_tft;
int r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
@@ -137,7 +137,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
dispc_set_lcd_timings(dssdev->manager->id, t);
err0:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return r;
}
@@ -173,14 +173,14 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
goto err1;
}
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
r = dpi_basic_init(dssdev);
if (r)
goto err2;
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- dss_clk_enable(DSS_CLK_FCK2);
+ dss_clk_enable(DSS_CLK_SYSCK);
r = dsi_pll_init(dssdev, 0, 1);
if (r)
goto err3;
@@ -199,10 +199,10 @@ err4:
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
dsi_pll_uninit();
err3:
- dss_clk_disable(DSS_CLK_FCK2);
+ dss_clk_disable(DSS_CLK_SYSCK);
#endif
err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
err1:
@@ -217,12 +217,12 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
dssdev->manager->disable(dssdev->manager);
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
dsi_pll_uninit();
- dss_clk_disable(DSS_CLK_FCK2);
+ dss_clk_disable(DSS_CLK_SYSCK);
#endif
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
@@ -271,7 +271,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
if (r)
return r;
- fck = dsi_cinfo.dsi1_pll_fclk;
+ fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
lck_div = dispc_cinfo.lck_div;
pck_div = dispc_cinfo.pck_div;
}
@@ -303,22 +303,27 @@ int dpi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
- return 0;
-}
+ if (cpu_is_omap34xx() && dpi.vdds_dsi_reg == NULL) {
+ struct regulator *vdds_dsi;
-int dpi_init(struct platform_device *pdev)
-{
- if (cpu_is_omap34xx()) {
- dpi.vdds_dsi_reg = dss_get_vdds_dsi();
- if (IS_ERR(dpi.vdds_dsi_reg)) {
+ vdds_dsi = dss_get_vdds_dsi();
+
+ if (IS_ERR(vdds_dsi)) {
DSSERR("can't get VDDS_DSI regulator\n");
- return PTR_ERR(dpi.vdds_dsi_reg);
+ return PTR_ERR(vdds_dsi);
}
+
+ dpi.vdds_dsi_reg = vdds_dsi;
}
return 0;
}
+int dpi_init(void)
+{
+ return 0;
+}
+
void dpi_exit(void)
{
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index ddf3a0560822..0a7f1a47f8e3 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -38,12 +38,11 @@
#include <plat/clock.h>
#include "dss.h"
+#include "dss_features.h"
/*#define VERBOSE_IRQ*/
#define DSI_CATCH_MISSING_TE
-#define DSI_BASE 0x4804FC00
-
struct dsi_reg { u16 idx; };
#define DSI_REG(idx) ((const struct dsi_reg) { idx })
@@ -186,13 +185,15 @@ struct dsi_reg { u16 idx; };
#define DSI_DT_RX_SHORT_READ_1 0x21
#define DSI_DT_RX_SHORT_READ_2 0x22
-#define FINT_MAX 2100000
-#define FINT_MIN 750000
-#define REGN_MAX (1 << 7)
-#define REGM_MAX ((1 << 11) - 1)
-#define REGM3_MAX (1 << 4)
-#define REGM4_MAX (1 << 4)
-#define LP_DIV_MAX ((1 << 13) - 1)
+typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+
+#define DSI_MAX_NR_ISRS 2
+
+struct dsi_isr_data {
+ omap_dsi_isr_t isr;
+ void *arg;
+ u32 mask;
+};
enum fifo_size {
DSI_FIFO_SIZE_0 = 0,
@@ -220,9 +221,17 @@ struct dsi_irq_stats {
unsigned cio_irqs[32];
};
+struct dsi_isr_tables {
+ struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
+};
+
static struct
{
+ struct platform_device *pdev;
void __iomem *base;
+ int irq;
struct dsi_clock_info current_cinfo;
@@ -232,6 +241,7 @@ static struct
enum dsi_vc_mode mode;
struct omap_dss_device *dssdev;
enum fifo_size fifo_size;
+ int vc_id;
} vc[4];
struct mutex lock;
@@ -239,8 +249,10 @@ static struct
unsigned pll_locked;
- struct completion bta_completion;
- void (*bta_callback)(void);
+ spinlock_t irq_lock;
+ struct dsi_isr_tables isr_tables;
+ /* space for a copy used by the interrupt handler */
+ struct dsi_isr_tables isr_tables_copy;
int update_channel;
struct dsi_update_region update_region;
@@ -275,6 +287,11 @@ static struct
spinlock_t irq_stats_lock;
struct dsi_irq_stats irq_stats;
#endif
+ /* DSI PLL Parameter Ranges */
+ unsigned long regm_max, regn_max;
+ unsigned long regm_dispc_max, regm_dsi_max;
+ unsigned long fint_min, fint_max;
+ unsigned long lpdiv_max;
} dsi;
#ifdef DEBUG
@@ -318,6 +335,11 @@ static bool dsi_bus_is_locked(void)
return dsi.bus_lock.count == 0;
}
+static void dsi_completion_handler(void *data, u32 mask)
+{
+ complete((struct completion *)data);
+}
+
static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
int value)
{
@@ -387,6 +409,9 @@ static void dsi_perf_show(const char *name)
static void print_irq_status(u32 status)
{
+ if (status == 0)
+ return;
+
#ifndef VERBOSE_IRQ
if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
return;
@@ -422,6 +447,9 @@ static void print_irq_status(u32 status)
static void print_irq_status_vc(int channel, u32 status)
{
+ if (status == 0)
+ return;
+
#ifndef VERBOSE_IRQ
if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
return;
@@ -448,6 +476,9 @@ static void print_irq_status_vc(int channel, u32 status)
static void print_irq_status_cio(u32 status)
{
+ if (status == 0)
+ return;
+
printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
#define PIS(x) \
@@ -478,22 +509,33 @@ static void print_irq_status_cio(u32 status)
printk("\n");
}
-static int debug_irq;
-
-/* called from dss */
-void dsi_irq_handler(void)
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+static void dsi_collect_irq_stats(u32 irqstatus, u32 *vcstatus, u32 ciostatus)
{
- u32 irqstatus, vcstatus, ciostatus;
int i;
- irqstatus = dsi_read_reg(DSI_IRQSTATUS);
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spin_lock(&dsi.irq_stats_lock);
+
dsi.irq_stats.irq_count++;
dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs);
+
+ for (i = 0; i < 4; ++i)
+ dss_collect_irq_stats(vcstatus[i], dsi.irq_stats.vc_irqs[i]);
+
+ dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
+
+ spin_unlock(&dsi.irq_stats_lock);
+}
+#else
+#define dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus)
#endif
+static int debug_irq;
+
+static void dsi_handle_irq_errors(u32 irqstatus, u32 *vcstatus, u32 ciostatus)
+{
+ int i;
+
if (irqstatus & DSI_IRQ_ERROR_MASK) {
DSSERR("DSI error, irqstatus %x\n", irqstatus);
print_irq_status(irqstatus);
@@ -504,37 +546,88 @@ void dsi_irq_handler(void)
print_irq_status(irqstatus);
}
-#ifdef DSI_CATCH_MISSING_TE
- if (irqstatus & DSI_IRQ_TE_TRIGGER)
- del_timer(&dsi.te_timer);
-#endif
+ for (i = 0; i < 4; ++i) {
+ if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
+ DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
+ i, vcstatus[i]);
+ print_irq_status_vc(i, vcstatus[i]);
+ } else if (debug_irq) {
+ print_irq_status_vc(i, vcstatus[i]);
+ }
+ }
+
+ if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
+ DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
+ print_irq_status_cio(ciostatus);
+ } else if (debug_irq) {
+ print_irq_status_cio(ciostatus);
+ }
+}
+
+static void dsi_call_isrs(struct dsi_isr_data *isr_array,
+ unsigned isr_array_size, u32 irqstatus)
+{
+ struct dsi_isr_data *isr_data;
+ int i;
+
+ for (i = 0; i < isr_array_size; i++) {
+ isr_data = &isr_array[i];
+ if (isr_data->isr && isr_data->mask & irqstatus)
+ isr_data->isr(isr_data->arg, irqstatus);
+ }
+}
+
+static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
+ u32 irqstatus, u32 *vcstatus, u32 ciostatus)
+{
+ int i;
+
+ dsi_call_isrs(isr_tables->isr_table,
+ ARRAY_SIZE(isr_tables->isr_table),
+ irqstatus);
for (i = 0; i < 4; ++i) {
- if ((irqstatus & (1<<i)) == 0)
+ if (vcstatus[i] == 0)
continue;
+ dsi_call_isrs(isr_tables->isr_table_vc[i],
+ ARRAY_SIZE(isr_tables->isr_table_vc[i]),
+ vcstatus[i]);
+ }
- vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+ if (ciostatus != 0)
+ dsi_call_isrs(isr_tables->isr_table_cio,
+ ARRAY_SIZE(isr_tables->isr_table_cio),
+ ciostatus);
+}
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]);
-#endif
+static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
+{
+ u32 irqstatus, vcstatus[4], ciostatus;
+ int i;
- if (vcstatus & DSI_VC_IRQ_BTA) {
- complete(&dsi.bta_completion);
+ spin_lock(&dsi.irq_lock);
- if (dsi.bta_callback)
- dsi.bta_callback();
- }
+ irqstatus = dsi_read_reg(DSI_IRQSTATUS);
- if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
- DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
- i, vcstatus);
- print_irq_status_vc(i, vcstatus);
- } else if (debug_irq) {
- print_irq_status_vc(i, vcstatus);
+ /* IRQ is not for us */
+ if (!irqstatus) {
+ spin_unlock(&dsi.irq_lock);
+ return IRQ_NONE;
+ }
+
+ dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
+ /* flush posted write */
+ dsi_read_reg(DSI_IRQSTATUS);
+
+ for (i = 0; i < 4; ++i) {
+ if ((irqstatus & (1 << i)) == 0) {
+ vcstatus[i] = 0;
+ continue;
}
- dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
+ vcstatus[i] = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+
+ dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus[i]);
/* flush posted write */
dsi_read_reg(DSI_VC_IRQSTATUS(i));
}
@@ -542,117 +635,307 @@ void dsi_irq_handler(void)
if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
-#endif
-
dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
/* flush posted write */
dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+ } else {
+ ciostatus = 0;
+ }
- if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
- DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
- print_irq_status_cio(ciostatus);
- } else if (debug_irq) {
- print_irq_status_cio(ciostatus);
- }
+#ifdef DSI_CATCH_MISSING_TE
+ if (irqstatus & DSI_IRQ_TE_TRIGGER)
+ del_timer(&dsi.te_timer);
+#endif
+
+ /* make a copy and unlock, so that isrs can unregister
+ * themselves */
+ memcpy(&dsi.isr_tables_copy, &dsi.isr_tables, sizeof(dsi.isr_tables));
+
+ spin_unlock(&dsi.irq_lock);
+
+ dsi_handle_isrs(&dsi.isr_tables_copy, irqstatus, vcstatus, ciostatus);
+
+ dsi_handle_irq_errors(irqstatus, vcstatus, ciostatus);
+
+ dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus);
+
+ return IRQ_HANDLED;
+}
+
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array,
+ unsigned isr_array_size, u32 default_mask,
+ const struct dsi_reg enable_reg,
+ const struct dsi_reg status_reg)
+{
+ struct dsi_isr_data *isr_data;
+ u32 mask;
+ u32 old_mask;
+ int i;
+
+ mask = default_mask;
+
+ for (i = 0; i < isr_array_size; i++) {
+ isr_data = &isr_array[i];
+
+ if (isr_data->isr == NULL)
+ continue;
+
+ mask |= isr_data->mask;
}
- dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
- /* flush posted write */
- dsi_read_reg(DSI_IRQSTATUS);
+ old_mask = dsi_read_reg(enable_reg);
+ /* clear the irqstatus for newly enabled irqs */
+ dsi_write_reg(status_reg, (mask ^ old_mask) & mask);
+ dsi_write_reg(enable_reg, mask);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_unlock(&dsi.irq_stats_lock);
+ /* flush posted writes */
+ dsi_read_reg(enable_reg);
+ dsi_read_reg(status_reg);
+}
+
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_set_irqs(void)
+{
+ u32 mask = DSI_IRQ_ERROR_MASK;
+#ifdef DSI_CATCH_MISSING_TE
+ mask |= DSI_IRQ_TE_TRIGGER;
#endif
+ _omap_dsi_configure_irqs(dsi.isr_tables.isr_table,
+ ARRAY_SIZE(dsi.isr_tables.isr_table), mask,
+ DSI_IRQENABLE, DSI_IRQSTATUS);
+}
+
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_set_irqs_vc(int vc)
+{
+ _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_vc[vc],
+ ARRAY_SIZE(dsi.isr_tables.isr_table_vc[vc]),
+ DSI_VC_IRQ_ERROR_MASK,
+ DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_set_irqs_cio(void)
+{
+ _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_cio,
+ ARRAY_SIZE(dsi.isr_tables.isr_table_cio),
+ DSI_CIO_IRQ_ERROR_MASK,
+ DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
+}
static void _dsi_initialize_irq(void)
{
- u32 l;
+ unsigned long flags;
+ int vc;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ memset(&dsi.isr_tables, 0, sizeof(dsi.isr_tables));
+
+ _omap_dsi_set_irqs();
+ for (vc = 0; vc < 4; ++vc)
+ _omap_dsi_set_irqs_vc(vc);
+ _omap_dsi_set_irqs_cio();
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+}
+
+static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
+ struct dsi_isr_data *isr_array, unsigned isr_array_size)
+{
+ struct dsi_isr_data *isr_data;
+ int free_idx;
int i;
- /* disable all interrupts */
- dsi_write_reg(DSI_IRQENABLE, 0);
- for (i = 0; i < 4; ++i)
- dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
- dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
+ BUG_ON(isr == NULL);
- /* clear interrupt status */
- l = dsi_read_reg(DSI_IRQSTATUS);
- dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
+ /* check for duplicate entry and find a free slot */
+ free_idx = -1;
+ for (i = 0; i < isr_array_size; i++) {
+ isr_data = &isr_array[i];
- for (i = 0; i < 4; ++i) {
- l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
- dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
+ if (isr_data->isr == isr && isr_data->arg == arg &&
+ isr_data->mask == mask) {
+ return -EINVAL;
+ }
+
+ if (isr_data->isr == NULL && free_idx == -1)
+ free_idx = i;
}
- l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
- dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
+ if (free_idx == -1)
+ return -EBUSY;
- /* enable error irqs */
- l = DSI_IRQ_ERROR_MASK;
-#ifdef DSI_CATCH_MISSING_TE
- l |= DSI_IRQ_TE_TRIGGER;
-#endif
- dsi_write_reg(DSI_IRQENABLE, l);
+ isr_data = &isr_array[free_idx];
+ isr_data->isr = isr;
+ isr_data->arg = arg;
+ isr_data->mask = mask;
- l = DSI_VC_IRQ_ERROR_MASK;
- for (i = 0; i < 4; ++i)
- dsi_write_reg(DSI_VC_IRQENABLE(i), l);
+ return 0;
+}
+
+static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
+ struct dsi_isr_data *isr_array, unsigned isr_array_size)
+{
+ struct dsi_isr_data *isr_data;
+ int i;
+
+ for (i = 0; i < isr_array_size; i++) {
+ isr_data = &isr_array[i];
+ if (isr_data->isr != isr || isr_data->arg != arg ||
+ isr_data->mask != mask)
+ continue;
+
+ isr_data->isr = NULL;
+ isr_data->arg = NULL;
+ isr_data->mask = 0;
+
+ return 0;
+ }
- l = DSI_CIO_IRQ_ERROR_MASK;
- dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, l);
+ return -EINVAL;
}
-static u32 dsi_get_errors(void)
+static int dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
- u32 e;
- spin_lock_irqsave(&dsi.errors_lock, flags);
- e = dsi.errors;
- dsi.errors = 0;
- spin_unlock_irqrestore(&dsi.errors_lock, flags);
- return e;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table,
+ ARRAY_SIZE(dsi.isr_tables.isr_table));
+
+ if (r == 0)
+ _omap_dsi_set_irqs();
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+ return r;
}
-static void dsi_vc_enable_bta_irq(int channel)
+static int dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask)
{
- u32 l;
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table,
+ ARRAY_SIZE(dsi.isr_tables.isr_table));
+
+ if (r == 0)
+ _omap_dsi_set_irqs();
- dsi_write_reg(DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
- l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
- l |= DSI_VC_IRQ_BTA;
- dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+ return r;
}
-static void dsi_vc_disable_bta_irq(int channel)
+static int dsi_register_isr_vc(int channel, omap_dsi_isr_t isr, void *arg,
+ u32 mask)
{
- u32 l;
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_register_isr(isr, arg, mask,
+ dsi.isr_tables.isr_table_vc[channel],
+ ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel]));
+
+ if (r == 0)
+ _omap_dsi_set_irqs_vc(channel);
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+ return r;
+}
+
+static int dsi_unregister_isr_vc(int channel, omap_dsi_isr_t isr, void *arg,
+ u32 mask)
+{
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_unregister_isr(isr, arg, mask,
+ dsi.isr_tables.isr_table_vc[channel],
+ ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel]));
+
+ if (r == 0)
+ _omap_dsi_set_irqs_vc(channel);
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+ return r;
+}
+
+static int dsi_register_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask)
+{
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio,
+ ARRAY_SIZE(dsi.isr_tables.isr_table_cio));
+
+ if (r == 0)
+ _omap_dsi_set_irqs_cio();
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+ return r;
+}
+
+static int dsi_unregister_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask)
+{
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio,
+ ARRAY_SIZE(dsi.isr_tables.isr_table_cio));
+
+ if (r == 0)
+ _omap_dsi_set_irqs_cio();
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
- l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
- l &= ~DSI_VC_IRQ_BTA;
- dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+ return r;
}
-/* DSI func clock. this could also be DSI2_PLL_FCLK */
+static u32 dsi_get_errors(void)
+{
+ unsigned long flags;
+ u32 e;
+ spin_lock_irqsave(&dsi.errors_lock, flags);
+ e = dsi.errors;
+ dsi.errors = 0;
+ spin_unlock_irqrestore(&dsi.errors_lock, flags);
+ return e;
+}
+
+/* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
static inline void enable_clocks(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
static inline void dsi_enable_pll_clock(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_FCK2);
+ dss_clk_enable(DSS_CLK_SYSCK);
else
- dss_clk_disable(DSS_CLK_FCK2);
+ dss_clk_disable(DSS_CLK_SYSCK);
if (enable && dsi.pll_locked) {
if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
@@ -707,14 +990,14 @@ static inline int dsi_if_enable(bool enable)
return 0;
}
-unsigned long dsi_get_dsi1_pll_rate(void)
+unsigned long dsi_get_pll_hsdiv_dispc_rate(void)
{
- return dsi.current_cinfo.dsi1_pll_fclk;
+ return dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk;
}
-static unsigned long dsi_get_dsi2_pll_rate(void)
+static unsigned long dsi_get_pll_hsdiv_dsi_rate(void)
{
- return dsi.current_cinfo.dsi2_pll_fclk;
+ return dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk;
}
static unsigned long dsi_get_txbyteclkhs(void)
@@ -726,12 +1009,12 @@ static unsigned long dsi_fclk_rate(void)
{
unsigned long r;
- if (dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK) {
- /* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
- r = dss_clk_get_rate(DSS_CLK_FCK1);
+ if (dss_get_dsi_clk_source() == DSS_CLK_SRC_FCK) {
+ /* DSI FCLK source is DSS_CLK_FCK */
+ r = dss_clk_get_rate(DSS_CLK_FCK);
} else {
- /* DSI FCLK source is DSI2_PLL_FCLK */
- r = dsi_get_dsi2_pll_rate();
+ /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
+ r = dsi_get_pll_hsdiv_dsi_rate();
}
return r;
@@ -745,7 +1028,7 @@ static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
lp_clk_div = dssdev->phy.dsi.div.lp_clk_div;
- if (lp_clk_div == 0 || lp_clk_div > LP_DIV_MAX)
+ if (lp_clk_div == 0 || lp_clk_div > dsi.lpdiv_max)
return -EINVAL;
dsi_fclk = dsi_fclk_rate();
@@ -795,22 +1078,22 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
struct dsi_clock_info *cinfo)
{
- if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
+ if (cinfo->regn == 0 || cinfo->regn > dsi.regn_max)
return -EINVAL;
- if (cinfo->regm == 0 || cinfo->regm > REGM_MAX)
+ if (cinfo->regm == 0 || cinfo->regm > dsi.regm_max)
return -EINVAL;
- if (cinfo->regm3 > REGM3_MAX)
+ if (cinfo->regm_dispc > dsi.regm_dispc_max)
return -EINVAL;
- if (cinfo->regm4 > REGM4_MAX)
+ if (cinfo->regm_dsi > dsi.regm_dsi_max)
return -EINVAL;
- if (cinfo->use_dss2_fck) {
- cinfo->clkin = dss_clk_get_rate(DSS_CLK_FCK2);
+ if (cinfo->use_sys_clk) {
+ cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
/* XXX it is unclear if highfreq should be used
- * with DSS2_FCK source also */
+ * with DSS_SYS_CLK source also */
cinfo->highfreq = 0;
} else {
cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);
@@ -823,7 +1106,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
- if (cinfo->fint > FINT_MAX || cinfo->fint < FINT_MIN)
+ if (cinfo->fint > dsi.fint_max || cinfo->fint < dsi.fint_min)
return -EINVAL;
cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
@@ -831,15 +1114,17 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
return -EINVAL;
- if (cinfo->regm3 > 0)
- cinfo->dsi1_pll_fclk = cinfo->clkin4ddr / cinfo->regm3;
+ if (cinfo->regm_dispc > 0)
+ cinfo->dsi_pll_hsdiv_dispc_clk =
+ cinfo->clkin4ddr / cinfo->regm_dispc;
else
- cinfo->dsi1_pll_fclk = 0;
+ cinfo->dsi_pll_hsdiv_dispc_clk = 0;
- if (cinfo->regm4 > 0)
- cinfo->dsi2_pll_fclk = cinfo->clkin4ddr / cinfo->regm4;
+ if (cinfo->regm_dsi > 0)
+ cinfo->dsi_pll_hsdiv_dsi_clk =
+ cinfo->clkin4ddr / cinfo->regm_dsi;
else
- cinfo->dsi2_pll_fclk = 0;
+ cinfo->dsi_pll_hsdiv_dsi_clk = 0;
return 0;
}
@@ -852,23 +1137,25 @@ int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
struct dispc_clock_info best_dispc;
int min_fck_per_pck;
int match = 0;
- unsigned long dss_clk_fck2;
+ unsigned long dss_sys_clk, max_dss_fck;
+
+ dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
- dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_FCK2);
+ max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
if (req_pck == dsi.cache_req_pck &&
- dsi.cache_cinfo.clkin == dss_clk_fck2) {
+ dsi.cache_cinfo.clkin == dss_sys_clk) {
DSSDBG("DSI clock info found from cache\n");
*dsi_cinfo = dsi.cache_cinfo;
- dispc_find_clk_divs(is_tft, req_pck, dsi_cinfo->dsi1_pll_fclk,
- dispc_cinfo);
+ dispc_find_clk_divs(is_tft, req_pck,
+ dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
return 0;
}
min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
if (min_fck_per_pck &&
- req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
+ req_pck * min_fck_per_pck > max_dss_fck) {
DSSERR("Requested pixel clock not possible with the current "
"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
"the constraint off.\n");
@@ -882,24 +1169,24 @@ retry:
memset(&best_dispc, 0, sizeof(best_dispc));
memset(&cur, 0, sizeof(cur));
- cur.clkin = dss_clk_fck2;
- cur.use_dss2_fck = 1;
+ cur.clkin = dss_sys_clk;
+ cur.use_sys_clk = 1;
cur.highfreq = 0;
/* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
/* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
/* To reduce PLL lock time, keep Fint high (around 2 MHz) */
- for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
+ for (cur.regn = 1; cur.regn < dsi.regn_max; ++cur.regn) {
if (cur.highfreq == 0)
cur.fint = cur.clkin / cur.regn;
else
cur.fint = cur.clkin / (2 * cur.regn);
- if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
+ if (cur.fint > dsi.fint_max || cur.fint < dsi.fint_min)
continue;
/* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
- for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
+ for (cur.regm = 1; cur.regm < dsi.regm_max; ++cur.regm) {
unsigned long a, b;
a = 2 * cur.regm * (cur.clkin/1000);
@@ -909,30 +1196,32 @@ retry:
if (cur.clkin4ddr > 1800 * 1000 * 1000)
break;
- /* DSI1_PLL_FCLK(MHz) = DSIPHY(MHz) / regm3 < 173MHz */
- for (cur.regm3 = 1; cur.regm3 < REGM3_MAX;
- ++cur.regm3) {
+ /* dsi_pll_hsdiv_dispc_clk(MHz) =
+ * DSIPHY(MHz) / regm_dispc < 173MHz/186MHz */
+ for (cur.regm_dispc = 1; cur.regm_dispc < dsi.regm_dispc_max;
+ ++cur.regm_dispc) {
struct dispc_clock_info cur_dispc;
- cur.dsi1_pll_fclk = cur.clkin4ddr / cur.regm3;
+ cur.dsi_pll_hsdiv_dispc_clk =
+ cur.clkin4ddr / cur.regm_dispc;
/* this will narrow down the search a bit,
* but still give pixclocks below what was
* requested */
- if (cur.dsi1_pll_fclk < req_pck)
+ if (cur.dsi_pll_hsdiv_dispc_clk < req_pck)
break;
- if (cur.dsi1_pll_fclk > DISPC_MAX_FCK)
+ if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
continue;
if (min_fck_per_pck &&
- cur.dsi1_pll_fclk <
+ cur.dsi_pll_hsdiv_dispc_clk <
req_pck * min_fck_per_pck)
continue;
match = 1;
dispc_find_clk_divs(is_tft, req_pck,
- cur.dsi1_pll_fclk,
+ cur.dsi_pll_hsdiv_dispc_clk,
&cur_dispc);
if (abs(cur_dispc.pck - req_pck) <
@@ -961,9 +1250,9 @@ found:
return -EINVAL;
}
- /* DSI2_PLL_FCLK (regm4) is not used */
- best.regm4 = 0;
- best.dsi2_pll_fclk = 0;
+ /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
+ best.regm_dsi = 0;
+ best.dsi_pll_hsdiv_dsi_clk = 0;
if (dsi_cinfo)
*dsi_cinfo = best;
@@ -982,23 +1271,27 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
int r = 0;
u32 l;
int f;
+ u8 regn_start, regn_end, regm_start, regm_end;
+ u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
DSSDBGF();
dsi.current_cinfo.fint = cinfo->fint;
dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr;
- dsi.current_cinfo.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
- dsi.current_cinfo.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
+ dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk =
+ cinfo->dsi_pll_hsdiv_dispc_clk;
+ dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk =
+ cinfo->dsi_pll_hsdiv_dsi_clk;
dsi.current_cinfo.regn = cinfo->regn;
dsi.current_cinfo.regm = cinfo->regm;
- dsi.current_cinfo.regm3 = cinfo->regm3;
- dsi.current_cinfo.regm4 = cinfo->regm4;
+ dsi.current_cinfo.regm_dispc = cinfo->regm_dispc;
+ dsi.current_cinfo.regm_dsi = cinfo->regm_dsi;
DSSDBG("DSI Fint %ld\n", cinfo->fint);
DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
- cinfo->use_dss2_fck ? "dss2_fck" : "pclkfree",
+ cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
cinfo->clkin,
cinfo->highfreq);
@@ -1015,24 +1308,39 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
- DSSDBG("regm3 = %d, dsi1_pll_fclk = %lu\n",
- cinfo->regm3, cinfo->dsi1_pll_fclk);
- DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n",
- cinfo->regm4, cinfo->dsi2_pll_fclk);
+ DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
+ dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
+ dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
+ cinfo->dsi_pll_hsdiv_dispc_clk);
+ DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
+ dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
+ dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
+ cinfo->dsi_pll_hsdiv_dsi_clk);
+
+ dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
+ dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
+ dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
+ &regm_dispc_end);
+ dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
+ &regm_dsi_end);
REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
- l = FLD_MOD(l, cinfo->regn - 1, 7, 1); /* DSI_PLL_REGN */
- l = FLD_MOD(l, cinfo->regm, 18, 8); /* DSI_PLL_REGM */
- l = FLD_MOD(l, cinfo->regm3 > 0 ? cinfo->regm3 - 1 : 0,
- 22, 19); /* DSI_CLOCK_DIV */
- l = FLD_MOD(l, cinfo->regm4 > 0 ? cinfo->regm4 - 1 : 0,
- 26, 23); /* DSIPROTO_CLOCK_DIV */
+ /* DSI_PLL_REGN */
+ l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
+ /* DSI_PLL_REGM */
+ l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
+ /* DSI_CLOCK_DIV */
+ l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
+ regm_dispc_start, regm_dispc_end);
+ /* DSIPROTO_CLOCK_DIV */
+ l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
+ regm_dsi_start, regm_dsi_end);
dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
- BUG_ON(cinfo->fint < 750000 || cinfo->fint > 2100000);
+ BUG_ON(cinfo->fint < dsi.fint_min || cinfo->fint > dsi.fint_max);
if (cinfo->fint < 1000000)
f = 0x3;
else if (cinfo->fint < 1250000)
@@ -1046,7 +1354,7 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
- l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1,
+ l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
11, 11); /* DSI_PLL_CLKSEL */
l = FLD_MOD(l, cinfo->highfreq,
12, 12); /* DSI_PLL_HIGHFREQ */
@@ -1101,6 +1409,26 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
DSSDBG("PLL init\n");
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+ /*
+ * HACK: this is just a quick hack to get the USE_DSI_PLL
+ * option working. USE_DSI_PLL is itself a big hack, and
+ * should be removed.
+ */
+ if (dsi.vdds_dsi_reg == NULL) {
+ struct regulator *vdds_dsi;
+
+ vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi");
+
+ if (IS_ERR(vdds_dsi)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
+ }
+
+ dsi.vdds_dsi_reg = vdds_dsi;
+ }
+#endif
+
enable_clocks(1);
dsi_enable_pll_clock(1);
@@ -1162,6 +1490,10 @@ void dsi_dump_clocks(struct seq_file *s)
{
int clksel;
struct dsi_clock_info *cinfo = &dsi.current_cinfo;
+ enum dss_clk_source dispc_clk_src, dsi_clk_src;
+
+ dispc_clk_src = dss_get_dispc_clk_source();
+ dsi_clk_src = dss_get_dsi_clk_source();
enable_clocks(1);
@@ -1171,30 +1503,34 @@ void dsi_dump_clocks(struct seq_file *s)
seq_printf(s, "dsi pll source = %s\n",
clksel == 0 ?
- "dss2_alwon_fclk" : "pclkfree");
+ "dss_sys_clk" : "pclkfree");
seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
cinfo->clkin4ddr, cinfo->regm);
- seq_printf(s, "dsi1_pll_fck\t%-16luregm3 %u\t(%s)\n",
- cinfo->dsi1_pll_fclk,
- cinfo->regm3,
- dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
+ seq_printf(s, "%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
+ dss_get_generic_clk_source_name(dispc_clk_src),
+ dss_feat_get_clk_source_name(dispc_clk_src),
+ cinfo->dsi_pll_hsdiv_dispc_clk,
+ cinfo->regm_dispc,
+ dispc_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
- seq_printf(s, "dsi2_pll_fck\t%-16luregm4 %u\t(%s)\n",
- cinfo->dsi2_pll_fclk,
- cinfo->regm4,
- dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
+ seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
+ dss_get_generic_clk_source_name(dsi_clk_src),
+ dss_feat_get_clk_source_name(dsi_clk_src),
+ cinfo->dsi_pll_hsdiv_dsi_clk,
+ cinfo->regm_dsi,
+ dsi_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
seq_printf(s, "- DSI -\n");
- seq_printf(s, "dsi fclk source = %s\n",
- dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
- "dss1_alwon_fclk" : "dsi2_pll_fclk");
+ seq_printf(s, "dsi fclk source = %s (%s)\n",
+ dss_get_generic_clk_source_name(dsi_clk_src),
+ dss_feat_get_clk_source_name(dsi_clk_src));
seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate());
@@ -1306,7 +1642,7 @@ void dsi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(DSI_REVISION);
DUMPREG(DSI_SYSCONFIG);
@@ -1378,7 +1714,7 @@ void dsi_dump_regs(struct seq_file *s)
DUMPREG(DSI_PLL_CONFIGURATION1);
DUMPREG(DSI_PLL_CONFIGURATION2);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
@@ -1622,20 +1958,6 @@ static int _dsi_reset(void)
return _dsi_wait_reset();
}
-static void dsi_reset_tx_fifo(int channel)
-{
- u32 mask;
- u32 l;
-
- /* set fifosize of the channel to 0, then return the old size */
- l = dsi_read_reg(DSI_TX_FIFO_VC_SIZE);
-
- mask = FLD_MASK((8 * channel) + 7, (8 * channel) + 4);
- dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l & ~mask);
-
- dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l);
-}
-
static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
{
@@ -1753,8 +2075,6 @@ static void dsi_vc_initial_config(int channel)
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
dsi_write_reg(DSI_VC_CTRL(channel), r);
-
- dsi.vc[channel].mode = DSI_VC_MODE_L4;
}
static int dsi_vc_config_l4(int channel)
@@ -1922,33 +2242,44 @@ static int dsi_vc_send_bta(int channel)
int dsi_vc_send_bta_sync(int channel)
{
+ DECLARE_COMPLETION_ONSTACK(completion);
int r = 0;
u32 err;
- INIT_COMPLETION(dsi.bta_completion);
+ r = dsi_register_isr_vc(channel, dsi_completion_handler,
+ &completion, DSI_VC_IRQ_BTA);
+ if (r)
+ goto err0;
- dsi_vc_enable_bta_irq(channel);
+ r = dsi_register_isr(dsi_completion_handler, &completion,
+ DSI_IRQ_ERROR_MASK);
+ if (r)
+ goto err1;
r = dsi_vc_send_bta(channel);
if (r)
- goto err;
+ goto err2;
- if (wait_for_completion_timeout(&dsi.bta_completion,
+ if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(500)) == 0) {
DSSERR("Failed to receive BTA\n");
r = -EIO;
- goto err;
+ goto err2;
}
err = dsi_get_errors();
if (err) {
DSSERR("Error while sending BTA: %x\n", err);
r = -EIO;
- goto err;
+ goto err2;
}
-err:
- dsi_vc_disable_bta_irq(channel);
-
+err2:
+ dsi_unregister_isr(dsi_completion_handler, &completion,
+ DSI_IRQ_ERROR_MASK);
+err1:
+ dsi_unregister_isr_vc(channel, dsi_completion_handler,
+ &completion, DSI_VC_IRQ_BTA);
+err0:
return r;
}
EXPORT_SYMBOL(dsi_vc_send_bta_sync);
@@ -1961,7 +2292,7 @@ static inline void dsi_vc_write_long_header(int channel, u8 data_type,
WARN_ON(!dsi_bus_is_locked());
- data_id = data_type | channel << 6;
+ data_id = data_type | dsi.vc[channel].vc_id << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
@@ -2064,7 +2395,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
return -EINVAL;
}
- data_id = data_type | channel << 6;
+ data_id = data_type | dsi.vc[channel].vc_id << 6;
r = (data_id << 0) | (data << 8) | (ecc << 24);
@@ -2762,19 +3093,20 @@ static void dsi_te_timeout(unsigned long arg)
}
#endif
+static void dsi_framedone_bta_callback(void *data, u32 mask);
+
static void dsi_handle_framedone(int error)
{
const int channel = dsi.update_channel;
- cancel_delayed_work(&dsi.framedone_timeout_work);
+ dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback,
+ NULL, DSI_VC_IRQ_BTA);
- dsi_vc_disable_bta_irq(channel);
+ cancel_delayed_work(&dsi.framedone_timeout_work);
/* SIDLEMODE back to smart-idle */
dispc_enable_sidle();
- dsi.bta_callback = NULL;
-
if (dsi.te_enabled) {
/* enable LP_RX_TO again after the TE */
REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
@@ -2808,7 +3140,7 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
dsi_handle_framedone(-ETIMEDOUT);
}
-static void dsi_framedone_bta_callback(void)
+static void dsi_framedone_bta_callback(void *data, u32 mask)
{
dsi_handle_framedone(0);
@@ -2848,15 +3180,19 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
* asynchronously.
* */
- dsi.bta_callback = dsi_framedone_bta_callback;
-
- barrier();
-
- dsi_vc_enable_bta_irq(channel);
+ r = dsi_register_isr_vc(channel, dsi_framedone_bta_callback,
+ NULL, DSI_VC_IRQ_BTA);
+ if (r) {
+ DSSERR("Failed to register BTA ISR\n");
+ dsi_handle_framedone(-EIO);
+ return;
+ }
r = dsi_vc_send_bta(channel);
if (r) {
DSSERR("BTA after framedone failed\n");
+ dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback,
+ NULL, DSI_VC_IRQ_BTA);
dsi_handle_framedone(-EIO);
}
}
@@ -2984,12 +3320,12 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
struct dsi_clock_info cinfo;
int r;
- /* we always use DSS2_FCK as input clock */
- cinfo.use_dss2_fck = true;
+ /* we always use DSS_CLK_SYSCK as input clock */
+ cinfo.use_sys_clk = true;
cinfo.regn = dssdev->phy.dsi.div.regn;
cinfo.regm = dssdev->phy.dsi.div.regm;
- cinfo.regm3 = dssdev->phy.dsi.div.regm3;
- cinfo.regm4 = dssdev->phy.dsi.div.regm4;
+ cinfo.regm_dispc = dssdev->phy.dsi.div.regm_dispc;
+ cinfo.regm_dsi = dssdev->phy.dsi.div.regm_dsi;
r = dsi_calc_clock_rates(dssdev, &cinfo);
if (r) {
DSSERR("Failed to calc dsi clocks\n");
@@ -3011,7 +3347,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
int r;
unsigned long long fck;
- fck = dsi_get_dsi1_pll_rate();
+ fck = dsi_get_pll_hsdiv_dispc_rate();
dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div;
dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div;
@@ -3045,8 +3381,8 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
if (r)
goto err1;
- dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK);
- dss_select_dsi_clk_source(DSS_SRC_DSI2_PLL_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC);
+ dss_select_dsi_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI);
DSSDBG("PLL OK\n");
@@ -3082,8 +3418,8 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
err3:
dsi_complexio_uninit();
err2:
- dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
- dss_select_dsi_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(DSS_CLK_SRC_FCK);
err1:
dsi_pll_uninit();
err0:
@@ -3099,8 +3435,8 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
dsi_vc_enable(2, 0);
dsi_vc_enable(3, 0);
- dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
- dss_select_dsi_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(DSS_CLK_SRC_FCK);
dsi_complexio_uninit();
dsi_pll_uninit();
}
@@ -3220,29 +3556,107 @@ int dsi_init_display(struct omap_dss_device *dssdev)
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
- dsi.vc[0].dssdev = dssdev;
- dsi.vc[1].dssdev = dssdev;
+ if (dsi.vdds_dsi_reg == NULL) {
+ struct regulator *vdds_dsi;
+
+ vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi");
+
+ if (IS_ERR(vdds_dsi)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
+ }
+
+ dsi.vdds_dsi_reg = vdds_dsi;
+ }
return 0;
}
-void dsi_wait_dsi1_pll_active(void)
+int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) {
+ if (!dsi.vc[i].dssdev) {
+ dsi.vc[i].dssdev = dssdev;
+ *channel = i;
+ return 0;
+ }
+ }
+
+ DSSERR("cannot get VC for display %s", dssdev->name);
+ return -ENOSPC;
+}
+EXPORT_SYMBOL(omap_dsi_request_vc);
+
+int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
+{
+ if (vc_id < 0 || vc_id > 3) {
+ DSSERR("VC ID out of range\n");
+ return -EINVAL;
+ }
+
+ if (channel < 0 || channel > 3) {
+ DSSERR("Virtual Channel out of range\n");
+ return -EINVAL;
+ }
+
+ if (dsi.vc[channel].dssdev != dssdev) {
+ DSSERR("Virtual Channel not allocated to display %s\n",
+ dssdev->name);
+ return -EINVAL;
+ }
+
+ dsi.vc[channel].vc_id = vc_id;
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_dsi_set_vc_id);
+
+void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
+{
+ if ((channel >= 0 && channel <= 3) &&
+ dsi.vc[channel].dssdev == dssdev) {
+ dsi.vc[channel].dssdev = NULL;
+ dsi.vc[channel].vc_id = 0;
+ }
+}
+EXPORT_SYMBOL(omap_dsi_release_vc);
+
+void dsi_wait_pll_hsdiv_dispc_active(void)
{
if (wait_for_bit_change(DSI_PLL_STATUS, 7, 1) != 1)
- DSSERR("DSI1 PLL clock not active\n");
+ DSSERR("%s (%s) not active\n",
+ dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
+ dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
}
-void dsi_wait_dsi2_pll_active(void)
+void dsi_wait_pll_hsdiv_dsi_active(void)
{
if (wait_for_bit_change(DSI_PLL_STATUS, 8, 1) != 1)
- DSSERR("DSI2 PLL clock not active\n");
+ DSSERR("%s (%s) not active\n",
+ dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
+ dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
+}
+
+static void dsi_calc_clock_param_ranges(void)
+{
+ dsi.regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
+ dsi.regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
+ dsi.regm_dispc_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
+ dsi.regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
+ dsi.fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
+ dsi.fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
+ dsi.lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
}
-int dsi_init(struct platform_device *pdev)
+static int dsi_init(struct platform_device *pdev)
{
u32 rev;
- int r;
+ int r, i;
+ struct resource *dsi_mem;
+ spin_lock_init(&dsi.irq_lock);
spin_lock_init(&dsi.errors_lock);
dsi.errors = 0;
@@ -3251,8 +3665,6 @@ int dsi_init(struct platform_device *pdev)
dsi.irq_stats.last_reset = jiffies;
#endif
- init_completion(&dsi.bta_completion);
-
mutex_init(&dsi.lock);
sema_init(&dsi.bus_lock, 1);
@@ -3268,24 +3680,45 @@ int dsi_init(struct platform_device *pdev)
dsi.te_timer.function = dsi_te_timeout;
dsi.te_timer.data = 0;
#endif
- dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
+ dsi_mem = platform_get_resource(dsi.pdev, IORESOURCE_MEM, 0);
+ if (!dsi_mem) {
+ DSSERR("can't get IORESOURCE_MEM DSI\n");
+ r = -EINVAL;
+ goto err1;
+ }
+ dsi.base = ioremap(dsi_mem->start, resource_size(dsi_mem));
if (!dsi.base) {
DSSERR("can't ioremap DSI\n");
r = -ENOMEM;
goto err1;
}
+ dsi.irq = platform_get_irq(dsi.pdev, 0);
+ if (dsi.irq < 0) {
+ DSSERR("platform_get_irq failed\n");
+ r = -ENODEV;
+ goto err2;
+ }
- dsi.vdds_dsi_reg = dss_get_vdds_dsi();
- if (IS_ERR(dsi.vdds_dsi_reg)) {
- DSSERR("can't get VDDS_DSI regulator\n");
- r = PTR_ERR(dsi.vdds_dsi_reg);
+ r = request_irq(dsi.irq, omap_dsi_irq_handler, IRQF_SHARED,
+ "OMAP DSI1", dsi.pdev);
+ if (r < 0) {
+ DSSERR("request_irq failed\n");
goto err2;
}
+ /* DSI VCs initialization */
+ for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) {
+ dsi.vc[i].mode = DSI_VC_MODE_L4;
+ dsi.vc[i].dssdev = NULL;
+ dsi.vc[i].vc_id = 0;
+ }
+
+ dsi_calc_clock_param_ranges();
+
enable_clocks(1);
rev = dsi_read_reg(DSI_REVISION);
- printk(KERN_INFO "OMAP DSI rev %d.%d\n",
+ dev_dbg(&pdev->dev, "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
enable_clocks(0);
@@ -3298,8 +3731,14 @@ err1:
return r;
}
-void dsi_exit(void)
+static void dsi_exit(void)
{
+ if (dsi.vdds_dsi_reg != NULL) {
+ regulator_put(dsi.vdds_dsi_reg);
+ dsi.vdds_dsi_reg = NULL;
+ }
+
+ free_irq(dsi.irq, dsi.pdev);
iounmap(dsi.base);
destroy_workqueue(dsi.workqueue);
@@ -3307,3 +3746,41 @@ void dsi_exit(void)
DSSDBG("omap_dsi_exit\n");
}
+/* DSI1 HW IP initialisation */
+static int omap_dsi1hw_probe(struct platform_device *pdev)
+{
+ int r;
+ dsi.pdev = pdev;
+ r = dsi_init(pdev);
+ if (r) {
+ DSSERR("Failed to initialize DSI\n");
+ goto err_dsi;
+ }
+err_dsi:
+ return r;
+}
+
+static int omap_dsi1hw_remove(struct platform_device *pdev)
+{
+ dsi_exit();
+ return 0;
+}
+
+static struct platform_driver omap_dsi1hw_driver = {
+ .probe = omap_dsi1hw_probe,
+ .remove = omap_dsi1hw_remove,
+ .driver = {
+ .name = "omapdss_dsi1",
+ .owner = THIS_MODULE,
+ },
+};
+
+int dsi_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_dsi1hw_driver);
+}
+
+void dsi_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_dsi1hw_driver);
+}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 77c3621c9171..3f1fee63c678 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -26,14 +26,13 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
#include <plat/display.h>
+#include <plat/clock.h>
#include "dss.h"
-
-#define DSS_BASE 0x48050000
+#include "dss_features.h"
#define DSS_SZ_REGS SZ_512
@@ -59,9 +58,17 @@ struct dss_reg {
dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
static struct {
+ struct platform_device *pdev;
void __iomem *base;
+ int ctx_id;
struct clk *dpll4_m4_ck;
+ struct clk *dss_ick;
+ struct clk *dss_fck;
+ struct clk *dss_sys_clk;
+ struct clk *dss_tv_fck;
+ struct clk *dss_video_fck;
+ unsigned num_clks_enabled;
unsigned long cache_req_pck;
unsigned long cache_prate;
@@ -70,10 +77,22 @@ static struct {
enum dss_clk_source dsi_clk_source;
enum dss_clk_source dispc_clk_source;
+ enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
+static const char * const dss_generic_clk_source_names[] = {
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC",
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI",
+ [DSS_CLK_SRC_FCK] = "DSS_FCK",
+};
+
+static void dss_clk_enable_all_no_ctx(void);
+static void dss_clk_disable_all_no_ctx(void);
+static void dss_clk_enable_no_ctx(enum dss_clock clks);
+static void dss_clk_disable_no_ctx(enum dss_clock clks);
+
static int _omap_dss_wait_reset(void);
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
@@ -99,10 +118,11 @@ void dss_save_context(void)
SR(SYSCONFIG);
SR(CONTROL);
-#ifdef CONFIG_OMAP2_DSS_SDI
- SR(SDI_CONTROL);
- SR(PLL_CONTROL);
-#endif
+ if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
+ OMAP_DISPLAY_TYPE_SDI) {
+ SR(SDI_CONTROL);
+ SR(PLL_CONTROL);
+ }
}
void dss_restore_context(void)
@@ -113,10 +133,11 @@ void dss_restore_context(void)
RR(SYSCONFIG);
RR(CONTROL);
-#ifdef CONFIG_OMAP2_DSS_SDI
- RR(SDI_CONTROL);
- RR(PLL_CONTROL);
-#endif
+ if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
+ OMAP_DISPLAY_TYPE_SDI) {
+ RR(SDI_CONTROL);
+ RR(PLL_CONTROL);
+ }
}
#undef SR
@@ -209,66 +230,96 @@ void dss_sdi_disable(void)
REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}
+const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src)
+{
+ return dss_generic_clk_source_names[clk_src];
+}
+
void dss_dump_clocks(struct seq_file *s)
{
unsigned long dpll4_ck_rate;
unsigned long dpll4_m4_ck_rate;
+ const char *fclk_name, *fclk_real_name;
+ unsigned long fclk_rate;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
-
- dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
seq_printf(s, "- DSS -\n");
- seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
+ fclk_name = dss_get_generic_clk_source_name(DSS_CLK_SRC_FCK);
+ fclk_real_name = dss_feat_get_clk_source_name(DSS_CLK_SRC_FCK);
+ fclk_rate = dss_clk_get_rate(DSS_CLK_FCK);
- if (cpu_is_omap3630())
- seq_printf(s, "dss1_alwon_fclk = %lu / %lu = %lu\n",
- dpll4_ck_rate,
- dpll4_ck_rate / dpll4_m4_ck_rate,
- dss_clk_get_rate(DSS_CLK_FCK1));
- else
- seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
- dpll4_ck_rate,
- dpll4_ck_rate / dpll4_m4_ck_rate,
- dss_clk_get_rate(DSS_CLK_FCK1));
+ if (dss.dpll4_m4_ck) {
+ dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+ dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
+
+ seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ if (cpu_is_omap3630() || cpu_is_omap44xx())
+ seq_printf(s, "%s (%s) = %lu / %lu = %lu\n",
+ fclk_name, fclk_real_name,
+ dpll4_ck_rate,
+ dpll4_ck_rate / dpll4_m4_ck_rate,
+ fclk_rate);
+ else
+ seq_printf(s, "%s (%s) = %lu / %lu * 2 = %lu\n",
+ fclk_name, fclk_real_name,
+ dpll4_ck_rate,
+ dpll4_ck_rate / dpll4_m4_ck_rate,
+ fclk_rate);
+ } else {
+ seq_printf(s, "%s (%s) = %lu\n",
+ fclk_name, fclk_real_name,
+ fclk_rate);
+ }
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(DSS_REVISION);
DUMPREG(DSS_SYSCONFIG);
DUMPREG(DSS_SYSSTATUS);
DUMPREG(DSS_IRQSTATUS);
DUMPREG(DSS_CONTROL);
- DUMPREG(DSS_SDI_CONTROL);
- DUMPREG(DSS_PLL_CONTROL);
- DUMPREG(DSS_SDI_STATUS);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
+ OMAP_DISPLAY_TYPE_SDI) {
+ DUMPREG(DSS_SDI_CONTROL);
+ DUMPREG(DSS_PLL_CONTROL);
+ DUMPREG(DSS_SDI_STATUS);
+ }
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
{
int b;
+ u8 start, end;
+
+ switch (clk_src) {
+ case DSS_CLK_SRC_FCK:
+ b = 0;
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ b = 1;
+ dsi_wait_pll_hsdiv_dispc_active();
+ break;
+ default:
+ BUG();
+ }
- BUG_ON(clk_src != DSS_SRC_DSI1_PLL_FCLK &&
- clk_src != DSS_SRC_DSS1_ALWON_FCLK);
-
- b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
-
- if (clk_src == DSS_SRC_DSI1_PLL_FCLK)
- dsi_wait_dsi1_pll_active();
+ dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
- REG_FLD_MOD(DSS_CONTROL, b, 0, 0); /* DISPC_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, b, start, end); /* DISPC_CLK_SWITCH */
dss.dispc_clk_source = clk_src;
}
@@ -277,19 +328,51 @@ void dss_select_dsi_clk_source(enum dss_clk_source clk_src)
{
int b;
- BUG_ON(clk_src != DSS_SRC_DSI2_PLL_FCLK &&
- clk_src != DSS_SRC_DSS1_ALWON_FCLK);
-
- b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
-
- if (clk_src == DSS_SRC_DSI2_PLL_FCLK)
- dsi_wait_dsi2_pll_active();
+ switch (clk_src) {
+ case DSS_CLK_SRC_FCK:
+ b = 0;
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
+ b = 1;
+ dsi_wait_pll_hsdiv_dsi_active();
+ break;
+ default:
+ BUG();
+ }
REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
dss.dsi_clk_source = clk_src;
}
+void dss_select_lcd_clk_source(enum omap_channel channel,
+ enum dss_clk_source clk_src)
+{
+ int b, ix, pos;
+
+ if (!dss_has_feature(FEAT_LCD_CLK_SRC))
+ return;
+
+ switch (clk_src) {
+ case DSS_CLK_SRC_FCK:
+ b = 0;
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
+ b = 1;
+ dsi_wait_pll_hsdiv_dispc_active();
+ break;
+ default:
+ BUG();
+ }
+
+ pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
+ REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */
+
+ ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
+ dss.lcd_clk_source[ix] = clk_src;
+}
+
enum dss_clk_source dss_get_dispc_clk_source(void)
{
return dss.dispc_clk_source;
@@ -300,34 +383,52 @@ enum dss_clk_source dss_get_dsi_clk_source(void)
return dss.dsi_clk_source;
}
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
+{
+ int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
+ return dss.lcd_clk_source[ix];
+}
+
/* calculate clock rates using dividers in cinfo */
int dss_calc_clock_rates(struct dss_clock_info *cinfo)
{
- unsigned long prate;
+ if (dss.dpll4_m4_ck) {
+ unsigned long prate;
+ u16 fck_div_max = 16;
- if (cinfo->fck_div > (cpu_is_omap3630() ? 32 : 16) ||
- cinfo->fck_div == 0)
- return -EINVAL;
+ if (cpu_is_omap3630() || cpu_is_omap44xx())
+ fck_div_max = 32;
+
+ if (cinfo->fck_div > fck_div_max || cinfo->fck_div == 0)
+ return -EINVAL;
- prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+ prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- cinfo->fck = prate / cinfo->fck_div;
+ cinfo->fck = prate / cinfo->fck_div;
+ } else {
+ if (cinfo->fck_div != 0)
+ return -EINVAL;
+ cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ }
return 0;
}
int dss_set_clock_div(struct dss_clock_info *cinfo)
{
- unsigned long prate;
- int r;
+ if (dss.dpll4_m4_ck) {
+ unsigned long prate;
+ int r;
- if (cpu_is_omap34xx()) {
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
DSSDBG("dpll4_m4 = %ld\n", prate);
r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
if (r)
return r;
+ } else {
+ if (cinfo->fck_div != 0)
+ return -EINVAL;
}
DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
@@ -337,12 +438,14 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
int dss_get_clock_div(struct dss_clock_info *cinfo)
{
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
- if (cpu_is_omap34xx()) {
+ if (dss.dpll4_m4_ck) {
unsigned long prate;
+
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- if (cpu_is_omap3630())
+
+ if (cpu_is_omap3630() || cpu_is_omap44xx())
cinfo->fck_div = prate / (cinfo->fck);
else
cinfo->fck_div = prate / (cinfo->fck / 2);
@@ -355,7 +458,7 @@ int dss_get_clock_div(struct dss_clock_info *cinfo)
unsigned long dss_get_dpll4_rate(void)
{
- if (cpu_is_omap34xx())
+ if (dss.dpll4_m4_ck)
return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
else
return 0;
@@ -369,16 +472,18 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
struct dss_clock_info best_dss;
struct dispc_clock_info best_dispc;
- unsigned long fck;
+ unsigned long fck, max_dss_fck;
- u16 fck_div;
+ u16 fck_div, fck_div_max = 16;
int match = 0;
int min_fck_per_pck;
prate = dss_get_dpll4_rate();
- fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+
+ fck = dss_clk_get_rate(DSS_CLK_FCK);
if (req_pck == dss.cache_req_pck &&
((cpu_is_omap34xx() && prate == dss.cache_prate) ||
dss.cache_dss_cinfo.fck == fck)) {
@@ -391,7 +496,7 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
if (min_fck_per_pck &&
- req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
+ req_pck * min_fck_per_pck > max_dss_fck) {
DSSERR("Requested pixel clock not possible with the current "
"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
"the constraint off.\n");
@@ -402,10 +507,10 @@ retry:
memset(&best_dss, 0, sizeof(best_dss));
memset(&best_dispc, 0, sizeof(best_dispc));
- if (cpu_is_omap24xx()) {
+ if (dss.dpll4_m4_ck == NULL) {
struct dispc_clock_info cur_dispc;
/* XXX can we change the clock on omap2? */
- fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ fck = dss_clk_get_rate(DSS_CLK_FCK);
fck_div = 1;
dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
@@ -417,17 +522,19 @@ retry:
best_dispc = cur_dispc;
goto found;
- } else if (cpu_is_omap34xx()) {
- for (fck_div = (cpu_is_omap3630() ? 32 : 16);
- fck_div > 0; --fck_div) {
+ } else {
+ if (cpu_is_omap3630() || cpu_is_omap44xx())
+ fck_div_max = 32;
+
+ for (fck_div = fck_div_max; fck_div > 0; --fck_div) {
struct dispc_clock_info cur_dispc;
- if (cpu_is_omap3630())
+ if (fck_div_max == 32)
fck = prate / fck_div;
else
fck = prate / fck_div * 2;
- if (fck > DISPC_MAX_FCK)
+ if (fck > max_dss_fck)
continue;
if (min_fck_per_pck &&
@@ -450,8 +557,6 @@ retry:
goto found;
}
}
- } else {
- BUG();
}
found:
@@ -482,31 +587,6 @@ found:
return 0;
}
-
-
-static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
-{
- dispc_irq_handler();
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
-{
- u32 irqstatus;
-
- irqstatus = dss_read_reg(DSS_IRQSTATUS);
-
- if (irqstatus & (1<<0)) /* DISPC_IRQ */
- dispc_irq_handler();
-#ifdef CONFIG_OMAP2_DSS_DSI
- if (irqstatus & (1<<1)) /* DSI_IRQ */
- dsi_irq_handler();
-#endif
-
- return IRQ_HANDLED;
-}
-
static int _omap_dss_wait_reset(void)
{
int t = 0;
@@ -549,34 +629,45 @@ void dss_set_dac_pwrdn_bgz(bool enable)
REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
}
-int dss_init(bool skip_init)
+void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi)
+{
+ REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */
+}
+
+static int dss_init(void)
{
int r;
u32 rev;
+ struct resource *dss_mem;
+ struct clk *dpll4_m4_ck;
- dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
+ dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
+ if (!dss_mem) {
+ DSSERR("can't get IORESOURCE_MEM DSS\n");
+ r = -EINVAL;
+ goto fail0;
+ }
+ dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
if (!dss.base) {
DSSERR("can't ioremap DSS\n");
r = -ENOMEM;
goto fail0;
}
- if (!skip_init) {
- /* disable LCD and DIGIT output. This seems to fix the synclost
- * problem that we get, if the bootloader starts the DSS and
- * the kernel resets it */
- omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
+ /* disable LCD and DIGIT output. This seems to fix the synclost
+ * problem that we get, if the bootloader starts the DSS and
+ * the kernel resets it */
+ omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
- /* We need to wait here a bit, otherwise we sometimes start to
- * get synclost errors, and after that only power cycle will
- * restore DSS functionality. I have no idea why this happens.
- * And we have to wait _before_ resetting the DSS, but after
- * enabling clocks.
- */
- msleep(50);
+ /* We need to wait here a bit, otherwise we sometimes start to
+ * get synclost errors, and after that only power cycle will
+ * restore DSS functionality. I have no idea why this happens.
+ * And we have to wait _before_ resetting the DSS, but after
+ * enabling clocks.
+ */
+ msleep(50);
- _omap_dss_reset();
- }
+ _omap_dss_reset();
/* autoidle */
REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
@@ -589,29 +680,30 @@ int dss_init(bool skip_init)
REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
#endif
-
- r = request_irq(INT_24XX_DSS_IRQ,
- cpu_is_omap24xx()
- ? dss_irq_handler_omap2
- : dss_irq_handler_omap3,
- 0, "OMAP DSS", NULL);
-
- if (r < 0) {
- DSSERR("omap2 dss: request_irq failed\n");
- goto fail1;
- }
-
if (cpu_is_omap34xx()) {
- dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
- if (IS_ERR(dss.dpll4_m4_ck)) {
+ dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
+ if (IS_ERR(dpll4_m4_ck)) {
DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dss.dpll4_m4_ck);
- goto fail2;
+ r = PTR_ERR(dpll4_m4_ck);
+ goto fail1;
}
+ } else if (cpu_is_omap44xx()) {
+ dpll4_m4_ck = clk_get(NULL, "dpll_per_m5x2_ck");
+ if (IS_ERR(dpll4_m4_ck)) {
+ DSSERR("Failed to get dpll4_m4_ck\n");
+ r = PTR_ERR(dpll4_m4_ck);
+ goto fail1;
+ }
+ } else { /* omap24xx */
+ dpll4_m4_ck = NULL;
}
- dss.dsi_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
- dss.dispc_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
+ dss.dpll4_m4_ck = dpll4_m4_ck;
+
+ dss.dsi_clk_source = DSS_CLK_SRC_FCK;
+ dss.dispc_clk_source = DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
dss_save_context();
@@ -621,21 +713,416 @@ int dss_init(bool skip_init)
return 0;
-fail2:
- free_irq(INT_24XX_DSS_IRQ, NULL);
fail1:
iounmap(dss.base);
fail0:
return r;
}
-void dss_exit(void)
+static void dss_exit(void)
{
- if (cpu_is_omap34xx())
+ if (dss.dpll4_m4_ck)
clk_put(dss.dpll4_m4_ck);
- free_irq(INT_24XX_DSS_IRQ, NULL);
-
iounmap(dss.base);
}
+/* CONTEXT */
+static int dss_get_ctx_id(void)
+{
+ struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
+ int r;
+
+ if (!pdata->board_data->get_last_off_on_transaction_id)
+ return 0;
+ r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev);
+ if (r < 0) {
+ dev_err(&dss.pdev->dev, "getting transaction ID failed, "
+ "will force context restore\n");
+ r = -1;
+ }
+ return r;
+}
+
+int dss_need_ctx_restore(void)
+{
+ int id = dss_get_ctx_id();
+
+ if (id < 0 || id != dss.ctx_id) {
+ DSSDBG("ctx id %d -> id %d\n",
+ dss.ctx_id, id);
+ dss.ctx_id = id;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static void save_all_ctx(void)
+{
+ DSSDBG("save context\n");
+
+ dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
+
+ dss_save_context();
+ dispc_save_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_save_context();
+#endif
+
+ dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
+}
+
+static void restore_all_ctx(void)
+{
+ DSSDBG("restore context\n");
+
+ dss_clk_enable_all_no_ctx();
+
+ dss_restore_context();
+ dispc_restore_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_restore_context();
+#endif
+
+ dss_clk_disable_all_no_ctx();
+}
+
+static int dss_get_clock(struct clk **clock, const char *clk_name)
+{
+ struct clk *clk;
+
+ clk = clk_get(&dss.pdev->dev, clk_name);
+
+ if (IS_ERR(clk)) {
+		DSSERR("can't get clock %s\n", clk_name);
+ return PTR_ERR(clk);
+ }
+
+ *clock = clk;
+
+ DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
+
+ return 0;
+}
+
+static int dss_get_clocks(void)
+{
+ int r;
+ struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
+
+ dss.dss_ick = NULL;
+ dss.dss_fck = NULL;
+ dss.dss_sys_clk = NULL;
+ dss.dss_tv_fck = NULL;
+ dss.dss_video_fck = NULL;
+
+ r = dss_get_clock(&dss.dss_ick, "ick");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&dss.dss_fck, "fck");
+ if (r)
+ goto err;
+
+ if (!pdata->opt_clock_available) {
+ r = -ENODEV;
+ goto err;
+ }
+
+ if (pdata->opt_clock_available("sys_clk")) {
+ r = dss_get_clock(&dss.dss_sys_clk, "sys_clk");
+ if (r)
+ goto err;
+ }
+
+ if (pdata->opt_clock_available("tv_clk")) {
+ r = dss_get_clock(&dss.dss_tv_fck, "tv_clk");
+ if (r)
+ goto err;
+ }
+
+ if (pdata->opt_clock_available("video_clk")) {
+ r = dss_get_clock(&dss.dss_video_fck, "video_clk");
+ if (r)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (dss.dss_ick)
+ clk_put(dss.dss_ick);
+ if (dss.dss_fck)
+ clk_put(dss.dss_fck);
+ if (dss.dss_sys_clk)
+ clk_put(dss.dss_sys_clk);
+ if (dss.dss_tv_fck)
+ clk_put(dss.dss_tv_fck);
+ if (dss.dss_video_fck)
+ clk_put(dss.dss_video_fck);
+
+ return r;
+}
+
+static void dss_put_clocks(void)
+{
+ if (dss.dss_video_fck)
+ clk_put(dss.dss_video_fck);
+ if (dss.dss_tv_fck)
+ clk_put(dss.dss_tv_fck);
+ if (dss.dss_sys_clk)
+ clk_put(dss.dss_sys_clk);
+ clk_put(dss.dss_fck);
+ clk_put(dss.dss_ick);
+}
+
+unsigned long dss_clk_get_rate(enum dss_clock clk)
+{
+ switch (clk) {
+ case DSS_CLK_ICK:
+ return clk_get_rate(dss.dss_ick);
+ case DSS_CLK_FCK:
+ return clk_get_rate(dss.dss_fck);
+ case DSS_CLK_SYSCK:
+ return clk_get_rate(dss.dss_sys_clk);
+ case DSS_CLK_TVFCK:
+ return clk_get_rate(dss.dss_tv_fck);
+ case DSS_CLK_VIDFCK:
+ return clk_get_rate(dss.dss_video_fck);
+ }
+
+ BUG();
+ return 0;
+}
+
+static unsigned count_clk_bits(enum dss_clock clks)
+{
+ unsigned num_clks = 0;
+
+ if (clks & DSS_CLK_ICK)
+ ++num_clks;
+ if (clks & DSS_CLK_FCK)
+ ++num_clks;
+ if (clks & DSS_CLK_SYSCK)
+ ++num_clks;
+ if (clks & DSS_CLK_TVFCK)
+ ++num_clks;
+ if (clks & DSS_CLK_VIDFCK)
+ ++num_clks;
+
+ return num_clks;
+}
+
+static void dss_clk_enable_no_ctx(enum dss_clock clks)
+{
+ unsigned num_clks = count_clk_bits(clks);
+
+ if (clks & DSS_CLK_ICK)
+ clk_enable(dss.dss_ick);
+ if (clks & DSS_CLK_FCK)
+ clk_enable(dss.dss_fck);
+ if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
+ clk_enable(dss.dss_sys_clk);
+ if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
+ clk_enable(dss.dss_tv_fck);
+ if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
+ clk_enable(dss.dss_video_fck);
+
+ dss.num_clks_enabled += num_clks;
+}
+
+void dss_clk_enable(enum dss_clock clks)
+{
+ bool check_ctx = dss.num_clks_enabled == 0;
+
+ dss_clk_enable_no_ctx(clks);
+
+ /*
+ * HACK: On omap4 the registers may not be accessible right after
+ * enabling the clocks. At some point this will be handled by
+	 * pm_runtime, but for the time being this should make things work.
+ */
+ if (cpu_is_omap44xx() && check_ctx)
+ udelay(10);
+
+ if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
+ restore_all_ctx();
+}
+
+static void dss_clk_disable_no_ctx(enum dss_clock clks)
+{
+ unsigned num_clks = count_clk_bits(clks);
+
+ if (clks & DSS_CLK_ICK)
+ clk_disable(dss.dss_ick);
+ if (clks & DSS_CLK_FCK)
+ clk_disable(dss.dss_fck);
+ if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
+ clk_disable(dss.dss_sys_clk);
+ if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
+ clk_disable(dss.dss_tv_fck);
+ if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
+ clk_disable(dss.dss_video_fck);
+
+ dss.num_clks_enabled -= num_clks;
+}
+
+void dss_clk_disable(enum dss_clock clks)
+{
+ if (cpu_is_omap34xx()) {
+ unsigned num_clks = count_clk_bits(clks);
+
+ BUG_ON(dss.num_clks_enabled < num_clks);
+
+ if (dss.num_clks_enabled == num_clks)
+ save_all_ctx();
+ }
+
+ dss_clk_disable_no_ctx(clks);
+}
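dss_clk_enable() and dss_clk_disable() above reference-count the DSS clocks and, on OMAP3, transparently restore or save the register context when the count leaves or reaches zero. A hedged sketch of the pattern callers are expected to follow (the wrapper function is illustrative only; the clock flags are the ones defined in dss.h):

	static void some_dss_register_access(void)
	{
		/* clocks must be running (and context restored) before
		 * any DSS register is touched */
		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);

		/* ... read or modify DSS registers here ... */

		/* dropping the last reference saves the context on OMAP3 */
		dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
	}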
+
+static void dss_clk_enable_all_no_ctx(void)
+{
+ enum dss_clock clks;
+
+ clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
+ if (cpu_is_omap34xx())
+ clks |= DSS_CLK_VIDFCK;
+ dss_clk_enable_no_ctx(clks);
+}
+
+static void dss_clk_disable_all_no_ctx(void)
+{
+ enum dss_clock clks;
+
+ clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
+ if (cpu_is_omap34xx())
+ clks |= DSS_CLK_VIDFCK;
+ dss_clk_disable_no_ctx(clks);
+}
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+/* CLOCKS */
+static void core_dump_clocks(struct seq_file *s)
+{
+ int i;
+ struct clk *clocks[5] = {
+ dss.dss_ick,
+ dss.dss_fck,
+ dss.dss_sys_clk,
+ dss.dss_tv_fck,
+ dss.dss_video_fck
+ };
+
+ seq_printf(s, "- CORE -\n");
+
+ seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
+
+ for (i = 0; i < 5; i++) {
+ if (!clocks[i])
+ continue;
+ seq_printf(s, "%-15s\t%lu\t%d\n",
+ clocks[i]->name,
+ clk_get_rate(clocks[i]),
+ clocks[i]->usecount);
+ }
+}
+#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
+
+/* DEBUGFS */
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+void dss_debug_dump_clocks(struct seq_file *s)
+{
+ core_dump_clocks(s);
+ dss_dump_clocks(s);
+ dispc_dump_clocks(s);
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_dump_clocks(s);
+#endif
+}
+#endif
+
+
+/* DSS HW IP initialisation */
+static int omap_dsshw_probe(struct platform_device *pdev)
+{
+ int r;
+
+ dss.pdev = pdev;
+
+ r = dss_get_clocks();
+ if (r)
+ goto err_clocks;
+
+ dss_clk_enable_all_no_ctx();
+
+ dss.ctx_id = dss_get_ctx_id();
+ DSSDBG("initial ctx id %u\n", dss.ctx_id);
+
+ r = dss_init();
+ if (r) {
+ DSSERR("Failed to initialize DSS\n");
+ goto err_dss;
+ }
+
+ r = dpi_init();
+ if (r) {
+ DSSERR("Failed to initialize DPI\n");
+ goto err_dpi;
+ }
+
+ r = sdi_init();
+ if (r) {
+ DSSERR("Failed to initialize SDI\n");
+ goto err_sdi;
+ }
+
+ dss_clk_disable_all_no_ctx();
+ return 0;
+err_sdi:
+ dpi_exit();
+err_dpi:
+ dss_exit();
+err_dss:
+ dss_clk_disable_all_no_ctx();
+ dss_put_clocks();
+err_clocks:
+ return r;
+}
+
+static int omap_dsshw_remove(struct platform_device *pdev)
+{
+
+ dss_exit();
+
+ /*
+ * As part of hwmod changes, DSS is not the only controller of dss
+ * clocks; hwmod framework itself will also enable clocks during hwmod
+ * init for dss, and autoidle is set in h/w for DSS. Hence, there's no
+ * need to disable clocks if their usecounts > 1.
+ */
+ WARN_ON(dss.num_clks_enabled > 0);
+
+ dss_put_clocks();
+ return 0;
+}
+
+static struct platform_driver omap_dsshw_driver = {
+ .probe = omap_dsshw_probe,
+ .remove = omap_dsshw_remove,
+ .driver = {
+ .name = "omapdss_dss",
+ .owner = THIS_MODULE,
+ },
+};
+
+int dss_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_dsshw_driver);
+}
+
+void dss_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_dsshw_driver);
+}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index b394951120ac..c2f582bb19c0 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -97,8 +97,6 @@ extern unsigned int dss_debug;
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
-#define DISPC_MAX_FCK 173000000
-
enum omap_burst_size {
OMAP_DSS_BURST_4x32 = 0,
OMAP_DSS_BURST_8x32 = 1,
@@ -112,17 +110,25 @@ enum omap_parallel_interface_mode {
};
enum dss_clock {
- DSS_CLK_ICK = 1 << 0,
- DSS_CLK_FCK1 = 1 << 1,
- DSS_CLK_FCK2 = 1 << 2,
- DSS_CLK_54M = 1 << 3,
- DSS_CLK_96M = 1 << 4,
+ DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */
+ DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */
+ DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */
+ DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */
+ DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
};
enum dss_clk_source {
- DSS_SRC_DSI1_PLL_FCLK,
- DSS_SRC_DSI2_PLL_FCLK,
- DSS_SRC_DSS1_ALWON_FCLK,
+ DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
+ * OMAP4: PLL1_CLK1 */
+ DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
+ * OMAP4: PLL1_CLK2 */
+ DSS_CLK_SRC_FCK, /* OMAP2/3: DSS1_ALWON_FCLK
+ * OMAP4: DSS_FCLK */
+};
+
+enum dss_hdmi_venc_clk_source_select {
+ DSS_VENC_TV_CLK = 0,
+ DSS_HDMI_M_PCLK = 1,
};
struct dss_clock_info {
@@ -148,36 +154,42 @@ struct dsi_clock_info {
unsigned long fint;
unsigned long clkin4ddr;
unsigned long clkin;
- unsigned long dsi1_pll_fclk;
- unsigned long dsi2_pll_fclk;
-
+ unsigned long dsi_pll_hsdiv_dispc_clk; /* OMAP3: DSI1_PLL_CLK
+ * OMAP4: PLLx_CLK1 */
+ unsigned long dsi_pll_hsdiv_dsi_clk; /* OMAP3: DSI2_PLL_CLK
+ * OMAP4: PLLx_CLK2 */
unsigned long lp_clk;
/* dividers */
u16 regn;
u16 regm;
- u16 regm3;
- u16 regm4;
-
+ u16 regm_dispc; /* OMAP3: REGM3
+ * OMAP4: REGM4 */
+ u16 regm_dsi; /* OMAP3: REGM4
+ * OMAP4: REGM5 */
u16 lp_clk_div;
u8 highfreq;
- bool use_dss2_fck;
+ bool use_sys_clk;
+};
+
+/* HDMI PLL structure */
+struct hdmi_pll_info {
+ u16 regn;
+ u16 regm;
+ u32 regmf;
+ u16 regm2;
+ u16 regsd;
+ u16 dcofreq;
};
struct seq_file;
struct platform_device;
/* core */
-void dss_clk_enable(enum dss_clock clks);
-void dss_clk_disable(enum dss_clock clks);
-unsigned long dss_clk_get_rate(enum dss_clock clk);
-int dss_need_ctx_restore(void);
-void dss_dump_clocks(struct seq_file *s);
struct bus_type *dss_get_bus(void);
struct regulator *dss_get_vdds_dsi(void);
struct regulator *dss_get_vdds_sdi(void);
-struct regulator *dss_get_vdda_dac(void);
/* display */
int dss_suspend_all_devices(void);
@@ -214,13 +226,23 @@ void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr);
void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
/* DSS */
-int dss_init(bool skip_init);
-void dss_exit(void);
+int dss_init_platform_driver(void);
+void dss_uninit_platform_driver(void);
+void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
void dss_save_context(void);
void dss_restore_context(void);
+void dss_clk_enable(enum dss_clock clks);
+void dss_clk_disable(enum dss_clock clks);
+unsigned long dss_clk_get_rate(enum dss_clock clk);
+int dss_need_ctx_restore(void);
+const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src);
+void dss_dump_clocks(struct seq_file *s);
void dss_dump_regs(struct seq_file *s);
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+void dss_debug_dump_clocks(struct seq_file *s);
+#endif
void dss_sdi_init(u8 datapairs);
int dss_sdi_enable(void);
@@ -228,8 +250,11 @@ void dss_sdi_disable(void);
void dss_select_dispc_clk_source(enum dss_clk_source clk_src);
void dss_select_dsi_clk_source(enum dss_clk_source clk_src);
+void dss_select_lcd_clk_source(enum omap_channel channel,
+ enum dss_clk_source clk_src);
enum dss_clk_source dss_get_dispc_clk_source(void);
enum dss_clk_source dss_get_dsi_clk_source(void);
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
void dss_set_venc_output(enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(bool enable);
@@ -244,11 +269,11 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
/* SDI */
#ifdef CONFIG_OMAP2_DSS_SDI
-int sdi_init(bool skip_init);
+int sdi_init(void);
void sdi_exit(void);
int sdi_init_display(struct omap_dss_device *display);
#else
-static inline int sdi_init(bool skip_init)
+static inline int sdi_init(void)
{
return 0;
}
@@ -259,8 +284,8 @@ static inline void sdi_exit(void)
/* DSI */
#ifdef CONFIG_OMAP2_DSS_DSI
-int dsi_init(struct platform_device *pdev);
-void dsi_exit(void);
+int dsi_init_platform_driver(void);
+void dsi_uninit_platform_driver(void);
void dsi_dump_clocks(struct seq_file *s);
void dsi_dump_irqs(struct seq_file *s);
@@ -271,7 +296,7 @@ void dsi_restore_context(void);
int dsi_init_display(struct omap_dss_device *display);
void dsi_irq_handler(void);
-unsigned long dsi_get_dsi1_pll_rate(void);
+unsigned long dsi_get_pll_hsdiv_dispc_rate(void);
int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo);
int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
struct dsi_clock_info *cinfo,
@@ -282,31 +307,36 @@ void dsi_pll_uninit(void);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 fifo_size, enum omap_burst_size *burst_size,
u32 *fifo_low, u32 *fifo_high);
-void dsi_wait_dsi1_pll_active(void);
-void dsi_wait_dsi2_pll_active(void);
+void dsi_wait_pll_hsdiv_dispc_active(void);
+void dsi_wait_pll_hsdiv_dsi_active(void);
#else
-static inline int dsi_init(struct platform_device *pdev)
+static inline int dsi_init_platform_driver(void)
{
return 0;
}
-static inline void dsi_exit(void)
+static inline void dsi_uninit_platform_driver(void)
{
}
-static inline void dsi_wait_dsi1_pll_active(void)
+static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(void)
{
+ WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
+ return 0;
}
-static inline void dsi_wait_dsi2_pll_active(void)
+static inline void dsi_wait_pll_hsdiv_dispc_active(void)
+{
+}
+static inline void dsi_wait_pll_hsdiv_dsi_active(void)
{
}
#endif
/* DPI */
#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init(struct platform_device *pdev);
+int dpi_init(void);
void dpi_exit(void);
int dpi_init_display(struct omap_dss_device *dssdev);
#else
-static inline int dpi_init(struct platform_device *pdev)
+static inline int dpi_init(void)
{
return 0;
}
@@ -316,8 +346,8 @@ static inline void dpi_exit(void)
#endif
/* DISPC */
-int dispc_init(void);
-void dispc_exit(void);
+int dispc_init_platform_driver(void);
+void dispc_uninit_platform_driver(void);
void dispc_dump_clocks(struct seq_file *s);
void dispc_dump_irqs(struct seq_file *s);
void dispc_dump_regs(struct seq_file *s);
@@ -350,6 +380,7 @@ void dispc_set_plane_size(enum omap_plane plane, u16 width, u16 height);
void dispc_set_channel_out(enum omap_plane plane,
enum omap_channel channel_out);
+void dispc_enable_gamma_table(bool enable);
int dispc_setup_plane(enum omap_plane plane,
u32 paddr, u16 screen_width,
u16 pos_x, u16 pos_y,
@@ -409,24 +440,50 @@ int dispc_get_clock_div(enum omap_channel channel,
/* VENC */
#ifdef CONFIG_OMAP2_DSS_VENC
-int venc_init(struct platform_device *pdev);
-void venc_exit(void);
+int venc_init_platform_driver(void);
+void venc_uninit_platform_driver(void);
void venc_dump_regs(struct seq_file *s);
int venc_init_display(struct omap_dss_device *display);
#else
-static inline int venc_init(struct platform_device *pdev)
+static inline int venc_init_platform_driver(void)
+{
+ return 0;
+}
+static inline void venc_uninit_platform_driver(void)
+{
+}
+#endif
+
+/* HDMI */
+#ifdef CONFIG_OMAP4_DSS_HDMI
+int hdmi_init_platform_driver(void);
+void hdmi_uninit_platform_driver(void);
+int hdmi_init_display(struct omap_dss_device *dssdev);
+#else
+static inline int hdmi_init_display(struct omap_dss_device *dssdev)
+{
+ return 0;
+}
+static inline int hdmi_init_platform_driver(void)
{
return 0;
}
-static inline void venc_exit(void)
+static inline void hdmi_uninit_platform_driver(void)
{
}
#endif
+int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev);
+void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev);
+void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev);
+int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+int hdmi_panel_init(void);
+void hdmi_panel_exit(void);
/* RFBI */
#ifdef CONFIG_OMAP2_DSS_RFBI
-int rfbi_init(void);
-void rfbi_exit(void);
+int rfbi_init_platform_driver(void);
+void rfbi_uninit_platform_driver(void);
void rfbi_dump_regs(struct seq_file *s);
int rfbi_configure(int rfbi_module, int bpp, int lines);
@@ -437,11 +494,11 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
unsigned long rfbi_get_max_tx_rate(void);
int rfbi_init_display(struct omap_dss_device *display);
#else
-static inline int rfbi_init(void)
+static inline int rfbi_init_platform_driver(void)
{
return 0;
}
-static inline void rfbi_exit(void)
+static inline void rfbi_uninit_platform_driver(void)
{
}
#endif
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index cf3ef696e141..aa1622241d0d 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -25,14 +25,18 @@
#include <plat/display.h>
#include <plat/cpu.h>
+#include "dss.h"
#include "dss_features.h"
/* Defines a generic omap register field */
struct dss_reg_field {
- enum dss_feat_reg_field id;
u8 start, end;
};
+struct dss_param_range {
+ int min, max;
+};
+
struct omap_dss_features {
const struct dss_reg_field *reg_fields;
const int num_reg_fields;
@@ -43,29 +47,68 @@ struct omap_dss_features {
const int num_ovls;
const enum omap_display_type *supported_displays;
const enum omap_color_mode *supported_color_modes;
+ const char * const *clksrc_names;
+ const struct dss_param_range *dss_params;
};
/* This struct is assigned to one of the below during initialization */
static struct omap_dss_features *omap_current_dss_features;
static const struct dss_reg_field omap2_dss_reg_fields[] = {
- { FEAT_REG_FIRHINC, 11, 0 },
- { FEAT_REG_FIRVINC, 27, 16 },
- { FEAT_REG_FIFOLOWTHRESHOLD, 8, 0 },
- { FEAT_REG_FIFOHIGHTHRESHOLD, 24, 16 },
- { FEAT_REG_FIFOSIZE, 8, 0 },
+ [FEAT_REG_FIRHINC] = { 11, 0 },
+ [FEAT_REG_FIRVINC] = { 27, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 8, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 24, 16 },
+ [FEAT_REG_FIFOSIZE] = { 8, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
+ [FEAT_REG_VERTICALACCU] = { 25, 16 },
+ [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGN] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGM] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGM_DISPC] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGM_DSI] = { 0, 0 },
};
static const struct dss_reg_field omap3_dss_reg_fields[] = {
- { FEAT_REG_FIRHINC, 12, 0 },
- { FEAT_REG_FIRVINC, 28, 16 },
- { FEAT_REG_FIFOLOWTHRESHOLD, 11, 0 },
- { FEAT_REG_FIFOHIGHTHRESHOLD, 27, 16 },
- { FEAT_REG_FIFOSIZE, 10, 0 },
+ [FEAT_REG_FIRHINC] = { 12, 0 },
+ [FEAT_REG_FIRVINC] = { 28, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
+ [FEAT_REG_FIFOSIZE] = { 10, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
+ [FEAT_REG_VERTICALACCU] = { 25, 16 },
+ [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGN] = { 7, 1 },
+ [FEAT_REG_DSIPLL_REGM] = { 18, 8 },
+ [FEAT_REG_DSIPLL_REGM_DISPC] = { 22, 19 },
+ [FEAT_REG_DSIPLL_REGM_DSI] = { 26, 23 },
+};
+
+static const struct dss_reg_field omap4_dss_reg_fields[] = {
+ [FEAT_REG_FIRHINC] = { 12, 0 },
+ [FEAT_REG_FIRVINC] = { 28, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
+ [FEAT_REG_FIFOSIZE] = { 15, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
+ [FEAT_REG_VERTICALACCU] = { 26, 16 },
+ [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 8 },
+ [FEAT_REG_DSIPLL_REGN] = { 8, 1 },
+ [FEAT_REG_DSIPLL_REGM] = { 20, 9 },
+ [FEAT_REG_DSIPLL_REGM_DISPC] = { 25, 21 },
+ [FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 },
};
static const enum omap_display_type omap2_dss_supported_displays[] = {
/* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DISPLAY_TYPE_VENC,
+};
+
+static const enum omap_display_type omap3430_dss_supported_displays[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI,
@@ -73,10 +116,10 @@ static const enum omap_display_type omap2_dss_supported_displays[] = {
OMAP_DISPLAY_TYPE_VENC,
};
-static const enum omap_display_type omap3_dss_supported_displays[] = {
+static const enum omap_display_type omap3630_dss_supported_displays[] = {
/* OMAP_DSS_CHANNEL_LCD */
OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI,
+ OMAP_DISPLAY_TYPE_DSI,
/* OMAP_DSS_CHANNEL_DIGIT */
OMAP_DISPLAY_TYPE_VENC,
@@ -87,7 +130,7 @@ static const enum omap_display_type omap4_dss_supported_displays[] = {
OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI,
/* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC,
+ OMAP_DISPLAY_TYPE_VENC | OMAP_DISPLAY_TYPE_HDMI,
/* OMAP_DSS_CHANNEL_LCD2 */
OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
@@ -134,6 +177,54 @@ static const enum omap_color_mode omap3_dss_supported_color_modes[] = {
OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
};
+static const char * const omap2_dss_clk_source_names[] = {
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A",
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A",
+ [DSS_CLK_SRC_FCK] = "DSS_FCLK1",
+};
+
+static const char * const omap3_dss_clk_source_names[] = {
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK",
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK",
+ [DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK",
+};
+
+static const char * const omap4_dss_clk_source_names[] = {
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1",
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2",
+ [DSS_CLK_SRC_FCK] = "DSS_FCLK",
+};
+
+static const struct dss_param_range omap2_dss_param_range[] = {
+ [FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
+ [FEAT_PARAM_DSIPLL_REGN] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_REGM] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_FINT] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_LPDIV] = { 0, 0 },
+};
+
+static const struct dss_param_range omap3_dss_param_range[] = {
+ [FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
+ [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 7) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 11) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 4) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 4) - 1 },
+ [FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 },
+ [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
+};
+
+static const struct dss_param_range omap4_dss_param_range[] = {
+ [FEAT_PARAM_DSS_FCK] = { 0, 186000000 },
+ [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
+ [FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
+ [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
+};
+
/* OMAP2 DSS Features */
static struct omap_dss_features omap2_dss_features = {
.reg_fields = omap2_dss_reg_fields,
@@ -141,12 +232,15 @@ static struct omap_dss_features omap2_dss_features = {
.has_feature =
FEAT_LCDENABLEPOL | FEAT_LCDENABLESIGNAL |
- FEAT_PCKFREEENABLE | FEAT_FUNCGATED,
+ FEAT_PCKFREEENABLE | FEAT_FUNCGATED |
+ FEAT_ROWREPEATENABLE | FEAT_RESIZECONF,
.num_mgrs = 2,
.num_ovls = 3,
.supported_displays = omap2_dss_supported_displays,
.supported_color_modes = omap2_dss_supported_color_modes,
+ .clksrc_names = omap2_dss_clk_source_names,
+ .dss_params = omap2_dss_param_range,
};
/* OMAP3 DSS Features */
@@ -157,12 +251,15 @@ static struct omap_dss_features omap3430_dss_features = {
.has_feature =
FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
- FEAT_FUNCGATED,
+ FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
+ FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF,
.num_mgrs = 2,
.num_ovls = 3,
- .supported_displays = omap3_dss_supported_displays,
+ .supported_displays = omap3430_dss_supported_displays,
.supported_color_modes = omap3_dss_supported_color_modes,
+ .clksrc_names = omap3_dss_clk_source_names,
+ .dss_params = omap3_dss_param_range,
};
static struct omap_dss_features omap3630_dss_features = {
@@ -172,27 +269,34 @@ static struct omap_dss_features omap3630_dss_features = {
.has_feature =
FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
- FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED,
+ FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
+ FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
+ FEAT_RESIZECONF,
.num_mgrs = 2,
.num_ovls = 3,
- .supported_displays = omap3_dss_supported_displays,
+ .supported_displays = omap3630_dss_supported_displays,
.supported_color_modes = omap3_dss_supported_color_modes,
+ .clksrc_names = omap3_dss_clk_source_names,
+ .dss_params = omap3_dss_param_range,
};
/* OMAP4 DSS Features */
static struct omap_dss_features omap4_dss_features = {
- .reg_fields = omap3_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
+ .reg_fields = omap4_dss_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
.has_feature =
FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
- FEAT_MGR_LCD2,
+ FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
+ FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC,
.num_mgrs = 3,
.num_ovls = 3,
.supported_displays = omap4_dss_supported_displays,
.supported_color_modes = omap3_dss_supported_color_modes,
+ .clksrc_names = omap4_dss_clk_source_names,
+ .dss_params = omap4_dss_param_range,
};
/* Functions returning values related to a DSS feature */
@@ -206,6 +310,16 @@ int dss_feat_get_num_ovls(void)
return omap_current_dss_features->num_ovls;
}
+unsigned long dss_feat_get_param_min(enum dss_range_param param)
+{
+ return omap_current_dss_features->dss_params[param].min;
+}
+
+unsigned long dss_feat_get_param_max(enum dss_range_param param)
+{
+ return omap_current_dss_features->dss_params[param].max;
+}
+
enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel)
{
return omap_current_dss_features->supported_displays[channel];
@@ -223,6 +337,11 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
color_mode;
}
+const char *dss_feat_get_clk_source_name(enum dss_clk_source id)
+{
+ return omap_current_dss_features->clksrc_names[id];
+}
+
/* DSS has_feature check */
bool dss_has_feature(enum dss_feat_id id)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index b9c70be92588..12e9c4ef0dec 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -22,6 +22,7 @@
#define MAX_DSS_MANAGERS 3
#define MAX_DSS_OVERLAYS 3
+#define MAX_DSS_LCD_MANAGERS 2
/* DSS has feature id */
enum dss_feat_id {
@@ -33,6 +34,12 @@ enum dss_feat_id {
FEAT_PCKFREEENABLE = 1 << 5,
FEAT_FUNCGATED = 1 << 6,
FEAT_MGR_LCD2 = 1 << 7,
+ FEAT_LINEBUFFERSPLIT = 1 << 8,
+ FEAT_ROWREPEATENABLE = 1 << 9,
+ FEAT_RESIZECONF = 1 << 10,
+ /* Independent core clk divider */
+ FEAT_CORE_CLK_DIV = 1 << 11,
+ FEAT_LCD_CLK_SRC = 1 << 12,
};
/* DSS register field id */
@@ -42,15 +49,35 @@ enum dss_feat_reg_field {
FEAT_REG_FIFOHIGHTHRESHOLD,
FEAT_REG_FIFOLOWTHRESHOLD,
FEAT_REG_FIFOSIZE,
+ FEAT_REG_HORIZONTALACCU,
+ FEAT_REG_VERTICALACCU,
+ FEAT_REG_DISPC_CLK_SWITCH,
+ FEAT_REG_DSIPLL_REGN,
+ FEAT_REG_DSIPLL_REGM,
+ FEAT_REG_DSIPLL_REGM_DISPC,
+ FEAT_REG_DSIPLL_REGM_DSI,
+};
+
+enum dss_range_param {
+ FEAT_PARAM_DSS_FCK,
+ FEAT_PARAM_DSIPLL_REGN,
+ FEAT_PARAM_DSIPLL_REGM,
+ FEAT_PARAM_DSIPLL_REGM_DISPC,
+ FEAT_PARAM_DSIPLL_REGM_DSI,
+ FEAT_PARAM_DSIPLL_FINT,
+ FEAT_PARAM_DSIPLL_LPDIV,
};
/* DSS Feature Functions */
int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
+unsigned long dss_feat_get_param_min(enum dss_range_param param);
+unsigned long dss_feat_get_param_max(enum dss_range_param param);
enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
+const char *dss_feat_get_clk_source_name(enum dss_clk_source id);
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
new file mode 100644
index 000000000000..0d44f070ef36
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -0,0 +1,1332 @@
+/*
+ * hdmi.c
+ *
+ * HDMI interface DSS driver setting for TI's OMAP4 family of processors.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ * Mythri pk <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "HDMI"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <plat/display.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+static struct {
+ struct mutex lock;
+ struct omap_display_platform_data *pdata;
+ struct platform_device *pdev;
+ void __iomem *base_wp; /* HDMI wrapper */
+ int code;
+ int mode;
+ u8 edid[HDMI_EDID_MAX_LENGTH];
+ u8 edid_set;
+ bool custom_set;
+ struct hdmi_config cfg;
+} hdmi;
+
+/*
+ * Logic for the below structure:
+ * the user enters the CEA or VESA timings by specifying the HDMI/DVI code.
+ * There is a correspondence between CEA/VESA timing and code; please
+ * refer to section 6.3 in the HDMI 1.3 specification for the timing codes.
+ *
+ * In the below structure, cea_vesa_timings corresponds to all OMAP4
+ * supported CEA and VESA timing values. code_cea corresponds to the CEA
+ * code; it is used to get the timing from the cea_vesa_timings array.
+ * Similarly with code_vesa. code_index is used for back mapping, that is,
+ * once EDID is read from the TV, it is parsed to find the timing values,
+ * which are then mapped to the corresponding CEA or VESA index.
+ */
+
+static const struct hdmi_timings cea_vesa_timings[OMAP_HDMI_TIMINGS_NB] = {
+ { {640, 480, 25200, 96, 16, 48, 2, 10, 33} , 0 , 0},
+ { {1280, 720, 74250, 40, 440, 220, 5, 5, 20}, 1, 1},
+ { {1280, 720, 74250, 40, 110, 220, 5, 5, 20}, 1, 1},
+ { {720, 480, 27027, 62, 16, 60, 6, 9, 30}, 0, 0},
+ { {2880, 576, 108000, 256, 48, 272, 5, 5, 39}, 0, 0},
+ { {1440, 240, 27027, 124, 38, 114, 3, 4, 15}, 0, 0},
+ { {1440, 288, 27000, 126, 24, 138, 3, 2, 19}, 0, 0},
+ { {1920, 540, 74250, 44, 528, 148, 5, 2, 15}, 1, 1},
+ { {1920, 540, 74250, 44, 88, 148, 5, 2, 15}, 1, 1},
+ { {1920, 1080, 148500, 44, 88, 148, 5, 4, 36}, 1, 1},
+ { {720, 576, 27000, 64, 12, 68, 5, 5, 39}, 0, 0},
+ { {1440, 576, 54000, 128, 24, 136, 5, 5, 39}, 0, 0},
+ { {1920, 1080, 148500, 44, 528, 148, 5, 4, 36}, 1, 1},
+ { {2880, 480, 108108, 248, 64, 240, 6, 9, 30}, 0, 0},
+ { {1920, 1080, 74250, 44, 638, 148, 5, 4, 36}, 1, 1},
+ /* VESA From Here */
+ { {640, 480, 25175, 96, 16, 48, 2 , 11, 31}, 0, 0},
+ { {800, 600, 40000, 128, 40, 88, 4 , 1, 23}, 1, 1},
+ { {848, 480, 33750, 112, 16, 112, 8 , 6, 23}, 1, 1},
+ { {1280, 768, 79500, 128, 64, 192, 7 , 3, 20}, 1, 0},
+ { {1280, 800, 83500, 128, 72, 200, 6 , 3, 22}, 1, 0},
+ { {1360, 768, 85500, 112, 64, 256, 6 , 3, 18}, 1, 1},
+ { {1280, 960, 108000, 112, 96, 312, 3 , 1, 36}, 1, 1},
+ { {1280, 1024, 108000, 112, 48, 248, 3 , 1, 38}, 1, 1},
+ { {1024, 768, 65000, 136, 24, 160, 6, 3, 29}, 0, 0},
+ { {1400, 1050, 121750, 144, 88, 232, 4, 3, 32}, 1, 0},
+ { {1440, 900, 106500, 152, 80, 232, 6, 3, 25}, 1, 0},
+ { {1680, 1050, 146250, 176 , 104, 280, 6, 3, 30}, 1, 0},
+ { {1366, 768, 85500, 143, 70, 213, 3, 3, 24}, 1, 1},
+ { {1920, 1080, 148500, 44, 148, 80, 5, 4, 36}, 1, 1},
+ { {1280, 768, 68250, 32, 48, 80, 7, 3, 12}, 0, 1},
+ { {1400, 1050, 101000, 32, 48, 80, 4, 3, 23}, 0, 1},
+ { {1680, 1050, 119000, 32, 48, 80, 6, 3, 21}, 0, 1},
+ { {1280, 800, 79500, 32, 48, 80, 6, 3, 14}, 0, 1},
+ { {1280, 720, 74250, 40, 110, 220, 5, 5, 20}, 1, 1}
+};
+
+/*
+ * This is a static mapping array which maps the timing values
+ * to the corresponding CEA / VESA codes
+ */
+static const int code_index[OMAP_HDMI_TIMINGS_NB] = {
+ 1, 19, 4, 2, 37, 6, 21, 20, 5, 16, 17, 29, 31, 35, 32,
+ /* <--15 CEA 17--> vesa*/
+ 4, 9, 0xE, 0x17, 0x1C, 0x27, 0x20, 0x23, 0x10, 0x2A,
+ 0X2F, 0x3A, 0X51, 0X52, 0x16, 0x29, 0x39, 0x1B
+};
+
+/*
+ * This is the reverse static mapping which maps the CEA / VESA code
+ * to the corresponding timing values
+ */
+static const int code_cea[39] = {
+ -1, 0, 3, 3, 2, 8, 5, 5, -1, -1,
+ -1, -1, -1, -1, -1, -1, 9, 10, 10, 1,
+ 7, 6, 6, -1, -1, -1, -1, -1, -1, 11,
+ 11, 12, 14, -1, -1, 13, 13, 4, 4
+};
+
+static const int code_vesa[85] = {
+ -1, -1, -1, -1, 15, -1, -1, -1, -1, 16,
+ -1, -1, -1, -1, 17, -1, 23, -1, -1, -1,
+ -1, -1, 29, 18, -1, -1, -1, 32, 19, -1,
+ -1, -1, 21, -1, -1, 22, -1, -1, -1, 20,
+ -1, 30, 24, -1, -1, -1, -1, 25, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 31, 26, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 27, 28, -1, 33};
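To make the two-way mapping described above concrete, a small illustrative lookup (the helper name is an editor's assumption, not part of this patch) that resolves a CEA code reported in EDID to a timing entry via code_cea, while code_index[] maps a table index back to its CEA/VESA code:

	static const struct hdmi_timings *hdmi_lookup_cea_timing(int cea_code)
	{
		int idx;

		if (cea_code < 0 || cea_code >= ARRAY_SIZE(code_cea))
			return NULL;

		idx = code_cea[cea_code];	/* CEA code -> table index */
		if (idx < 0)
			return NULL;		/* code not supported on OMAP4 */

		return &cea_vesa_timings[idx];
	}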
+
+static const u8 edid_header[8] = {0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0};
+
+static inline void hdmi_write_reg(const struct hdmi_reg idx, u32 val)
+{
+ __raw_writel(val, hdmi.base_wp + idx.idx);
+}
+
+static inline u32 hdmi_read_reg(const struct hdmi_reg idx)
+{
+ return __raw_readl(hdmi.base_wp + idx.idx);
+}
+
+static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx,
+ int b2, int b1, u32 val)
+{
+ u32 t = 0;
+ while (val != REG_GET(idx, b2, b1)) {
+ udelay(1);
+ if (t++ > 10000)
+ return !val;
+ }
+ return val;
+}
+
+int hdmi_init_display(struct omap_dss_device *dssdev)
+{
+ DSSDBG("init_display\n");
+
+ return 0;
+}
+
+static int hdmi_pll_init(enum hdmi_clk_refsel refsel, int dcofreq,
+ struct hdmi_pll_info *fmt, u16 sd)
+{
+ u32 r;
+
+ /* PLL start always use manual mode */
+ REG_FLD_MOD(PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
+
+ r = hdmi_read_reg(PLLCTRL_CFG1);
+ r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
+ r = FLD_MOD(r, fmt->regn, 8, 1); /* CFG1_PLL_REGN */
+
+ hdmi_write_reg(PLLCTRL_CFG1, r);
+
+ r = hdmi_read_reg(PLLCTRL_CFG2);
+
+ r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
+ r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
+ r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
+
+ if (dcofreq) {
+ /* divider programming for frequency beyond 1000Mhz */
+ REG_FLD_MOD(PLLCTRL_CFG3, sd, 17, 10);
+ r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
+ } else {
+ r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
+ }
+
+ hdmi_write_reg(PLLCTRL_CFG2, r);
+
+ r = hdmi_read_reg(PLLCTRL_CFG4);
+ r = FLD_MOD(r, fmt->regm2, 24, 18);
+ r = FLD_MOD(r, fmt->regmf, 17, 0);
+
+ hdmi_write_reg(PLLCTRL_CFG4, r);
+
+ /* go now */
+ REG_FLD_MOD(PLLCTRL_PLL_GO, 0x1, 0, 0);
+
+ /* wait for bit change */
+ if (hdmi_wait_for_bit_change(PLLCTRL_PLL_GO, 0, 0, 1) != 1) {
+ DSSERR("PLL GO bit not set\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait till the lock bit is set in PLL status */
+ if (hdmi_wait_for_bit_change(PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
+ DSSWARN("cannot lock PLL\n");
+ DSSWARN("CFG1 0x%x\n",
+ hdmi_read_reg(PLLCTRL_CFG1));
+ DSSWARN("CFG2 0x%x\n",
+ hdmi_read_reg(PLLCTRL_CFG2));
+ DSSWARN("CFG4 0x%x\n",
+ hdmi_read_reg(PLLCTRL_CFG4));
+ return -ETIMEDOUT;
+ }
+
+ DSSDBG("PLL locked!\n");
+
+ return 0;
+}
+
+/* PHY_PWR_CMD */
+static int hdmi_set_phy_pwr(enum hdmi_phy_pwr val)
+{
+ /* Command for power control of HDMI PHY */
+ REG_FLD_MOD(HDMI_WP_PWR_CTRL, val, 7, 6);
+
+ /* Status of the power control of HDMI PHY */
+ if (hdmi_wait_for_bit_change(HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
+ DSSERR("Failed to set PHY power mode to %d\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* PLL_PWR_CMD */
+static int hdmi_set_pll_pwr(enum hdmi_pll_pwr val)
+{
+ /* Command for power control of HDMI PLL */
+ REG_FLD_MOD(HDMI_WP_PWR_CTRL, val, 3, 2);
+
+ /* wait till PHY_PWR_STATUS is set */
+ if (hdmi_wait_for_bit_change(HDMI_WP_PWR_CTRL, 1, 0, val) != val) {
+ DSSERR("Failed to set PHY_PWR_STATUS\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int hdmi_pll_reset(void)
+{
+ /* SYSRESET controlled by power FSM */
+ REG_FLD_MOD(PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
+
+ /* READ 0x0 reset is in progress */
+ if (hdmi_wait_for_bit_change(PLLCTRL_PLL_STATUS, 0, 0, 1) != 1) {
+ DSSERR("Failed to sysreset PLL\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int hdmi_phy_init(void)
+{
+ u16 r = 0;
+
+ r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_LDOON);
+ if (r)
+ return r;
+
+ r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_TXON);
+ if (r)
+ return r;
+
+ /*
+	 * Read address 0 in order to get the SCP reset completed;
+	 * this dummy access is performed to make sure the reset is done
+ */
+ hdmi_read_reg(HDMI_TXPHY_TX_CTRL);
+
+ /*
+	 * Write to phy address 0 to configure the clock:
+	 * use HFBITCLK and write the HDMI_TXPHY_TX_CONTROL_FREQOUT field
+ */
+ REG_FLD_MOD(HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
+
+ /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
+ hdmi_write_reg(HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
+
+ /* Setup max LDO voltage */
+ REG_FLD_MOD(HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
+
+ /* Write to phy address 3 to change the polarity control */
+ REG_FLD_MOD(HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+
+ return 0;
+}
+
+static int hdmi_wait_softreset(void)
+{
+ /* SOFTRESET: write 1 to trigger the wrapper reset */
+ REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0);
+
+ /* wait till SOFTRESET == 0 */
+ if (hdmi_wait_for_bit_change(HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) {
+ DSSERR("sysconfig reset failed\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
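+/*
+ * PLL programming sequence used below: soft-reset the wrapper, cycle the
+ * PLL power (ALLOFF, then BOTHON_ALLCLKS), run the PLL soft reset and
+ * finally load the divider settings and wait for lock.
+ */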
+static int hdmi_pll_program(struct hdmi_pll_info *fmt)
+{
+ u16 r = 0;
+ enum hdmi_clk_refsel refsel;
+
+ /* wait for wrapper reset */
+ r = hdmi_wait_softreset();
+ if (r)
+ return r;
+
+ r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
+ if (r)
+ return r;
+
+ r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
+ if (r)
+ return r;
+
+ r = hdmi_pll_reset();
+ if (r)
+ return r;
+
+ refsel = HDMI_REFSEL_SYSCLK;
+
+ r = hdmi_pll_init(refsel, fmt->dcofreq, fmt, fmt->regsd);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void hdmi_phy_off(void)
+{
+ hdmi_set_phy_pwr(HDMI_PHYPWRCMD_OFF);
+}
+
+static int hdmi_core_ddc_edid(u8 *pedid, int ext)
+{
+ u32 i, j;
+ char checksum = 0;
+ u32 offset = 0;
+
+ /* Turn on CLK for DDC */
+ REG_FLD_MOD(HDMI_CORE_AV_DPD, 0x7, 2, 0);
+
+ /*
+ * SW HACK: without this delay, DDC (I2C bus) reads return zeros or
+ * right-shifted values (the behavior is inconsistent and seen only
+ * with some TVs)
+ */
+ usleep_range(800, 1000);
+
+ if (!ext) {
+ /* Clk SCL Devices */
+ REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0xA, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_IN_PROG */
+ if (hdmi_wait_for_bit_change(HDMI_CORE_DDC_STATUS,
+ 4, 4, 0) != 0) {
+ DSSERR("Failed to program DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Clear FIFO */
+ REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x9, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_IN_PROG */
+ if (hdmi_wait_for_bit_change(HDMI_CORE_DDC_STATUS,
+ 4, 4, 0) != 0) {
+ DSSERR("Failed to program DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ } else {
+ if (ext % 2 != 0)
+ offset = 0x80;
+ }
+
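+ /*
+ * E-DDC addressing: each EDID block is 128 bytes, the segment pointer
+ * selects a pair of blocks (ext / 2) and odd blocks sit at offset 0x80
+ * within the segment; 0xA0 is the sink's DDC slave address.
+ */
+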
+ /* Load Segment Address Register */
+ REG_FLD_MOD(HDMI_CORE_DDC_SEGM, ext/2, 7, 0);
+
+ /* Load Slave Address Register */
+ REG_FLD_MOD(HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
+
+ /* Load Offset Address Register */
+ REG_FLD_MOD(HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+
+ /* Load Byte Count */
+ REG_FLD_MOD(HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+ REG_FLD_MOD(HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
+
+ /* Set DDC_CMD */
+ if (ext)
+ REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x4, 3, 0);
+ else
+ REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x2, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_BUS_LOW */
+ if (REG_GET(HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
+ DSSWARN("I2C Bus Low?\n");
+ return -EIO;
+ }
+ /* HDMI_CORE_DDC_STATUS_NO_ACK */
+ if (REG_GET(HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
+ DSSWARN("I2C No Ack\n");
+ return -EIO;
+ }
+
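+ /*
+ * Drain one 128-byte block from the DDC FIFO: keep reading while the
+ * transfer is in progress (status bit 4) or the FIFO is not yet empty
+ * (status bit 2 clear).
+ */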
+ i = ext * 128;
+ j = 0;
+ while (((REG_GET(HDMI_CORE_DDC_STATUS, 4, 4) == 1) ||
+ (REG_GET(HDMI_CORE_DDC_STATUS, 2, 2) == 0)) &&
+ j < 128) {
+
+ if (REG_GET(HDMI_CORE_DDC_STATUS, 2, 2) == 0) {
+ /* FIFO not empty */
+ pedid[i++] = REG_GET(HDMI_CORE_DDC_DATA, 7, 0);
+ j++;
+ }
+ }
+
+ for (j = 0; j < 128; j++)
+ checksum += pedid[(ext * 128) + j];
+
+ if (checksum != 0) {
+ DSSERR("E-EDID checksum failed!!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int read_edid(u8 *pedid, u16 max_length)
+{
+ int r = 0, n = 0, i = 0;
+ int max_ext_blocks = (max_length / 128) - 1;
+
+ r = hdmi_core_ddc_edid(pedid, 0);
+ if (r) {
+ return r;
+ } else {
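+ /* byte 0x7e of block 0 holds the number of extension blocks */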
+ n = pedid[0x7e];
+
+ /*
+ * TODO: comply with the max_length set by the caller. A better
+ * implementation would allocate the EDID buffer based on the
+ * extension-block count found in block 0
+ */
+ if (n > max_ext_blocks)
+ n = max_ext_blocks;
+
+ for (i = 1; i <= n; i++) {
+ r = hdmi_core_ddc_edid(pedid, i);
+ if (r)
+ return r;
+ }
+ }
+ return 0;
+}
+
+static int get_timings_index(void)
+{
+ int code;
+
+ if (hdmi.mode == 0)
+ code = code_vesa[hdmi.code];
+ else
+ code = code_cea[hdmi.code];
+
+ if (code == -1) {
+ /* HDMI code 4 corresponds to 640x480 VGA */
+ hdmi.code = 4;
+ /* fall back to DVI mode (HDMI_DVI) */
+ hdmi.mode = HDMI_DVI;
+
+ code = code_vesa[hdmi.code];
+ }
+ return code;
+}
+
+static struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
+{
+ int i = 0, code = -1, temp_vsync = 0, temp_hsync = 0;
+ int timing_vsync = 0, timing_hsync = 0;
+ struct omap_video_timings temp;
+ struct hdmi_cm cm = {-1};
+ DSSDBG("hdmi_get_code\n");
+
+ for (i = 0; i < OMAP_HDMI_TIMINGS_NB; i++) {
+ temp = cea_vesa_timings[i].timings;
+ if ((temp.pixel_clock == timing->pixel_clock) &&
+ (temp.x_res == timing->x_res) &&
+ (temp.y_res == timing->y_res)) {
+
+ temp_hsync = temp.hfp + temp.hsw + temp.hbp;
+ timing_hsync = timing->hfp + timing->hsw + timing->hbp;
+ temp_vsync = temp.vfp + temp.vsw + temp.vbp;
+ timing_vsync = timing->vfp + timing->vsw + timing->vbp;
+
+ DSSDBG("temp_hsync = %d , temp_vsync = %d"
+ "timing_hsync = %d, timing_vsync = %d\n",
+ temp_hsync, temp_hsync,
+ timing_hsync, timing_vsync);
+
+ if ((temp_hsync == timing_hsync) &&
+ (temp_vsync == timing_vsync)) {
+ code = i;
+ cm.code = code_index[i];
+ if (code < 14)
+ cm.mode = HDMI_HDMI;
+ else
+ cm.mode = HDMI_DVI;
+ DSSDBG("Hdmi_code = %d mode = %d\n",
+ cm.code, cm.mode);
+ break;
+ }
+ }
+ }
+
+ return cm;
+}
+
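+/*
+ * Decode one 18-byte EDID detailed timing descriptor. Bytes 0-1 hold the
+ * pixel clock in 10 kHz units (little endian) and the active/blanking
+ * fields are split into low bytes plus packed high nibbles, which the
+ * shifts and masks below reassemble. As an illustration (values not taken
+ * from this patch), a 148.50 MHz pixel clock is stored as 0x02 0x3A
+ * (0x3A02 = 14850).
+ */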
+static void get_horz_vert_timing_info(int current_descriptor_addrs, u8 *edid ,
+ struct omap_video_timings *timings)
+{
+ /* X and Y resolution */
+ timings->x_res = (((edid[current_descriptor_addrs + 4] & 0xF0) << 4) |
+ edid[current_descriptor_addrs + 2]);
+ timings->y_res = (((edid[current_descriptor_addrs + 7] & 0xF0) << 4) |
+ edid[current_descriptor_addrs + 5]);
+
+ timings->pixel_clock = ((edid[current_descriptor_addrs + 1] << 8) |
+ edid[current_descriptor_addrs]);
+
+ timings->pixel_clock = 10 * timings->pixel_clock;
+
+ /* HORIZONTAL FRONT PORCH */
+ timings->hfp = edid[current_descriptor_addrs + 8] |
+ ((edid[current_descriptor_addrs + 11] & 0xc0) << 2);
+ /* HORIZONTAL SYNC WIDTH */
+ timings->hsw = edid[current_descriptor_addrs + 9] |
+ ((edid[current_descriptor_addrs + 11] & 0x30) << 4);
+ /* HORIZONTAL BACK PORCH */
+ timings->hbp = (((edid[current_descriptor_addrs + 4] & 0x0F) << 8) |
+ edid[current_descriptor_addrs + 3]) -
+ (timings->hfp + timings->hsw);
+ /* VERTICAL FRONT PORCH */
+ timings->vfp = ((edid[current_descriptor_addrs + 10] & 0xF0) >> 4) |
+ ((edid[current_descriptor_addrs + 11] & 0x0f) << 2);
+ /* VERTICAL SYNC WIDTH */
+ timings->vsw = (edid[current_descriptor_addrs + 10] & 0x0F) |
+ ((edid[current_descriptor_addrs + 11] & 0x03) << 4);
+ /* VERTICAL BACK PORCH */
+ timings->vbp = (((edid[current_descriptor_addrs + 7] & 0x0F) << 8) |
+ edid[current_descriptor_addrs + 6]) -
+ (timings->vfp + timings->vsw);
+
+}
+
+/* Get the resolution information from the EDID timing descriptors */
+static void get_edid_timing_data(u8 *edid)
+{
+ u8 count;
+ u16 current_descriptor_addrs;
+ struct hdmi_cm cm;
+ struct omap_video_timings edid_timings;
+
+ /* search block 0; there are 4 DTDs arranged in priority order */
+ for (count = 0; count < EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR; count++) {
+ current_descriptor_addrs =
+ EDID_DESCRIPTOR_BLOCK0_ADDRESS +
+ count * EDID_TIMING_DESCRIPTOR_SIZE;
+ get_horz_vert_timing_info(current_descriptor_addrs,
+ edid, &edid_timings);
+ cm = hdmi_get_code(&edid_timings);
+ DSSDBG("Block0[%d] value matches code = %d , mode = %d\n",
+ count, cm.code, cm.mode);
+ if (cm.code == -1) {
+ continue;
+ } else {
+ hdmi.code = cm.code;
+ hdmi.mode = cm.mode;
+ DSSDBG("code = %d , mode = %d\n",
+ hdmi.code, hdmi.mode);
+ return;
+ }
+ }
+ if (edid[0x7e] != 0x00) {
+ for (count = 0; count < EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR;
+ count++) {
+ current_descriptor_addrs =
+ EDID_DESCRIPTOR_BLOCK1_ADDRESS +
+ count * EDID_TIMING_DESCRIPTOR_SIZE;
+ get_horz_vert_timing_info(current_descriptor_addrs,
+ edid, &edid_timings);
+ cm = hdmi_get_code(&edid_timings);
+ DSSDBG("Block1[%d] value matches code = %d, mode = %d",
+ count, cm.code, cm.mode);
+ if (cm.code == -1) {
+ continue;
+ } else {
+ hdmi.code = cm.code;
+ hdmi.mode = cm.mode;
+ DSSDBG("code = %d , mode = %d\n",
+ hdmi.code, hdmi.mode);
+ return;
+ }
+ }
+ }
+
+ DSSINFO("no valid timing found , falling back to VGA\n");
+ hdmi.code = 4; /* setting default value of 640 480 VGA */
+ hdmi.mode = HDMI_DVI;
+}
+
+static void hdmi_read_edid(struct omap_video_timings *dp)
+{
+ int ret = 0, code;
+
+ memset(hdmi.edid, 0, HDMI_EDID_MAX_LENGTH);
+
+ if (!hdmi.edid_set)
+ ret = read_edid(hdmi.edid, HDMI_EDID_MAX_LENGTH);
+
+ if (!ret) {
+ if (!memcmp(hdmi.edid, edid_header, sizeof(edid_header))) {
+ /* search for timings of default resolution */
+ get_edid_timing_data(hdmi.edid);
+ hdmi.edid_set = true;
+ }
+ } else {
+ DSSWARN("failed to read E-EDID\n");
+ }
+
+ if (!hdmi.edid_set) {
+ DSSINFO("fallback to VGA\n");
+ hdmi.code = 4; /* default to 640x480 VGA */
+ hdmi.mode = HDMI_DVI;
+ }
+
+ code = get_timings_index();
+
+ *dp = cea_vesa_timings[code].timings;
+}
+
+static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
+ struct hdmi_core_infoframe_avi *avi_cfg,
+ struct hdmi_core_packet_enable_repeat *repeat_cfg)
+{
+ DSSDBG("Enter hdmi_core_init\n");
+
+ /* video core */
+ video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
+ video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT;
+ video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE;
+ video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
+ video_cfg->hdmi_dvi = HDMI_DVI;
+ video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
+
+ /* info frame */
+ avi_cfg->db1_format = 0;
+ avi_cfg->db1_active_info = 0;
+ avi_cfg->db1_bar_info_dv = 0;
+ avi_cfg->db1_scan_info = 0;
+ avi_cfg->db2_colorimetry = 0;
+ avi_cfg->db2_aspect_ratio = 0;
+ avi_cfg->db2_active_fmt_ar = 0;
+ avi_cfg->db3_itc = 0;
+ avi_cfg->db3_ec = 0;
+ avi_cfg->db3_q_range = 0;
+ avi_cfg->db3_nup_scaling = 0;
+ avi_cfg->db4_videocode = 0;
+ avi_cfg->db5_pixel_repeat = 0;
+ avi_cfg->db6_7_line_eoftop = 0;
+ avi_cfg->db8_9_line_sofbottom = 0;
+ avi_cfg->db10_11_pixel_eofleft = 0;
+ avi_cfg->db12_13_pixel_sofright = 0;
+
+ /* packet enable and repeat */
+ repeat_cfg->audio_pkt = 0;
+ repeat_cfg->audio_pkt_repeat = 0;
+ repeat_cfg->avi_infoframe = 0;
+ repeat_cfg->avi_infoframe_repeat = 0;
+ repeat_cfg->gen_cntrl_pkt = 0;
+ repeat_cfg->gen_cntrl_pkt_repeat = 0;
+ repeat_cfg->generic_pkt = 0;
+ repeat_cfg->generic_pkt_repeat = 0;
+}
+
+static void hdmi_core_powerdown_disable(void)
+{
+ DSSDBG("Enter hdmi_core_powerdown_disable\n");
+ REG_FLD_MOD(HDMI_CORE_CTRL1, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_release(void)
+{
+ DSSDBG("Enter hdmi_core_swreset_release\n");
+ REG_FLD_MOD(HDMI_CORE_SYS_SRST, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_assert(void)
+{
+ DSSDBG("Enter hdmi_core_swreset_assert\n");
+ REG_FLD_MOD(HDMI_CORE_SYS_SRST, 0x1, 0, 0);
+}
+
+/* DSS_HDMI_CORE_VIDEO_CONFIG */
+static void hdmi_core_video_config(struct hdmi_core_video_config *cfg)
+{
+ u32 r = 0;
+
+ /* sys_ctrl1 default configuration not tunable */
+ r = hdmi_read_reg(HDMI_CORE_CTRL1);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_BSEL_24BITBUS, 2, 2);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_EDGE_RISINGEDGE, 1, 1);
+ hdmi_write_reg(HDMI_CORE_CTRL1, r);
+
+ REG_FLD_MOD(HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
+
+ /* Vid_Mode */
+ r = hdmi_read_reg(HDMI_CORE_SYS_VID_MODE);
+
+ /* dither truncation configuration */
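+ /*
+ * enum values above HDMI_OUTPUTTRUNCATION_12BIT are the dither modes;
+ * they map to field values 0..2 with bit 5 (dither enable) set
+ */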
+ if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) {
+ r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6);
+ r = FLD_MOD(r, 1, 5, 5);
+ } else {
+ r = FLD_MOD(r, cfg->op_dither_truc, 7, 6);
+ r = FLD_MOD(r, 0, 5, 5);
+ }
+ hdmi_write_reg(HDMI_CORE_SYS_VID_MODE, r);
+
+ /* HDMI_Ctrl */
+ r = hdmi_read_reg(HDMI_CORE_AV_HDMI_CTRL);
+ r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
+ r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
+ r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
+ hdmi_write_reg(HDMI_CORE_AV_HDMI_CTRL, r);
+
+ /* TMDS_CTRL */
+ REG_FLD_MOD(HDMI_CORE_SYS_TMDS_CTRL,
+ cfg->tclk_sel_clkmult, 6, 5);
+}
+
+static void hdmi_core_aux_infoframe_avi_config(
+ struct hdmi_core_infoframe_avi info_avi)
+{
+ u32 val;
+ char sum = 0, checksum = 0;
+
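+ /* AVI InfoFrame header per CEA-861-D: type 0x82, version 0x02, length 0x0D */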
+ sum += 0x82 + 0x002 + 0x00D;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_TYPE, 0x082);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_VERS, 0x002);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_LEN, 0x00D);
+
+ val = (info_avi.db1_format << 5) |
+ (info_avi.db1_active_info << 4) |
+ (info_avi.db1_bar_info_dv << 2) |
+ (info_avi.db1_scan_info);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(0), val);
+ sum += val;
+
+ val = (info_avi.db2_colorimetry << 6) |
+ (info_avi.db2_aspect_ratio << 4) |
+ (info_avi.db2_active_fmt_ar);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(1), val);
+ sum += val;
+
+ val = (info_avi.db3_itc << 7) |
+ (info_avi.db3_ec << 4) |
+ (info_avi.db3_q_range << 2) |
+ (info_avi.db3_nup_scaling);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(2), val);
+ sum += val;
+
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(3), info_avi.db4_videocode);
+ sum += info_avi.db4_videocode;
+
+ val = info_avi.db5_pixel_repeat;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(4), val);
+ sum += val;
+
+ val = info_avi.db6_7_line_eoftop & 0x00FF;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(5), val);
+ sum += val;
+
+ val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(6), val);
+ sum += val;
+
+ val = info_avi.db8_9_line_sofbottom & 0x00FF;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(7), val);
+ sum += val;
+
+ val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(8), val);
+ sum += val;
+
+ val = info_avi.db10_11_pixel_eofleft & 0x00FF;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(9), val);
+ sum += val;
+
+ val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(10), val);
+ sum += val;
+
+ val = info_avi.db12_13_pixel_sofright & 0x00FF;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(11), val);
+ sum += val;
+
+ val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(12), val);
+ sum += val;
+
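+ /* CEA-861-D: all InfoFrame bytes, including the checksum, must sum to zero (mod 256) */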
+ checksum = 0x100 - sum;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_CHSUM, checksum);
+}
+
+static void hdmi_core_av_packet_config(
+ struct hdmi_core_packet_enable_repeat repeat_cfg)
+{
+ /* enable/repeat the infoframe */
+ hdmi_write_reg(HDMI_CORE_AV_PB_CTRL1,
+ (repeat_cfg.audio_pkt << 5) |
+ (repeat_cfg.audio_pkt_repeat << 4) |
+ (repeat_cfg.avi_infoframe << 1) |
+ (repeat_cfg.avi_infoframe_repeat));
+
+ /* enable/repeat the packet */
+ hdmi_write_reg(HDMI_CORE_AV_PB_CTRL2,
+ (repeat_cfg.gen_cntrl_pkt << 3) |
+ (repeat_cfg.gen_cntrl_pkt_repeat << 2) |
+ (repeat_cfg.generic_pkt << 1) |
+ (repeat_cfg.generic_pkt_repeat));
+}
+
+static void hdmi_wp_init(struct omap_video_timings *timings,
+ struct hdmi_video_format *video_fmt,
+ struct hdmi_video_interface *video_int)
+{
+ DSSDBG("Enter hdmi_wp_init\n");
+
+ timings->hbp = 0;
+ timings->hfp = 0;
+ timings->hsw = 0;
+ timings->vbp = 0;
+ timings->vfp = 0;
+ timings->vsw = 0;
+
+ video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
+ video_fmt->y_res = 0;
+ video_fmt->x_res = 0;
+
+ video_int->vsp = 0;
+ video_int->hsp = 0;
+
+ video_int->interlacing = 0;
+ video_int->tm = 0; /* HDMI_TIMING_SLAVE */
+
+}
+
+static void hdmi_wp_video_start(bool start)
+{
+ REG_FLD_MOD(HDMI_WP_VIDEO_CFG, start, 31, 31);
+}
+
+static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
+ struct omap_video_timings *timings, struct hdmi_config *param)
+{
+ DSSDBG("Enter hdmi_wp_video_init_format\n");
+
+ video_fmt->y_res = param->timings.timings.y_res;
+ video_fmt->x_res = param->timings.timings.x_res;
+
+ timings->hbp = param->timings.timings.hbp;
+ timings->hfp = param->timings.timings.hfp;
+ timings->hsw = param->timings.timings.hsw;
+ timings->vbp = param->timings.timings.vbp;
+ timings->vfp = param->timings.timings.vfp;
+ timings->vsw = param->timings.timings.vsw;
+}
+
+static void hdmi_wp_video_config_format(
+ struct hdmi_video_format *video_fmt)
+{
+ u32 l = 0;
+
+ REG_FLD_MOD(HDMI_WP_VIDEO_CFG, video_fmt->packing_mode, 10, 8);
+
+ l |= FLD_VAL(video_fmt->y_res, 31, 16);
+ l |= FLD_VAL(video_fmt->x_res, 15, 0);
+ hdmi_write_reg(HDMI_WP_VIDEO_SIZE, l);
+}
+
+static void hdmi_wp_video_config_interface(
+ struct hdmi_video_interface *video_int)
+{
+ u32 r;
+ DSSDBG("Enter hdmi_wp_video_config_interface\n");
+
+ r = hdmi_read_reg(HDMI_WP_VIDEO_CFG);
+ r = FLD_MOD(r, video_int->vsp, 7, 7);
+ r = FLD_MOD(r, video_int->hsp, 6, 6);
+ r = FLD_MOD(r, video_int->interlacing, 3, 3);
+ r = FLD_MOD(r, video_int->tm, 1, 0);
+ hdmi_write_reg(HDMI_WP_VIDEO_CFG, r);
+}
+
+static void hdmi_wp_video_config_timing(
+ struct omap_video_timings *timings)
+{
+ u32 timing_h = 0;
+ u32 timing_v = 0;
+
+ DSSDBG("Enter hdmi_wp_video_config_timing\n");
+
+ timing_h |= FLD_VAL(timings->hbp, 31, 20);
+ timing_h |= FLD_VAL(timings->hfp, 19, 8);
+ timing_h |= FLD_VAL(timings->hsw, 7, 0);
+ hdmi_write_reg(HDMI_WP_VIDEO_TIMING_H, timing_h);
+
+ timing_v |= FLD_VAL(timings->vbp, 31, 20);
+ timing_v |= FLD_VAL(timings->vfp, 19, 8);
+ timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ hdmi_write_reg(HDMI_WP_VIDEO_TIMING_V, timing_v);
+}
+
+static void hdmi_basic_configure(struct hdmi_config *cfg)
+{
+ /* HDMI */
+ struct omap_video_timings video_timing;
+ struct hdmi_video_format video_format;
+ struct hdmi_video_interface video_interface;
+ /* HDMI core */
+ struct hdmi_core_infoframe_avi avi_cfg;
+ struct hdmi_core_video_config v_core_cfg;
+ struct hdmi_core_packet_enable_repeat repeat_cfg;
+
+ hdmi_wp_init(&video_timing, &video_format,
+ &video_interface);
+
+ hdmi_core_init(&v_core_cfg,
+ &avi_cfg,
+ &repeat_cfg);
+
+ hdmi_wp_video_init_format(&video_format,
+ &video_timing, cfg);
+
+ hdmi_wp_video_config_timing(&video_timing);
+
+ /* video config */
+ video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
+
+ hdmi_wp_video_config_format(&video_format);
+
+ video_interface.vsp = cfg->timings.vsync_pol;
+ video_interface.hsp = cfg->timings.hsync_pol;
+ video_interface.interlacing = cfg->interlace;
+ video_interface.tm = 1; /* HDMI_TIMING_MASTER_24BIT */
+
+ hdmi_wp_video_config_interface(&video_interface);
+
+ /*
+ * configure core video part
+ * set software reset in the core
+ */
+ hdmi_core_swreset_assert();
+
+ /* take the core out of power-down */
+ hdmi_core_powerdown_disable();
+
+ v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
+ v_core_cfg.hdmi_dvi = cfg->cm.mode;
+
+ hdmi_core_video_config(&v_core_cfg);
+
+ /* release software reset in the core */
+ hdmi_core_swreset_release();
+
+ /*
+ * configure the AVI InfoFrame packet; see CEA-861-D, page 65, for
+ * the video InfoFrame layout
+ */
+ avi_cfg.db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
+ avi_cfg.db1_active_info =
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
+ avi_cfg.db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
+ avi_cfg.db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
+ avi_cfg.db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
+ avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
+ avi_cfg.db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
+ avi_cfg.db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
+ avi_cfg.db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
+ avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
+ avi_cfg.db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
+ avi_cfg.db4_videocode = cfg->cm.code;
+ avi_cfg.db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
+ avi_cfg.db6_7_line_eoftop = 0;
+ avi_cfg.db8_9_line_sofbottom = 0;
+ avi_cfg.db10_11_pixel_eofleft = 0;
+ avi_cfg.db12_13_pixel_sofright = 0;
+
+ hdmi_core_aux_infoframe_avi_config(avi_cfg);
+
+ /* enable/repeat the infoframe */
+ repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
+ repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
+ /* wakeup */
+ repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
+ repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
+ hdmi_core_av_packet_config(repeat_cfg);
+}
+
+static void update_hdmi_timings(struct hdmi_config *cfg,
+ struct omap_video_timings *timings, int code)
+{
+ cfg->timings.timings.x_res = timings->x_res;
+ cfg->timings.timings.y_res = timings->y_res;
+ cfg->timings.timings.hbp = timings->hbp;
+ cfg->timings.timings.hfp = timings->hfp;
+ cfg->timings.timings.hsw = timings->hsw;
+ cfg->timings.timings.vbp = timings->vbp;
+ cfg->timings.timings.vfp = timings->vfp;
+ cfg->timings.timings.vsw = timings->vsw;
+ cfg->timings.timings.pixel_clock = timings->pixel_clock;
+ cfg->timings.vsync_pol = cea_vesa_timings[code].vsync_pol;
+ cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol;
+}
+
+static void hdmi_compute_pll(unsigned long clkin, int phy,
+ int n, struct hdmi_pll_info *pi)
+{
+ unsigned long refclk;
+ u32 mf;
+
+ /*
+ * The input clock is pre-divided by N + 1; the output of the
+ * pre-divider is the reference clock
+ */
+ refclk = clkin / (n + 1);
+ pi->regn = n;
+
+ /*
+ * The multiplier is pixel_clk / ref_clk; multiply by 100 first so
+ * the fractional part is not lost to integer division
+ */
+ pi->regm = (phy * 100/(refclk))/100;
+ pi->regm2 = 1;
+
+ /*
+ * The fractional multiplier is the remainder left after the integer
+ * multiplier (phy - regm * refclk), scaled by 2^18 (262144) and
+ * divided by the reference clock
+ */
+ mf = (phy - pi->regm * refclk) * 262144;
+ pi->regmf = mf/(refclk);
+
+ /*
+ * Dcofreq should be set to 1 if required pixel clock
+ * is greater than 1000MHz
+ */
+ pi->dcofreq = phy > 1000 * 100;
+ pi->regsd = ((pi->regm * clkin / 10) / ((n + 1) * 250) + 5) / 10;
+
+ DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
+ DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
+}
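+
+/*
+ * Worked example for the computation above (illustrative, using the values
+ * passed in from hdmi_power_on): with clkin = 3840 (38.4 MHz) and n = 15,
+ * refclk = 3840 / 16 = 240. regm is then the integer part of phy / refclk,
+ * and regmf holds the remaining fraction scaled by 2^18, so that
+ * (regm + regmf / 2^18) * refclk reproduces the requested phy rate.
+ */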
+
+static void hdmi_enable_clocks(int enable)
+{
+ if (enable)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK |
+ DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
+ else
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK |
+ DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
+}
+
+static int hdmi_power_on(struct omap_dss_device *dssdev)
+{
+ int r, code = 0;
+ struct hdmi_pll_info pll_data;
+ struct omap_video_timings *p;
+ int clkin, n, phy;
+
+ hdmi_enable_clocks(1);
+
+ dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
+
+ p = &dssdev->panel.timings;
+
+ DSSDBG("hdmi_power_on x_res= %d y_res = %d\n",
+ dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
+
+ if (!hdmi.custom_set) {
+ DSSDBG("Read EDID as no EDID is not set on poweron\n");
+ hdmi_read_edid(p);
+ }
+ code = get_timings_index();
+ dssdev->panel.timings = cea_vesa_timings[code].timings;
+ update_hdmi_timings(&hdmi.cfg, p, code);
+
+ clkin = 3840; /* 38.4 MHz */
+ n = 15; /* fixed pre-divider (N) value used by the PLL computation */
+ phy = p->pixel_clock;
+
+ hdmi_compute_pll(clkin, phy, n, &pll_data);
+
+ hdmi_wp_video_start(0);
+
+ /* config the PLL and PHY first */
+ r = hdmi_pll_program(&pll_data);
+ if (r) {
+ DSSDBG("Failed to lock PLL\n");
+ goto err;
+ }
+
+ r = hdmi_phy_init();
+ if (r) {
+ DSSDBG("Failed to start PHY\n");
+ goto err;
+ }
+
+ hdmi.cfg.cm.mode = hdmi.mode;
+ hdmi.cfg.cm.code = hdmi.code;
+ hdmi_basic_configure(&hdmi.cfg);
+
+ /* Make selection of HDMI in DSS */
+ dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+
+ /* Select the PRCM clock as the DISPC clock source rather than the
+ * DSI PLL, since the rate set up by the DSI PLL may not be enough
+ * for the selected resolution, which the user can change
+ * dynamically. This could be moved to a single location, e.g. the
+ * board file.
+ */
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
+
+ /* bypass TV gamma table */
+ dispc_enable_gamma_table(0);
+
+ /* tv size */
+ dispc_set_digit_size(dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
+
+ dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 1);
+
+ hdmi_wp_video_start(1);
+
+ return 0;
+err:
+ hdmi_enable_clocks(0);
+ return -EIO;
+}
+
+static void hdmi_power_off(struct omap_dss_device *dssdev)
+{
+ dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
+
+ hdmi_wp_video_start(0);
+ hdmi_phy_off();
+ hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
+ hdmi_enable_clocks(0);
+
+ hdmi.edid_set = 0;
+}
+
+int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct hdmi_cm cm;
+
+ cm = hdmi_get_code(timings);
+ if (cm.code == -1) {
+ DSSERR("Invalid timing entered\n");
+ return -EINVAL;
+ }
+
+ return 0;
+
+}
+
+void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
+{
+ struct hdmi_cm cm;
+
+ hdmi.custom_set = 1;
+ cm = hdmi_get_code(&dssdev->panel.timings);
+ hdmi.code = cm.code;
+ hdmi.mode = cm.mode;
+ omapdss_hdmi_display_enable(dssdev);
+ hdmi.custom_set = 0;
+}
+
+int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("ENTER hdmi_display_enable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ r = omap_dss_start_device(dssdev);
+ if (r) {
+ DSSERR("failed to start device\n");
+ goto err0;
+ }
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r) {
+ DSSERR("failed to enable GPIO's\n");
+ goto err1;
+ }
+ }
+
+ r = hdmi_power_on(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err2;
+ }
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err2:
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+err1:
+ omap_dss_stop_device(dssdev);
+err0:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
+{
+ DSSDBG("Enter hdmi_display_disable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi_power_off(dssdev);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ omap_dss_stop_device(dssdev);
+
+ mutex_unlock(&hdmi.lock);
+}
+
+/* HDMI HW IP initialisation */
+static int omapdss_hdmihw_probe(struct platform_device *pdev)
+{
+ struct resource *hdmi_mem;
+
+ hdmi.pdata = pdev->dev.platform_data;
+ hdmi.pdev = pdev;
+
+ mutex_init(&hdmi.lock);
+
+ hdmi_mem = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
+ if (!hdmi_mem) {
+ DSSERR("can't get IORESOURCE_MEM HDMI\n");
+ return -EINVAL;
+ }
+
+ /* Base address taken from platform */
+ hdmi.base_wp = ioremap(hdmi_mem->start, resource_size(hdmi_mem));
+ if (!hdmi.base_wp) {
+ DSSERR("can't ioremap WP\n");
+ return -ENOMEM;
+ }
+
+ hdmi_panel_init();
+
+ return 0;
+}
+
+static int omapdss_hdmihw_remove(struct platform_device *pdev)
+{
+ hdmi_panel_exit();
+
+ iounmap(hdmi.base_wp);
+
+ return 0;
+}
+
+static struct platform_driver omapdss_hdmihw_driver = {
+ .probe = omapdss_hdmihw_probe,
+ .remove = omapdss_hdmihw_remove,
+ .driver = {
+ .name = "omapdss_hdmi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int hdmi_init_platform_driver(void)
+{
+ return platform_driver_register(&omapdss_hdmihw_driver);
+}
+
+void hdmi_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omapdss_hdmihw_driver);
+}
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
new file mode 100644
index 000000000000..9887ab96da3c
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -0,0 +1,415 @@
+/*
+ * hdmi.h
+ *
+ * HDMI driver definition for TI OMAP4 processors.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _OMAP4_DSS_HDMI_H_
+#define _OMAP4_DSS_HDMI_H_
+
+#include <linux/string.h>
+#include <plat/display.h>
+
+#define HDMI_WP 0x0
+#define HDMI_CORE_SYS 0x400
+#define HDMI_CORE_AV 0x900
+#define HDMI_PLLCTRL 0x200
+#define HDMI_PHY 0x300
+
+struct hdmi_reg { u16 idx; };
+
+#define HDMI_REG(idx) ((const struct hdmi_reg) { idx })
+
+/* HDMI Wrapper */
+#define HDMI_WP_REG(idx) HDMI_REG(HDMI_WP + idx)
+
+#define HDMI_WP_REVISION HDMI_WP_REG(0x0)
+#define HDMI_WP_SYSCONFIG HDMI_WP_REG(0x10)
+#define HDMI_WP_IRQSTATUS_RAW HDMI_WP_REG(0x24)
+#define HDMI_WP_IRQSTATUS HDMI_WP_REG(0x28)
+#define HDMI_WP_PWR_CTRL HDMI_WP_REG(0x40)
+#define HDMI_WP_IRQENABLE_SET HDMI_WP_REG(0x2C)
+#define HDMI_WP_VIDEO_CFG HDMI_WP_REG(0x50)
+#define HDMI_WP_VIDEO_SIZE HDMI_WP_REG(0x60)
+#define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68)
+#define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C)
+#define HDMI_WP_WP_CLK HDMI_WP_REG(0x70)
+
+/* HDMI IP Core System */
+#define HDMI_CORE_SYS_REG(idx) HDMI_REG(HDMI_CORE_SYS + idx)
+
+#define HDMI_CORE_SYS_VND_IDL HDMI_CORE_SYS_REG(0x0)
+#define HDMI_CORE_SYS_DEV_IDL HDMI_CORE_SYS_REG(0x8)
+#define HDMI_CORE_SYS_DEV_IDH HDMI_CORE_SYS_REG(0xC)
+#define HDMI_CORE_SYS_DEV_REV HDMI_CORE_SYS_REG(0x10)
+#define HDMI_CORE_SYS_SRST HDMI_CORE_SYS_REG(0x14)
+#define HDMI_CORE_CTRL1 HDMI_CORE_SYS_REG(0x20)
+#define HDMI_CORE_SYS_SYS_STAT HDMI_CORE_SYS_REG(0x24)
+#define HDMI_CORE_SYS_VID_ACEN HDMI_CORE_SYS_REG(0x124)
+#define HDMI_CORE_SYS_VID_MODE HDMI_CORE_SYS_REG(0x128)
+#define HDMI_CORE_SYS_INTR_STATE HDMI_CORE_SYS_REG(0x1C0)
+#define HDMI_CORE_SYS_INTR1 HDMI_CORE_SYS_REG(0x1C4)
+#define HDMI_CORE_SYS_INTR2 HDMI_CORE_SYS_REG(0x1C8)
+#define HDMI_CORE_SYS_INTR3 HDMI_CORE_SYS_REG(0x1CC)
+#define HDMI_CORE_SYS_INTR4 HDMI_CORE_SYS_REG(0x1D0)
+#define HDMI_CORE_SYS_UMASK1 HDMI_CORE_SYS_REG(0x1D4)
+#define HDMI_CORE_SYS_TMDS_CTRL HDMI_CORE_SYS_REG(0x208)
+#define HDMI_CORE_SYS_DE_DLY HDMI_CORE_SYS_REG(0xC8)
+#define HDMI_CORE_SYS_DE_CTRL HDMI_CORE_SYS_REG(0xCC)
+#define HDMI_CORE_SYS_DE_TOP HDMI_CORE_SYS_REG(0xD0)
+#define HDMI_CORE_SYS_DE_CNTL HDMI_CORE_SYS_REG(0xD8)
+#define HDMI_CORE_SYS_DE_CNTH HDMI_CORE_SYS_REG(0xDC)
+#define HDMI_CORE_SYS_DE_LINL HDMI_CORE_SYS_REG(0xE0)
+#define HDMI_CORE_SYS_DE_LINH_1 HDMI_CORE_SYS_REG(0xE4)
+#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
+#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
+#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
+#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
+
+/* HDMI DDC E-DID */
+#define HDMI_CORE_DDC_CMD HDMI_CORE_SYS_REG(0x3CC)
+#define HDMI_CORE_DDC_STATUS HDMI_CORE_SYS_REG(0x3C8)
+#define HDMI_CORE_DDC_ADDR HDMI_CORE_SYS_REG(0x3B4)
+#define HDMI_CORE_DDC_OFFSET HDMI_CORE_SYS_REG(0x3BC)
+#define HDMI_CORE_DDC_COUNT1 HDMI_CORE_SYS_REG(0x3C0)
+#define HDMI_CORE_DDC_COUNT2 HDMI_CORE_SYS_REG(0x3C4)
+#define HDMI_CORE_DDC_DATA HDMI_CORE_SYS_REG(0x3D0)
+#define HDMI_CORE_DDC_SEGM HDMI_CORE_SYS_REG(0x3B8)
+
+/* HDMI IP Core Audio Video */
+#define HDMI_CORE_AV_REG(idx) HDMI_REG(HDMI_CORE_AV + idx)
+
+#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
+#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
+#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
+#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
+#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
+#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
+#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
+#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
+#define HDMI_CORE_AV_AVI_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x110)
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15)
+#define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190)
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
+#define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290)
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
+#define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300)
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
+#define HDMI_CORE_AV_GEN2_DBYTE HDMI_CORE_AV_REG(0x380)
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
+#define HDMI_CORE_AV_ACR_CTRL HDMI_CORE_AV_REG(0x4)
+#define HDMI_CORE_AV_FREQ_SVAL HDMI_CORE_AV_REG(0x8)
+#define HDMI_CORE_AV_N_SVAL1 HDMI_CORE_AV_REG(0xC)
+#define HDMI_CORE_AV_N_SVAL2 HDMI_CORE_AV_REG(0x10)
+#define HDMI_CORE_AV_N_SVAL3 HDMI_CORE_AV_REG(0x14)
+#define HDMI_CORE_AV_CTS_SVAL1 HDMI_CORE_AV_REG(0x18)
+#define HDMI_CORE_AV_CTS_SVAL2 HDMI_CORE_AV_REG(0x1C)
+#define HDMI_CORE_AV_CTS_SVAL3 HDMI_CORE_AV_REG(0x20)
+#define HDMI_CORE_AV_CTS_HVAL1 HDMI_CORE_AV_REG(0x24)
+#define HDMI_CORE_AV_CTS_HVAL2 HDMI_CORE_AV_REG(0x28)
+#define HDMI_CORE_AV_CTS_HVAL3 HDMI_CORE_AV_REG(0x2C)
+#define HDMI_CORE_AV_AUD_MODE HDMI_CORE_AV_REG(0x50)
+#define HDMI_CORE_AV_SPDIF_CTRL HDMI_CORE_AV_REG(0x54)
+#define HDMI_CORE_AV_HW_SPDIF_FS HDMI_CORE_AV_REG(0x60)
+#define HDMI_CORE_AV_SWAP_I2S HDMI_CORE_AV_REG(0x64)
+#define HDMI_CORE_AV_SPDIF_ERTH HDMI_CORE_AV_REG(0x6C)
+#define HDMI_CORE_AV_I2S_IN_MAP HDMI_CORE_AV_REG(0x70)
+#define HDMI_CORE_AV_I2S_IN_CTRL HDMI_CORE_AV_REG(0x74)
+#define HDMI_CORE_AV_I2S_CHST0 HDMI_CORE_AV_REG(0x78)
+#define HDMI_CORE_AV_I2S_CHST1 HDMI_CORE_AV_REG(0x7C)
+#define HDMI_CORE_AV_I2S_CHST2 HDMI_CORE_AV_REG(0x80)
+#define HDMI_CORE_AV_I2S_CHST4 HDMI_CORE_AV_REG(0x84)
+#define HDMI_CORE_AV_I2S_CHST5 HDMI_CORE_AV_REG(0x88)
+#define HDMI_CORE_AV_ASRC HDMI_CORE_AV_REG(0x8C)
+#define HDMI_CORE_AV_I2S_IN_LEN HDMI_CORE_AV_REG(0x90)
+#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
+#define HDMI_CORE_AV_AUDO_TXSTAT HDMI_CORE_AV_REG(0xC0)
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1 HDMI_CORE_AV_REG(0xCC)
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2 HDMI_CORE_AV_REG(0xD0)
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3 HDMI_CORE_AV_REG(0xD4)
+#define HDMI_CORE_AV_TEST_TXCTRL HDMI_CORE_AV_REG(0xF0)
+#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
+#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
+#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
+#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
+#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
+#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
+#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
+#define HDMI_CORE_AV_SPD_TYPE HDMI_CORE_AV_REG(0x180)
+#define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184)
+#define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188)
+#define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C)
+#define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280)
+#define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284)
+#define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288)
+#define HDMI_CORE_AV_MPEG_CHSUM HDMI_CORE_AV_REG(0x28C)
+#define HDMI_CORE_AV_CP_BYTE1 HDMI_CORE_AV_REG(0x37C)
+#define HDMI_CORE_AV_CEC_ADDR_ID HDMI_CORE_AV_REG(0x3FC)
+#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4
+
+/* PLL */
+#define HDMI_PLL_REG(idx) HDMI_REG(HDMI_PLLCTRL + idx)
+
+#define PLLCTRL_PLL_CONTROL HDMI_PLL_REG(0x0)
+#define PLLCTRL_PLL_STATUS HDMI_PLL_REG(0x4)
+#define PLLCTRL_PLL_GO HDMI_PLL_REG(0x8)
+#define PLLCTRL_CFG1 HDMI_PLL_REG(0xC)
+#define PLLCTRL_CFG2 HDMI_PLL_REG(0x10)
+#define PLLCTRL_CFG3 HDMI_PLL_REG(0x14)
+#define PLLCTRL_CFG4 HDMI_PLL_REG(0x20)
+
+/* HDMI PHY */
+#define HDMI_PHY_REG(idx) HDMI_REG(HDMI_PHY + idx)
+
+#define HDMI_TXPHY_TX_CTRL HDMI_PHY_REG(0x0)
+#define HDMI_TXPHY_DIGITAL_CTRL HDMI_PHY_REG(0x4)
+#define HDMI_TXPHY_POWER_CTRL HDMI_PHY_REG(0x8)
+#define HDMI_TXPHY_PAD_CFG_CTRL HDMI_PHY_REG(0xC)
+
+/* HDMI EDID Length */
+#define HDMI_EDID_MAX_LENGTH 256
+#define EDID_TIMING_DESCRIPTOR_SIZE 0x12
+#define EDID_DESCRIPTOR_BLOCK0_ADDRESS 0x36
+#define EDID_DESCRIPTOR_BLOCK1_ADDRESS 0x80
+#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR 4
+#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR 4
+
+#define OMAP_HDMI_TIMINGS_NB 34
+
+#define REG_FLD_MOD(idx, val, start, end) \
+ hdmi_write_reg(idx, FLD_MOD(hdmi_read_reg(idx), val, start, end))
+#define REG_GET(idx, start, end) \
+ FLD_GET(hdmi_read_reg(idx), start, end)
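+
+/*
+ * Example, matching the driver code: REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0)
+ * sets the SOFTRESET bit, while REG_GET(PLLCTRL_PLL_STATUS, 1, 1) reads the
+ * PLL lock bit.
+ */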
+
+/* HDMI timing structure */
+struct hdmi_timings {
+ struct omap_video_timings timings;
+ int vsync_pol;
+ int hsync_pol;
+};
+
+enum hdmi_phy_pwr {
+ HDMI_PHYPWRCMD_OFF = 0,
+ HDMI_PHYPWRCMD_LDOON = 1,
+ HDMI_PHYPWRCMD_TXON = 2
+};
+
+enum hdmi_pll_pwr {
+ HDMI_PLLPWRCMD_ALLOFF = 0,
+ HDMI_PLLPWRCMD_PLLONLY = 1,
+ HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
+ HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
+};
+
+enum hdmi_clk_refsel {
+ HDMI_REFSEL_PCLK = 0,
+ HDMI_REFSEL_REF1 = 1,
+ HDMI_REFSEL_REF2 = 2,
+ HDMI_REFSEL_SYSCLK = 3
+};
+
+enum hdmi_core_inputbus_width {
+ HDMI_INPUT_8BIT = 0,
+ HDMI_INPUT_10BIT = 1,
+ HDMI_INPUT_12BIT = 2
+};
+
+enum hdmi_core_dither_trunc {
+ HDMI_OUTPUTTRUNCATION_8BIT = 0,
+ HDMI_OUTPUTTRUNCATION_10BIT = 1,
+ HDMI_OUTPUTTRUNCATION_12BIT = 2,
+ HDMI_OUTPUTDITHER_8BIT = 3,
+ HDMI_OUTPUTDITHER_10BIT = 4,
+ HDMI_OUTPUTDITHER_12BIT = 5
+};
+
+enum hdmi_core_deepcolor_ed {
+ HDMI_DEEPCOLORPACKECTDISABLE = 0,
+ HDMI_DEEPCOLORPACKECTENABLE = 1
+};
+
+enum hdmi_core_packet_mode {
+ HDMI_PACKETMODERESERVEDVALUE = 0,
+ HDMI_PACKETMODE24BITPERPIXEL = 4,
+ HDMI_PACKETMODE30BITPERPIXEL = 5,
+ HDMI_PACKETMODE36BITPERPIXEL = 6,
+ HDMI_PACKETMODE48BITPERPIXEL = 7
+};
+
+enum hdmi_core_hdmi_dvi {
+ HDMI_DVI = 0,
+ HDMI_HDMI = 1
+};
+
+enum hdmi_core_tclkselclkmult {
+ HDMI_FPLL05IDCK = 0,
+ HDMI_FPLL10IDCK = 1,
+ HDMI_FPLL20IDCK = 2,
+ HDMI_FPLL40IDCK = 3
+};
+
+enum hdmi_core_packet_ctrl {
+ HDMI_PACKETENABLE = 1,
+ HDMI_PACKETDISABLE = 0,
+ HDMI_PACKETREPEATON = 1,
+ HDMI_PACKETREPEATOFF = 0
+};
+
+/* INFOFRAME_AVI_ definitions */
+enum hdmi_core_infoframe {
+ HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
+ HDMI_INFOFRAME_AVI_DB1B_NO = 0,
+ HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
+ HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
+ HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
+ HDMI_INFOFRAME_AVI_DB1S_0 = 0,
+ HDMI_INFOFRAME_AVI_DB1S_1 = 1,
+ HDMI_INFOFRAME_AVI_DB1S_2 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
+ HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
+ HDMI_INFOFRAME_AVI_DB2M_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2M_43 = 1,
+ HDMI_INFOFRAME_AVI_DB2M_169 = 2,
+ HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
+ HDMI_INFOFRAME_AVI_DB2R_43 = 9,
+ HDMI_INFOFRAME_AVI_DB2R_169 = 10,
+ HDMI_INFOFRAME_AVI_DB2R_149 = 11,
+ HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
+ HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
+ HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
+ HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
+ HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
+ HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
+ HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
+ HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
+ HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
+ HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
+ HDMI_INFOFRAME_AVI_DB5PR_10 = 9
+};
+
+enum hdmi_packing_mode {
+ HDMI_PACK_10b_RGB_YUV444 = 0,
+ HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
+ HDMI_PACK_20b_YUV422 = 2,
+ HDMI_PACK_ALREADYPACKED = 7
+};
+
+struct hdmi_core_video_config {
+ enum hdmi_core_inputbus_width ip_bus_width;
+ enum hdmi_core_dither_trunc op_dither_truc;
+ enum hdmi_core_deepcolor_ed deep_color_pkt;
+ enum hdmi_core_packet_mode pkt_mode;
+ enum hdmi_core_hdmi_dvi hdmi_dvi;
+ enum hdmi_core_tclkselclkmult tclk_sel_clkmult;
+};
+
+/*
+ * Refer to section 8.2 in HDMI 1.3 specification for
+ * details about infoframe databytes
+ */
+struct hdmi_core_infoframe_avi {
+ u8 db1_format;
+ /* Y0, Y1 rgb,yCbCr */
+ u8 db1_active_info;
+ /* A0 Active information Present */
+ u8 db1_bar_info_dv;
+ /* B0, B1 Bar info data valid */
+ u8 db1_scan_info;
+ /* S0, S1 scan information */
+ u8 db2_colorimetry;
+ /* C0, C1 colorimetry */
+ u8 db2_aspect_ratio;
+ /* M0, M1 Aspect ratio (4:3, 16:9) */
+ u8 db2_active_fmt_ar;
+ /* R0...R3 Active format aspect ratio */
+ u8 db3_itc;
+ /* ITC IT content. */
+ u8 db3_ec;
+ /* EC0, EC1, EC2 Extended colorimetry */
+ u8 db3_q_range;
+ /* Q1, Q0 Quantization range */
+ u8 db3_nup_scaling;
+ /* SC1, SC0 Non-uniform picture scaling */
+ u8 db4_videocode;
+ /* VIC0..6 Video format identification */
+ u8 db5_pixel_repeat;
+ /* PR0..PR3 Pixel repetition factor */
+ u16 db6_7_line_eoftop;
+ /* Line number end of top bar */
+ u16 db8_9_line_sofbottom;
+ /* Line number start of bottom bar */
+ u16 db10_11_pixel_eofleft;
+ /* Pixel number end of left bar */
+ u16 db12_13_pixel_sofright;
+ /* Pixel number start of right bar */
+};
+
+struct hdmi_core_packet_enable_repeat {
+ u32 audio_pkt;
+ u32 audio_pkt_repeat;
+ u32 avi_infoframe;
+ u32 avi_infoframe_repeat;
+ u32 gen_cntrl_pkt;
+ u32 gen_cntrl_pkt_repeat;
+ u32 generic_pkt;
+ u32 generic_pkt_repeat;
+};
+
+struct hdmi_video_format {
+ enum hdmi_packing_mode packing_mode;
+ u32 y_res; /* lines per panel */
+ u32 x_res; /* pixels per line */
+};
+
+struct hdmi_video_interface {
+ int vsp; /* Vsync polarity */
+ int hsp; /* Hsync polarity */
+ int interlacing;
+ int tm; /* Timing mode */
+};
+
+struct hdmi_cm {
+ int code;
+ int mode;
+};
+
+struct hdmi_config {
+ struct hdmi_timings timings;
+ u16 interlace;
+ struct hdmi_cm cm;
+};
+
+#endif
diff --git a/drivers/video/omap2/dss/hdmi_omap4_panel.c b/drivers/video/omap2/dss/hdmi_omap4_panel.c
new file mode 100644
index 000000000000..ffb5de94131f
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_omap4_panel.c
@@ -0,0 +1,222 @@
+/*
+ * hdmi_omap4_panel.c
+ *
+ * HDMI library support functions for TI OMAP4 processors.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Mythri P k <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <plat/display.h>
+
+#include "dss.h"
+
+static struct {
+ struct mutex hdmi_lock;
+} hdmi;
+
+
+static int hdmi_panel_probe(struct omap_dss_device *dssdev)
+{
+ DSSDBG("ENTER hdmi_panel_probe\n");
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT |
+ OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS;
+
+ /*
+ * Initialize the timings to 640x480. This is only for the
+ * framebuffer update, not for setting the TV timings; the TV
+ * timings are set only on enable
+ */
+ dssdev->panel.timings.x_res = 640;
+ dssdev->panel.timings.y_res = 480;
+
+ DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n",
+ dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
+ return 0;
+}
+
+static void hdmi_panel_remove(struct omap_dss_device *dssdev)
+{
+
+}
+
+static int hdmi_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+ DSSDBG("ENTER hdmi_panel_enable\n");
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = omapdss_hdmi_display_enable(dssdev);
+ if (r) {
+ DSSERR("failed to power on\n");
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ return r;
+}
+
+static void hdmi_panel_disable(struct omap_dss_device *dssdev)
+{
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ omapdss_hdmi_display_disable(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ omapdss_hdmi_display_disable(dssdev);
+
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ return r;
+}
+
+static int hdmi_panel_resume(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = omapdss_hdmi_display_enable(dssdev);
+ if (r) {
+ DSSERR("failed to power on\n");
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ return r;
+}
+
+static void hdmi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ mutex_lock(&hdmi.hdmi_lock);
+
+ *timings = dssdev->panel.timings;
+
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+static void hdmi_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ DSSDBG("hdmi_set_timings\n");
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ dssdev->panel.timings = *timings;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ /* turn the hdmi off and on to get new timings to use */
+ omapdss_hdmi_display_disable(dssdev);
+ omapdss_hdmi_display_set_timing(dssdev);
+ }
+
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+static int hdmi_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ int r = 0;
+
+ DSSDBG("hdmi_check_timings\n");
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ r = omapdss_hdmi_display_check_timing(dssdev, timings);
+ if (r) {
+ DSSERR("Timing cannot be applied\n");
+ goto err;
+ }
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+ return r;
+}
+
+static struct omap_dss_driver hdmi_driver = {
+ .probe = hdmi_panel_probe,
+ .remove = hdmi_panel_remove,
+ .enable = hdmi_panel_enable,
+ .disable = hdmi_panel_disable,
+ .suspend = hdmi_panel_suspend,
+ .resume = hdmi_panel_resume,
+ .get_timings = hdmi_get_timings,
+ .set_timings = hdmi_set_timings,
+ .check_timings = hdmi_check_timings,
+ .driver = {
+ .name = "hdmi_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+int hdmi_panel_init(void)
+{
+ mutex_init(&hdmi.hdmi_lock);
+
+ omap_dss_register_driver(&hdmi_driver);
+
+ return 0;
+}
+
+void hdmi_panel_exit(void)
+{
+ omap_dss_unregister_driver(&hdmi_driver);
+
+}
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 172d4e697309..bcd37ec86952 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -515,6 +515,8 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
irq = DISPC_IRQ_EVSYNC_ODD;
+ } else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) {
+ irq = DISPC_IRQ_EVSYNC_EVEN;
} else {
if (mgr->id == OMAP_DSS_CHANNEL_LCD)
irq = DISPC_IRQ_VSYNC;
@@ -536,7 +538,8 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
+ || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
@@ -613,7 +616,8 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
+ || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
@@ -1377,6 +1381,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
case OMAP_DISPLAY_TYPE_DBI:
case OMAP_DISPLAY_TYPE_SDI:
case OMAP_DISPLAY_TYPE_VENC:
+ case OMAP_DISPLAY_TYPE_HDMI:
default_get_overlay_fifo_thresholds(ovl->id, size,
&oc->burst_size, &oc->fifo_low,
&oc->fifo_high);
@@ -1394,7 +1399,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
}
r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
if (!dss_cache.irq_enabled) {
u32 mask;
@@ -1407,7 +1412,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
dss_cache.irq_enabled = true;
}
configure_dispc();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
spin_unlock_irqrestore(&dss_cache.lock, flags);
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 456efef03c20..f1aca6d04011 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -490,7 +490,7 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
ovl->manager = mgr;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
/* XXX: on manual update display, in auto update mode, a bug happens
* here. When an overlay is first enabled on LCD, then it's disabled,
* and the manager is changed to TV, we sometimes get SYNC_LOST_DIGIT
@@ -499,7 +499,7 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
* but I don't understand how or why. */
msleep(40);
dispc_set_channel_out(ovl->id, mgr->id);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
}
@@ -679,7 +679,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
lcd2_mgr->set_device(lcd2_mgr, dssdev);
mgr = lcd2_mgr;
}
- } else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
+ } else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC
+ && dssdev->type != OMAP_DISPLAY_TYPE_HDMI) {
if (!lcd_mgr->device || force) {
if (lcd_mgr->device)
lcd_mgr->unset_device(lcd_mgr);
@@ -688,7 +689,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
}
}
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
+ || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
if (!tv_mgr->device || force) {
if (tv_mgr->device)
tv_mgr->unset_device(tv_mgr);
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 10a2ffe02882..5ea17f49c611 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -36,8 +36,6 @@
#include <plat/display.h>
#include "dss.h"
-#define RFBI_BASE 0x48050800
-
struct rfbi_reg { u16 idx; };
#define RFBI_REG(idx) ((const struct rfbi_reg) { idx })
@@ -100,6 +98,7 @@ static int rfbi_convert_timings(struct rfbi_timings *t);
static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
static struct {
+ struct platform_device *pdev;
void __iomem *base;
unsigned long l4_khz;
@@ -142,9 +141,9 @@ static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
static void rfbi_enable_clocks(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
void omap_rfbi_write_command(const void *buf, u32 len)
@@ -497,7 +496,7 @@ unsigned long rfbi_get_max_tx_rate(void)
};
l4_rate = rfbi.l4_khz / 1000;
- dss1_rate = dss_clk_get_rate(DSS_CLK_FCK1) / 1000000;
+ dss1_rate = dss_clk_get_rate(DSS_CLK_FCK) / 1000000;
for (i = 0; i < ARRAY_SIZE(ftab); i++) {
/* Use a window instead of an exact match, to account
@@ -922,7 +921,7 @@ void rfbi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(RFBI_REVISION);
DUMPREG(RFBI_SYSCONFIG);
@@ -953,54 +952,10 @@ void rfbi_dump_regs(struct seq_file *s)
DUMPREG(RFBI_VSYNC_WIDTH);
DUMPREG(RFBI_HSYNC_WIDTH);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
-int rfbi_init(void)
-{
- u32 rev;
- u32 l;
-
- spin_lock_init(&rfbi.cmd_lock);
-
- init_completion(&rfbi.cmd_done);
- atomic_set(&rfbi.cmd_fifo_full, 0);
- atomic_set(&rfbi.cmd_pending, 0);
-
- rfbi.base = ioremap(RFBI_BASE, SZ_256);
- if (!rfbi.base) {
- DSSERR("can't ioremap RFBI\n");
- return -ENOMEM;
- }
-
- rfbi_enable_clocks(1);
-
- msleep(10);
-
- rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
-
- /* Enable autoidle and smart-idle */
- l = rfbi_read_reg(RFBI_SYSCONFIG);
- l |= (1 << 0) | (2 << 3);
- rfbi_write_reg(RFBI_SYSCONFIG, l);
-
- rev = rfbi_read_reg(RFBI_REVISION);
- printk(KERN_INFO "OMAP RFBI rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- rfbi_enable_clocks(0);
-
- return 0;
-}
-
-void rfbi_exit(void)
-{
- DSSDBG("rfbi_exit\n");
-
- iounmap(rfbi.base);
-}
-
int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
int r;
@@ -1056,3 +1011,74 @@ int rfbi_init_display(struct omap_dss_device *dssdev)
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
return 0;
}
+
+/* RFBI HW IP initialisation */
+static int omap_rfbihw_probe(struct platform_device *pdev)
+{
+ u32 rev;
+ u32 l;
+ struct resource *rfbi_mem;
+
+ rfbi.pdev = pdev;
+
+ spin_lock_init(&rfbi.cmd_lock);
+
+ init_completion(&rfbi.cmd_done);
+ atomic_set(&rfbi.cmd_fifo_full, 0);
+ atomic_set(&rfbi.cmd_pending, 0);
+
+ rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
+ if (!rfbi_mem) {
+ DSSERR("can't get IORESOURCE_MEM RFBI\n");
+ return -EINVAL;
+ }
+ rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem));
+ if (!rfbi.base) {
+ DSSERR("can't ioremap RFBI\n");
+ return -ENOMEM;
+ }
+
+ rfbi_enable_clocks(1);
+
+ msleep(10);
+
+ rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+
+ /* Enable autoidle and smart-idle */
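+ /* on OMAP, SYSCONFIG bit 0 is AUTOIDLE and bits 4:3 are SIDLEMODE (2 = smart-idle) */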
+ l = rfbi_read_reg(RFBI_SYSCONFIG);
+ l |= (1 << 0) | (2 << 3);
+ rfbi_write_reg(RFBI_SYSCONFIG, l);
+
+ rev = rfbi_read_reg(RFBI_REVISION);
+ dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+static int omap_rfbihw_remove(struct platform_device *pdev)
+{
+ iounmap(rfbi.base);
+ return 0;
+}
+
+static struct platform_driver omap_rfbihw_driver = {
+ .probe = omap_rfbihw_probe,
+ .remove = omap_rfbihw_remove,
+ .driver = {
+ .name = "omapdss_rfbi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int rfbi_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_rfbihw_driver);
+}
+
+void rfbi_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_rfbihw_driver);
+}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index b64adf7dfc88..54a53e648180 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -30,7 +30,6 @@
#include "dss.h"
static struct {
- bool skip_init;
bool update_enabled;
struct regulator *vdds_sdi_reg;
} sdi;
@@ -68,9 +67,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err1;
- /* In case of skip_init sdi_init has already enabled the clocks */
- if (!sdi.skip_init)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
sdi_basic_init(dssdev);
@@ -80,14 +77,8 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
- if (!sdi.skip_init) {
- r = dss_calc_clock_div(1, t->pixel_clock * 1000,
- &dss_cinfo, &dispc_cinfo);
- } else {
- r = dss_get_clock_div(&dss_cinfo);
- r = dispc_get_clock_div(dssdev->manager->id, &dispc_cinfo);
- }
-
+ r = dss_calc_clock_div(1, t->pixel_clock * 1000,
+ &dss_cinfo, &dispc_cinfo);
if (r)
goto err2;
@@ -116,21 +107,17 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err2;
- if (!sdi.skip_init) {
- dss_sdi_init(dssdev->phy.sdi.datapairs);
- r = dss_sdi_enable();
- if (r)
- goto err1;
- mdelay(2);
- }
+ dss_sdi_init(dssdev->phy.sdi.datapairs);
+ r = dss_sdi_enable();
+ if (r)
+ goto err1;
+ mdelay(2);
dssdev->manager->enable(dssdev->manager);
- sdi.skip_init = 0;
-
return 0;
err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
regulator_disable(sdi.vdds_sdi_reg);
err1:
omap_dss_stop_device(dssdev);
@@ -145,7 +132,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
dss_sdi_disable();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
regulator_disable(sdi.vdds_sdi_reg);
@@ -157,25 +144,24 @@ int sdi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("SDI init\n");
+ if (sdi.vdds_sdi_reg == NULL) {
+ struct regulator *vdds_sdi;
+
+ vdds_sdi = dss_get_vdds_sdi();
+
+ if (IS_ERR(vdds_sdi)) {
+ DSSERR("can't get VDDS_SDI regulator\n");
+ return PTR_ERR(vdds_sdi);
+ }
+
+ sdi.vdds_sdi_reg = vdds_sdi;
+ }
+
return 0;
}
-int sdi_init(bool skip_init)
+int sdi_init(void)
{
- /* we store this for first display enable, then clear it */
- sdi.skip_init = skip_init;
-
- sdi.vdds_sdi_reg = dss_get_vdds_sdi();
- if (IS_ERR(sdi.vdds_sdi_reg)) {
- DSSERR("can't get VDDS_SDI regulator\n");
- return PTR_ERR(sdi.vdds_sdi_reg);
- }
- /*
- * Enable clocks already here, otherwise there would be a toggle
- * of them until sdi_display_enable is called.
- */
- if (skip_init)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
return 0;
}
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index eff35050e28a..8e35a5bae429 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -39,8 +39,6 @@
#include "dss.h"
-#define VENC_BASE 0x48050C00
-
/* Venc registers */
#define VENC_REV_ID 0x00
#define VENC_STATUS 0x04
@@ -289,6 +287,7 @@ const struct omap_video_timings omap_dss_ntsc_timings = {
EXPORT_SYMBOL(omap_dss_ntsc_timings);
static struct {
+ struct platform_device *pdev;
void __iomem *base;
struct mutex venc_lock;
u32 wss_data;
@@ -381,11 +380,11 @@ static void venc_reset(void)
static void venc_enable_clocks(int enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
- DSS_CLK_96M);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
+ DSS_CLK_VIDFCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
- DSS_CLK_96M);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
+ DSS_CLK_VIDFCK);
}
static const struct venc_config *venc_timings_to_config(
@@ -641,50 +640,23 @@ static struct omap_dss_driver venc_driver = {
};
/* driver end */
-
-
-int venc_init(struct platform_device *pdev)
+int venc_init_display(struct omap_dss_device *dssdev)
{
- u8 rev_id;
+ DSSDBG("init_display\n");
- mutex_init(&venc.venc_lock);
+ if (venc.vdda_dac_reg == NULL) {
+ struct regulator *vdda_dac;
- venc.wss_data = 0;
+ vdda_dac = regulator_get(&venc.pdev->dev, "vdda_dac");
- venc.base = ioremap(VENC_BASE, SZ_1K);
- if (!venc.base) {
- DSSERR("can't ioremap VENC\n");
- return -ENOMEM;
- }
+ if (IS_ERR(vdda_dac)) {
+ DSSERR("can't get VDDA_DAC regulator\n");
+ return PTR_ERR(vdda_dac);
+ }
- venc.vdda_dac_reg = dss_get_vdda_dac();
- if (IS_ERR(venc.vdda_dac_reg)) {
- iounmap(venc.base);
- DSSERR("can't get VDDA_DAC regulator\n");
- return PTR_ERR(venc.vdda_dac_reg);
+ venc.vdda_dac_reg = vdda_dac;
}
- venc_enable_clocks(1);
-
- rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
- printk(KERN_INFO "OMAP VENC rev %d\n", rev_id);
-
- venc_enable_clocks(0);
-
- return omap_dss_register_driver(&venc_driver);
-}
-
-void venc_exit(void)
-{
- omap_dss_unregister_driver(&venc_driver);
-
- iounmap(venc.base);
-}
-
-int venc_init_display(struct omap_dss_device *dssdev)
-{
- DSSDBG("init_display\n");
-
return 0;
}
@@ -740,3 +712,73 @@ void venc_dump_regs(struct seq_file *s)
#undef DUMPREG
}
+
+/* VENC HW IP initialisation */
+static int omap_venchw_probe(struct platform_device *pdev)
+{
+ u8 rev_id;
+ struct resource *venc_mem;
+
+ venc.pdev = pdev;
+
+ mutex_init(&venc.venc_lock);
+
+ venc.wss_data = 0;
+
+ venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
+ if (!venc_mem) {
+ DSSERR("can't get IORESOURCE_MEM VENC\n");
+ return -EINVAL;
+ }
+ venc.base = ioremap(venc_mem->start, resource_size(venc_mem));
+ if (!venc.base) {
+ DSSERR("can't ioremap VENC\n");
+ return -ENOMEM;
+ }
+
+ venc_enable_clocks(1);
+
+ rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
+ dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
+
+ venc_enable_clocks(0);
+
+ return omap_dss_register_driver(&venc_driver);
+}
+
+static int omap_venchw_remove(struct platform_device *pdev)
+{
+ if (venc.vdda_dac_reg != NULL) {
+ regulator_put(venc.vdda_dac_reg);
+ venc.vdda_dac_reg = NULL;
+ }
+ omap_dss_unregister_driver(&venc_driver);
+
+ iounmap(venc.base);
+ return 0;
+}
+
+static struct platform_driver omap_venchw_driver = {
+ .probe = omap_venchw_probe,
+ .remove = omap_venchw_remove,
+ .driver = {
+ .name = "omapdss_venc",
+ .owner = THIS_MODULE,
+ },
+};
+
+int venc_init_platform_driver(void)
+{
+ if (cpu_is_omap44xx())
+ return 0;
+
+ return platform_driver_register(&omap_venchw_driver);
+}
+
+void venc_uninit_platform_driver(void)
+{
+ if (cpu_is_omap44xx())
+ return;
+
+ return platform_driver_unregister(&omap_venchw_driver);
+}
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index 65149b22cf37..aa33386c81ff 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -1,5 +1,5 @@
menuconfig FB_OMAP2
- tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)"
+ tristate "OMAP2+ frame buffer support (EXPERIMENTAL)"
depends on FB && OMAP2_DSS
select OMAP2_VRAM
@@ -8,10 +8,10 @@ menuconfig FB_OMAP2
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
help
- Frame buffer driver for OMAP2/3 based boards.
+ Frame buffer driver for OMAP2+ based boards.
config FB_OMAP2_DEBUG_SUPPORT
- bool "Debug support for OMAP2/3 FB"
+ bool "Debug support for OMAP2+ FB"
default y
depends on FB_OMAP2
help
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 4fdab8e9c496..505ec6672049 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -2090,7 +2090,7 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
{
int r;
u8 bpp;
- struct omap_video_timings timings;
+ struct omap_video_timings timings, temp_timings;
r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
if (r)
@@ -2100,14 +2100,23 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp;
++fbdev->num_bpp_overrides;
- if (!display->driver->check_timings || !display->driver->set_timings)
- return -EINVAL;
+ if (display->driver->check_timings) {
+ r = display->driver->check_timings(display, &timings);
+ if (r)
+ return r;
+ } else {
+ /* If check_timings is not present, compare xres and yres */
+ if (display->driver->get_timings) {
+ display->driver->get_timings(display, &temp_timings);
- r = display->driver->check_timings(display, &timings);
- if (r)
- return r;
+ if (temp_timings.x_res != timings.x_res ||
+ temp_timings.y_res != timings.y_res)
+ return -EINVAL;
+ }
+ }
- display->driver->set_timings(display, &timings);
+ if (display->driver->set_timings)
+ display->driver->set_timings(display, &timings);
return 0;
}
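The hunk above drops the hard requirement for check_timings()/set_timings() in favour of a fallback: without check_timings(), a mode is accepted only when its resolution matches what get_timings() reports, and set_timings() is called only if present. A minimal standalone sketch of that decision order follows; the sketch_* structs and names are invented stand-ins, not the real omapdss driver ops.

#include <errno.h>

struct sketch_timings { int x_res, y_res; };

struct sketch_display_ops {
	int  (*check_timings)(struct sketch_timings *t);
	void (*get_timings)(struct sketch_timings *t);
	void (*set_timings)(const struct sketch_timings *t);
};

/* Mirrors the order of checks in the omapfb_set_def_mode() hunk above. */
static int sketch_apply_timings(const struct sketch_display_ops *ops,
				struct sketch_timings *t)
{
	if (ops->check_timings) {
		int r = ops->check_timings(t);
		if (r)
			return r;
	} else if (ops->get_timings) {
		struct sketch_timings cur;

		ops->get_timings(&cur);
		if (cur.x_res != t->x_res || cur.y_res != t->y_res)
			return -EINVAL;		/* resolution mismatch */
	}

	if (ops->set_timings)
		ops->set_timings(t);

	return 0;
}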
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index da388186d617..d8ab7be4fd6b 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -355,6 +355,7 @@ static void riva_bl_init(struct riva_par *par)
snprintf(name, sizeof(name), "rivabl%d", info->node);
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, info->dev, par, &riva_bl_ops,
&props);
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 83ce9a04d872..6817d187d46e 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -1340,6 +1340,7 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
sfb->bus_clk = clk_get(dev, "lcd");
if (IS_ERR(sfb->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
+ ret = PTR_ERR(sfb->bus_clk);
goto err_sfb;
}
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 75738a928610..ddedad9cd069 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -64,6 +64,8 @@ static const struct svga_fb_format s3fb_formats[] = {
static const struct svga_pll s3_pll = {3, 129, 3, 33, 0, 3,
35000, 240000, 14318};
+static const struct svga_pll s3_trio3d_pll = {3, 129, 3, 31, 0, 4,
+ 230000, 460000, 14318};
static const int s3_memsizes[] = {4096, 0, 3072, 8192, 2048, 6144, 1024, 512};
@@ -72,7 +74,8 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
"S3 Plato/PX", "S3 Aurora64VP", "S3 Virge",
"S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX",
"S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P",
- "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X"};
+ "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X",
+ "S3 Trio3D"};
#define CHIP_UNKNOWN 0x00
#define CHIP_732_TRIO32 0x01
@@ -93,6 +96,7 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
#define CHIP_360_TRIO3D_1X 0x10
#define CHIP_362_TRIO3D_2X 0x11
#define CHIP_368_TRIO3D_2X 0x12
+#define CHIP_365_TRIO3D 0x13
#define CHIP_XXX_TRIO 0x80
#define CHIP_XXX_TRIO64V2_DXGX 0x81
@@ -119,9 +123,11 @@ static const struct vga_regset s3_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07,
static const struct vga_regset s3_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
static const struct vga_regset s3_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x5E, 6, 6}, VGA_REGSET_END};
-static const struct vga_regset s3_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x31, 4, 5}, {0x51, 0, 1}, VGA_REGSET_END};
+static const struct vga_regset s3_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x69, 0, 4}, VGA_REGSET_END};
static const struct vga_regset s3_offset_regs[] = {{0x13, 0, 7}, {0x51, 4, 5}, VGA_REGSET_END}; /* set 0x43 bit 2 to 0 */
+static const struct vga_regset s3_dtpc_regs[] = {{0x3B, 0, 7}, {0x5D, 6, 6}, VGA_REGSET_END};
+
static const struct svga_timing_regs s3_timing_regs = {
s3_h_total_regs, s3_h_display_regs, s3_h_blank_start_regs,
s3_h_blank_end_regs, s3_h_sync_start_regs, s3_h_sync_end_regs,
@@ -188,12 +194,19 @@ static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map)
}
}
+static void s3fb_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor)
+{
+ struct s3fb_info *par = info->par;
+
+ svga_tilecursor(par->state.vgabase, info, cursor);
+}
+
static struct fb_tile_ops s3fb_tile_ops = {
.fb_settile = svga_settile,
.fb_tilecopy = svga_tilecopy,
.fb_tilefill = svga_tilefill,
.fb_tileblit = svga_tileblit,
- .fb_tilecursor = svga_tilecursor,
+ .fb_tilecursor = s3fb_tilecursor,
.fb_get_tilemax = svga_get_tilemax,
};
@@ -202,7 +215,7 @@ static struct fb_tile_ops s3fb_fast_tile_ops = {
.fb_tilecopy = svga_tilecopy,
.fb_tilefill = svga_tilefill,
.fb_tileblit = svga_tileblit,
- .fb_tilecursor = svga_tilecursor,
+ .fb_tilecursor = s3fb_tilecursor,
.fb_get_tilemax = svga_get_tilemax,
};
@@ -334,33 +347,34 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
u8 regval;
int rv;
- rv = svga_compute_pll(&s3_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
+ rv = svga_compute_pll((par->chip == CHIP_365_TRIO3D) ? &s3_trio3d_pll : &s3_pll,
+ 1000000000 / pixclock, &m, &n, &r, info->node);
if (rv < 0) {
printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
return;
}
/* Set VGA misc register */
- regval = vga_r(NULL, VGA_MIS_R);
- vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
+ regval = vga_r(par->state.vgabase, VGA_MIS_R);
+ vga_w(par->state.vgabase, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
/* Set S3 clock registers */
if (par->chip == CHIP_360_TRIO3D_1X ||
par->chip == CHIP_362_TRIO3D_2X ||
par->chip == CHIP_368_TRIO3D_2X) {
- vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */
- vga_wseq(NULL, 0x29, r >> 2); /* remaining highest bit of r */
+ vga_wseq(par->state.vgabase, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */
+ vga_wseq(par->state.vgabase, 0x29, r >> 2); /* remaining highest bit of r */
} else
- vga_wseq(NULL, 0x12, (n - 2) | (r << 5));
- vga_wseq(NULL, 0x13, m - 2);
+ vga_wseq(par->state.vgabase, 0x12, (n - 2) | (r << 5));
+ vga_wseq(par->state.vgabase, 0x13, m - 2);
udelay(1000);
/* Activate clock - write 0, 1, 0 to seq/15 bit 5 */
- regval = vga_rseq (NULL, 0x15); /* | 0x80; */
- vga_wseq(NULL, 0x15, regval & ~(1<<5));
- vga_wseq(NULL, 0x15, regval | (1<<5));
- vga_wseq(NULL, 0x15, regval & ~(1<<5));
+ regval = vga_rseq (par->state.vgabase, 0x15); /* | 0x80; */
+ vga_wseq(par->state.vgabase, 0x15, regval & ~(1<<5));
+ vga_wseq(par->state.vgabase, 0x15, regval | (1<<5));
+ vga_wseq(par->state.vgabase, 0x15, regval & ~(1<<5));
}
@@ -372,7 +386,10 @@ static int s3fb_open(struct fb_info *info, int user)
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
+ void __iomem *vgabase = par->state.vgabase;
+
memset(&(par->state), 0, sizeof(struct vgastate));
+ par->state.vgabase = vgabase;
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
par->state.num_crtc = 0x70;
par->state.num_seq = 0x20;
@@ -470,6 +487,7 @@ static int s3fb_set_par(struct fb_info *info)
struct s3fb_info *par = info->par;
u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes;
u32 bpp = info->var.bits_per_pixel;
+ u32 htotal, hsstart;
if (bpp != 0) {
info->fix.ypanstep = 1;
@@ -504,99 +522,112 @@ static int s3fb_set_par(struct fb_info *info)
info->var.activate = FB_ACTIVATE_NOW;
/* Unlock registers */
- vga_wcrt(NULL, 0x38, 0x48);
- vga_wcrt(NULL, 0x39, 0xA5);
- vga_wseq(NULL, 0x08, 0x06);
- svga_wcrt_mask(0x11, 0x00, 0x80);
+ vga_wcrt(par->state.vgabase, 0x38, 0x48);
+ vga_wcrt(par->state.vgabase, 0x39, 0xA5);
+ vga_wseq(par->state.vgabase, 0x08, 0x06);
+ svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x80);
/* Blank screen and turn off sync */
- svga_wseq_mask(0x01, 0x20, 0x20);
- svga_wcrt_mask(0x17, 0x00, 0x80);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
/* Set default values */
- svga_set_default_gfx_regs();
- svga_set_default_atc_regs();
- svga_set_default_seq_regs();
- svga_set_default_crt_regs();
- svga_wcrt_multi(s3_line_compare_regs, 0xFFFFFFFF);
- svga_wcrt_multi(s3_start_address_regs, 0);
+ svga_set_default_gfx_regs(par->state.vgabase);
+ svga_set_default_atc_regs(par->state.vgabase);
+ svga_set_default_seq_regs(par->state.vgabase);
+ svga_set_default_crt_regs(par->state.vgabase);
+ svga_wcrt_multi(par->state.vgabase, s3_line_compare_regs, 0xFFFFFFFF);
+ svga_wcrt_multi(par->state.vgabase, s3_start_address_regs, 0);
/* S3 specific initialization */
- svga_wcrt_mask(0x58, 0x10, 0x10); /* enable linear framebuffer */
- svga_wcrt_mask(0x31, 0x08, 0x08); /* enable sequencer access to framebuffer above 256 kB */
+ svga_wcrt_mask(par->state.vgabase, 0x58, 0x10, 0x10); /* enable linear framebuffer */
+ svga_wcrt_mask(par->state.vgabase, 0x31, 0x08, 0x08); /* enable sequencer access to framebuffer above 256 kB */
-/* svga_wcrt_mask(0x33, 0x08, 0x08); */ /* DDR ? */
-/* svga_wcrt_mask(0x43, 0x01, 0x01); */ /* DDR ? */
- svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */
- svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */
+/* svga_wcrt_mask(par->state.vgabase, 0x33, 0x08, 0x08); */ /* DDR ? */
+/* svga_wcrt_mask(par->state.vgabase, 0x43, 0x01, 0x01); */ /* DDR ? */
+ svga_wcrt_mask(par->state.vgabase, 0x33, 0x00, 0x08); /* no DDR ? */
+ svga_wcrt_mask(par->state.vgabase, 0x43, 0x00, 0x01); /* no DDR ? */
- svga_wcrt_mask(0x5D, 0x00, 0x28); /* Clear strange HSlen bits */
+ svga_wcrt_mask(par->state.vgabase, 0x5D, 0x00, 0x28); /* Clear strange HSlen bits */
-/* svga_wcrt_mask(0x58, 0x03, 0x03); */
+/* svga_wcrt_mask(par->state.vgabase, 0x58, 0x03, 0x03); */
-/* svga_wcrt_mask(0x53, 0x12, 0x13); */ /* enable MMIO */
-/* svga_wcrt_mask(0x40, 0x08, 0x08); */ /* enable write buffer */
+/* svga_wcrt_mask(par->state.vgabase, 0x53, 0x12, 0x13); */ /* enable MMIO */
+/* svga_wcrt_mask(par->state.vgabase, 0x40, 0x08, 0x08); */ /* enable write buffer */
/* Set the offset register */
pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
- svga_wcrt_multi(s3_offset_regs, offset_value);
+ svga_wcrt_multi(par->state.vgabase, s3_offset_regs, offset_value);
if (par->chip != CHIP_360_TRIO3D_1X &&
par->chip != CHIP_362_TRIO3D_2X &&
par->chip != CHIP_368_TRIO3D_2X) {
- vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
- vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
- vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
- vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
+ vga_wcrt(par->state.vgabase, 0x54, 0x18); /* M parameter */
+ vga_wcrt(par->state.vgabase, 0x60, 0xff); /* N parameter */
+ vga_wcrt(par->state.vgabase, 0x61, 0xff); /* L parameter */
+ vga_wcrt(par->state.vgabase, 0x62, 0xff); /* L parameter */
}
- vga_wcrt(NULL, 0x3A, 0x35);
- svga_wattr(0x33, 0x00);
+ vga_wcrt(par->state.vgabase, 0x3A, 0x35);
+ svga_wattr(par->state.vgabase, 0x33, 0x00);
if (info->var.vmode & FB_VMODE_DOUBLE)
- svga_wcrt_mask(0x09, 0x80, 0x80);
+ svga_wcrt_mask(par->state.vgabase, 0x09, 0x80, 0x80);
else
- svga_wcrt_mask(0x09, 0x00, 0x80);
+ svga_wcrt_mask(par->state.vgabase, 0x09, 0x00, 0x80);
if (info->var.vmode & FB_VMODE_INTERLACED)
- svga_wcrt_mask(0x42, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x42, 0x20, 0x20);
else
- svga_wcrt_mask(0x42, 0x00, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x42, 0x00, 0x20);
/* Disable hardware graphics cursor */
- svga_wcrt_mask(0x45, 0x00, 0x01);
+ svga_wcrt_mask(par->state.vgabase, 0x45, 0x00, 0x01);
/* Disable Streams engine */
- svga_wcrt_mask(0x67, 0x00, 0x0C);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0x0C);
mode = svga_match_format(s3fb_formats, &(info->var), &(info->fix));
/* S3 virge DX hack */
if (par->chip == CHIP_375_VIRGE_DX) {
- vga_wcrt(NULL, 0x86, 0x80);
- vga_wcrt(NULL, 0x90, 0x00);
+ vga_wcrt(par->state.vgabase, 0x86, 0x80);
+ vga_wcrt(par->state.vgabase, 0x90, 0x00);
}
/* S3 virge VX hack */
if (par->chip == CHIP_988_VIRGE_VX) {
- vga_wcrt(NULL, 0x50, 0x00);
- vga_wcrt(NULL, 0x67, 0x50);
+ vga_wcrt(par->state.vgabase, 0x50, 0x00);
+ vga_wcrt(par->state.vgabase, 0x67, 0x50);
- vga_wcrt(NULL, 0x63, (mode <= 2) ? 0x90 : 0x09);
- vga_wcrt(NULL, 0x66, 0x90);
+ vga_wcrt(par->state.vgabase, 0x63, (mode <= 2) ? 0x90 : 0x09);
+ vga_wcrt(par->state.vgabase, 0x66, 0x90);
}
if (par->chip == CHIP_360_TRIO3D_1X ||
par->chip == CHIP_362_TRIO3D_2X ||
- par->chip == CHIP_368_TRIO3D_2X) {
+ par->chip == CHIP_368_TRIO3D_2X ||
+ par->chip == CHIP_365_TRIO3D ||
+ par->chip == CHIP_375_VIRGE_DX ||
+ par->chip == CHIP_385_VIRGE_GX) {
dbytes = info->var.xres * ((bpp+7)/8);
- vga_wcrt(NULL, 0x91, (dbytes + 7) / 8);
- vga_wcrt(NULL, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80);
+ vga_wcrt(par->state.vgabase, 0x91, (dbytes + 7) / 8);
+ vga_wcrt(par->state.vgabase, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80);
- vga_wcrt(NULL, 0x66, 0x81);
+ vga_wcrt(par->state.vgabase, 0x66, 0x81);
}
- svga_wcrt_mask(0x31, 0x00, 0x40);
+ if (par->chip == CHIP_356_VIRGE_GX2 ||
+ par->chip == CHIP_357_VIRGE_GX2P ||
+ par->chip == CHIP_359_VIRGE_GX2P ||
+ par->chip == CHIP_360_TRIO3D_1X ||
+ par->chip == CHIP_362_TRIO3D_2X ||
+ par->chip == CHIP_368_TRIO3D_2X)
+ vga_wcrt(par->state.vgabase, 0x34, 0x00);
+ else /* enable Data Transfer Position Control (DTPC) */
+ vga_wcrt(par->state.vgabase, 0x34, 0x10);
+
+ svga_wcrt_mask(par->state.vgabase, 0x31, 0x00, 0x40);
multiplex = 0;
hmul = 1;
@@ -604,51 +635,51 @@ static int s3fb_set_par(struct fb_info *info)
switch (mode) {
case 0:
pr_debug("fb%d: text mode\n", info->node);
- svga_set_textmode_vga_regs();
+ svga_set_textmode_vga_regs(par->state.vgabase);
/* Set additional registers like in 8-bit mode */
- svga_wcrt_mask(0x50, 0x00, 0x30);
- svga_wcrt_mask(0x67, 0x00, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0);
/* Disable enhanced mode */
- svga_wcrt_mask(0x3A, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
if (fasttext) {
pr_debug("fb%d: high speed text mode set\n", info->node);
- svga_wcrt_mask(0x31, 0x40, 0x40);
+ svga_wcrt_mask(par->state.vgabase, 0x31, 0x40, 0x40);
}
break;
case 1:
pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
- vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
+ vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
/* Set additional registers like in 8-bit mode */
- svga_wcrt_mask(0x50, 0x00, 0x30);
- svga_wcrt_mask(0x67, 0x00, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0);
/* disable enhanced mode */
- svga_wcrt_mask(0x3A, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
break;
case 2:
pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
/* Set additional registers like in 8-bit mode */
- svga_wcrt_mask(0x50, 0x00, 0x30);
- svga_wcrt_mask(0x67, 0x00, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0);
/* disable enhanced mode */
- svga_wcrt_mask(0x3A, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
break;
case 3:
pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
- svga_wcrt_mask(0x50, 0x00, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
if (info->var.pixclock > 20000 ||
par->chip == CHIP_360_TRIO3D_1X ||
par->chip == CHIP_362_TRIO3D_2X ||
par->chip == CHIP_368_TRIO3D_2X)
- svga_wcrt_mask(0x67, 0x00, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0);
else {
- svga_wcrt_mask(0x67, 0x10, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x10, 0xF0);
multiplex = 1;
}
break;
@@ -656,12 +687,21 @@ static int s3fb_set_par(struct fb_info *info)
pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
if (par->chip == CHIP_988_VIRGE_VX) {
if (info->var.pixclock > 20000)
- svga_wcrt_mask(0x67, 0x20, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x20, 0xF0);
else
- svga_wcrt_mask(0x67, 0x30, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x30, 0xF0);
+ } else if (par->chip == CHIP_365_TRIO3D) {
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x10, 0x30);
+ if (info->var.pixclock > 8695) {
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x30, 0xF0);
+ hmul = 2;
+ } else {
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x20, 0xF0);
+ multiplex = 1;
+ }
} else {
- svga_wcrt_mask(0x50, 0x10, 0x30);
- svga_wcrt_mask(0x67, 0x30, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x10, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x30, 0xF0);
if (par->chip != CHIP_360_TRIO3D_1X &&
par->chip != CHIP_362_TRIO3D_2X &&
par->chip != CHIP_368_TRIO3D_2X)
@@ -672,12 +712,21 @@ static int s3fb_set_par(struct fb_info *info)
pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
if (par->chip == CHIP_988_VIRGE_VX) {
if (info->var.pixclock > 20000)
- svga_wcrt_mask(0x67, 0x40, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x40, 0xF0);
else
- svga_wcrt_mask(0x67, 0x50, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x50, 0xF0);
+ } else if (par->chip == CHIP_365_TRIO3D) {
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x10, 0x30);
+ if (info->var.pixclock > 8695) {
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x50, 0xF0);
+ hmul = 2;
+ } else {
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x40, 0xF0);
+ multiplex = 1;
+ }
} else {
- svga_wcrt_mask(0x50, 0x10, 0x30);
- svga_wcrt_mask(0x67, 0x50, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x10, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0x50, 0xF0);
if (par->chip != CHIP_360_TRIO3D_1X &&
par->chip != CHIP_362_TRIO3D_2X &&
par->chip != CHIP_368_TRIO3D_2X)
@@ -687,12 +736,12 @@ static int s3fb_set_par(struct fb_info *info)
case 6:
/* VIRGE VX case */
pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
- svga_wcrt_mask(0x67, 0xD0, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
break;
case 7:
pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
- svga_wcrt_mask(0x50, 0x30, 0x30);
- svga_wcrt_mask(0x67, 0xD0, 0xF0);
+ svga_wcrt_mask(par->state.vgabase, 0x50, 0x30, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
break;
default:
printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
@@ -700,25 +749,30 @@ static int s3fb_set_par(struct fb_info *info)
}
if (par->chip != CHIP_988_VIRGE_VX) {
- svga_wseq_mask(0x15, multiplex ? 0x10 : 0x00, 0x10);
- svga_wseq_mask(0x18, multiplex ? 0x80 : 0x00, 0x80);
+ svga_wseq_mask(par->state.vgabase, 0x15, multiplex ? 0x10 : 0x00, 0x10);
+ svga_wseq_mask(par->state.vgabase, 0x18, multiplex ? 0x80 : 0x00, 0x80);
}
s3_set_pixclock(info, info->var.pixclock);
- svga_set_timings(&s3_timing_regs, &(info->var), hmul, 1,
+ svga_set_timings(par->state.vgabase, &s3_timing_regs, &(info->var), hmul, 1,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
(info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
hmul, info->node);
/* Set interlaced mode start/end register */
- value = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
- value = ((value * hmul) / 8) - 5;
- vga_wcrt(NULL, 0x3C, (value + 1) / 2);
+ htotal = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
+ htotal = ((htotal * hmul) / 8) - 5;
+ vga_wcrt(par->state.vgabase, 0x3C, (htotal + 1) / 2);
+
+ /* Set Data Transfer Position */
+ hsstart = ((info->var.xres + info->var.right_margin) * hmul) / 8;
+ value = clamp((htotal + hsstart + 1) / 2, hsstart + 4, htotal + 1);
+ svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
- svga_wcrt_mask(0x17, 0x80, 0x80);
- svga_wseq_mask(0x01, 0x00, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
return 0;
}
@@ -788,31 +842,33 @@ static int s3fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
static int s3fb_blank(int blank_mode, struct fb_info *info)
{
+ struct s3fb_info *par = info->par;
+
switch (blank_mode) {
case FB_BLANK_UNBLANK:
pr_debug("fb%d: unblank\n", info->node);
- svga_wcrt_mask(0x56, 0x00, 0x06);
- svga_wseq_mask(0x01, 0x00, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
break;
case FB_BLANK_NORMAL:
pr_debug("fb%d: blank\n", info->node);
- svga_wcrt_mask(0x56, 0x00, 0x06);
- svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_HSYNC_SUSPEND:
pr_debug("fb%d: hsync\n", info->node);
- svga_wcrt_mask(0x56, 0x02, 0x06);
- svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x56, 0x02, 0x06);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_VSYNC_SUSPEND:
pr_debug("fb%d: vsync\n", info->node);
- svga_wcrt_mask(0x56, 0x04, 0x06);
- svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x56, 0x04, 0x06);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_POWERDOWN:
pr_debug("fb%d: sync down\n", info->node);
- svga_wcrt_mask(0x56, 0x06, 0x06);
- svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x56, 0x06, 0x06);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
}
@@ -822,8 +878,9 @@ static int s3fb_blank(int blank_mode, struct fb_info *info)
/* Pan the display */
-static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) {
-
+static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct s3fb_info *par = info->par;
unsigned int offset;
/* Calculate the offset */
@@ -837,7 +894,7 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
}
/* Set the offset */
- svga_wcrt_multi(s3_start_address_regs, offset);
+ svga_wcrt_multi(par->state.vgabase, s3_start_address_regs, offset);
return 0;
}
@@ -863,12 +920,14 @@ static struct fb_ops s3fb_ops = {
/* ------------------------------------------------------------------------- */
-static int __devinit s3_identification(int chip)
+static int __devinit s3_identification(struct s3fb_info *par)
{
+ int chip = par->chip;
+
if (chip == CHIP_XXX_TRIO) {
- u8 cr30 = vga_rcrt(NULL, 0x30);
- u8 cr2e = vga_rcrt(NULL, 0x2e);
- u8 cr2f = vga_rcrt(NULL, 0x2f);
+ u8 cr30 = vga_rcrt(par->state.vgabase, 0x30);
+ u8 cr2e = vga_rcrt(par->state.vgabase, 0x2e);
+ u8 cr2f = vga_rcrt(par->state.vgabase, 0x2f);
if ((cr30 == 0xE0) || (cr30 == 0xE1)) {
if (cr2e == 0x10)
@@ -883,7 +942,7 @@ static int __devinit s3_identification(int chip)
}
if (chip == CHIP_XXX_TRIO64V2_DXGX) {
- u8 cr6f = vga_rcrt(NULL, 0x6f);
+ u8 cr6f = vga_rcrt(par->state.vgabase, 0x6f);
if (! (cr6f & 0x01))
return CHIP_775_TRIO64V2_DX;
@@ -892,7 +951,7 @@ static int __devinit s3_identification(int chip)
}
if (chip == CHIP_XXX_VIRGE_DXGX) {
- u8 cr6f = vga_rcrt(NULL, 0x6f);
+ u8 cr6f = vga_rcrt(par->state.vgabase, 0x6f);
if (! (cr6f & 0x01))
return CHIP_375_VIRGE_DX;
@@ -901,7 +960,7 @@ static int __devinit s3_identification(int chip)
}
if (chip == CHIP_36X_TRIO3D_1X_2X) {
- switch (vga_rcrt(NULL, 0x2f)) {
+ switch (vga_rcrt(par->state.vgabase, 0x2f)) {
case 0x00:
return CHIP_360_TRIO3D_1X;
case 0x01:
@@ -919,6 +978,8 @@ static int __devinit s3_identification(int chip)
static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ struct pci_bus_region bus_reg;
+ struct resource vga_res;
struct fb_info *info;
struct s3fb_info *par;
int rc;
@@ -968,31 +1029,42 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
goto err_iomap;
}
+ bus_reg.start = 0;
+ bus_reg.end = 64 * 1024;
+
+ vga_res.flags = IORESOURCE_IO;
+
+ pcibios_bus_to_resource(dev, &vga_res, &bus_reg);
+
+ par->state.vgabase = (void __iomem *) vga_res.start;
+
/* Unlock regs */
- cr38 = vga_rcrt(NULL, 0x38);
- cr39 = vga_rcrt(NULL, 0x39);
- vga_wseq(NULL, 0x08, 0x06);
- vga_wcrt(NULL, 0x38, 0x48);
- vga_wcrt(NULL, 0x39, 0xA5);
+ cr38 = vga_rcrt(par->state.vgabase, 0x38);
+ cr39 = vga_rcrt(par->state.vgabase, 0x39);
+ vga_wseq(par->state.vgabase, 0x08, 0x06);
+ vga_wcrt(par->state.vgabase, 0x38, 0x48);
+ vga_wcrt(par->state.vgabase, 0x39, 0xA5);
/* Identify chip type */
par->chip = id->driver_data & CHIP_MASK;
- par->rev = vga_rcrt(NULL, 0x2f);
+ par->rev = vga_rcrt(par->state.vgabase, 0x2f);
if (par->chip & CHIP_UNDECIDED_FLAG)
- par->chip = s3_identification(par->chip);
+ par->chip = s3_identification(par);
/* Find how many physical memory there is on card */
/* 0x36 register is accessible even if other registers are locked */
- regval = vga_rcrt(NULL, 0x36);
+ regval = vga_rcrt(par->state.vgabase, 0x36);
if (par->chip == CHIP_360_TRIO3D_1X ||
par->chip == CHIP_362_TRIO3D_2X ||
- par->chip == CHIP_368_TRIO3D_2X) {
+ par->chip == CHIP_368_TRIO3D_2X ||
+ par->chip == CHIP_365_TRIO3D) {
switch ((regval & 0xE0) >> 5) {
case 0: /* 8MB -- only 4MB usable for display */
case 1: /* 4MB with 32-bit bus */
case 2: /* 4MB */
info->screen_size = 4 << 20;
break;
+ case 4: /* 2MB on 365 Trio3D */
case 6: /* 2MB */
info->screen_size = 2 << 20;
break;
@@ -1002,13 +1074,13 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
info->fix.smem_len = info->screen_size;
/* Find MCLK frequency */
- regval = vga_rseq(NULL, 0x10);
- par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2);
+ regval = vga_rseq(par->state.vgabase, 0x10);
+ par->mclk_freq = ((vga_rseq(par->state.vgabase, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2);
par->mclk_freq = par->mclk_freq >> (regval >> 5);
/* Restore locks */
- vga_wcrt(NULL, 0x38, cr38);
- vga_wcrt(NULL, 0x39, cr39);
+ vga_wcrt(par->state.vgabase, 0x38, cr38);
+ vga_wcrt(par->state.vgabase, 0x39, cr39);
strcpy(info->fix.id, s3_names [par->chip]);
info->fix.mmio_start = 0;
@@ -1027,6 +1099,14 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
goto err_find_mode;
}
+ /* maximize virtual vertical size for fast scrolling */
+ info->var.yres_virtual = info->fix.smem_len * 8 /
+ (info->var.bits_per_pixel * info->var.xres_virtual);
+ if (info->var.yres_virtual < info->var.yres) {
+ dev_err(info->device, "virtual vertical size smaller than real\n");
+ goto err_find_mode;
+ }
+
rc = fb_alloc_cmap(&info->cmap, 256, 0);
if (rc < 0) {
dev_err(info->device, "cannot allocate colormap\n");
@@ -1044,8 +1124,8 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
if (par->chip == CHIP_UNKNOWN)
printk(KERN_INFO "fb%d: unknown chip, CR2D=%x, CR2E=%x, CRT2F=%x, CRT30=%x\n",
- info->node, vga_rcrt(NULL, 0x2d), vga_rcrt(NULL, 0x2e),
- vga_rcrt(NULL, 0x2f), vga_rcrt(NULL, 0x30));
+ info->node, vga_rcrt(par->state.vgabase, 0x2d), vga_rcrt(par->state.vgabase, 0x2e),
+ vga_rcrt(par->state.vgabase, 0x2f), vga_rcrt(par->state.vgabase, 0x30));
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
@@ -1192,6 +1272,7 @@ static struct pci_device_id s3_devices[] __devinitdata = {
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P},
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P},
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X},
+ {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8904), .driver_data = CHIP_365_TRIO3D},
{0, 0, 0, 0, 0, 0, 0}
};
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index bea38fce2470..8fe19582c460 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -459,14 +459,14 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev)
}
par->ioarea = request_mem_region(res->start,
- (res->end - res->start), pdev->name);
+ resource_size(res), pdev->name);
if (!par->ioarea) {
dev_err(&pdev->dev, "mmio area busy\n");
ret = -EBUSY;
goto out_fb;
}
- par->base = ioremap_nocache(res->start, res->end - res->start + 1);
+ par->base = ioremap_nocache(res->start, resource_size(res));
if (!par->base) {
dev_err(&pdev->dev, "cannot remap\n");
ret = -ENODEV;
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index bf2629f83f40..757665bc500f 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -1088,8 +1088,9 @@ static struct backlight_device *sh_mobile_lcdc_bl_probe(struct device *parent,
bl = backlight_device_register(ch->cfg.bl_info.name, parent, ch,
&sh_mobile_lcdc_bl_ops, NULL);
- if (!bl) {
- dev_err(parent, "unable to register backlight device\n");
+ if (IS_ERR(bl)) {
+ dev_err(parent, "unable to register backlight device: %ld\n",
+ PTR_ERR(bl));
return NULL;
}
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index eac7a01925f3..1987f1b7212f 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -495,6 +495,7 @@ struct sis_video_info {
unsigned int refresh_rate;
unsigned int chip;
+ unsigned int chip_real_id;
u8 revision_id;
int sisvga_enabled; /* PCI device was enabled */
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 2fb8c5a660fb..75259845933d 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -4563,6 +4563,11 @@ sisfb_post_sis315330(struct pci_dev *pdev)
}
#endif
+static inline int sisfb_xgi_is21(struct sis_video_info *ivideo)
+{
+ return ivideo->chip_real_id == XGI_21;
+}
+
static void __devinit
sisfb_post_xgi_delay(struct sis_video_info *ivideo, int delay)
{
@@ -4627,11 +4632,11 @@ sisfb_post_xgi_rwtest(struct sis_video_info *ivideo, int starta,
return 1;
}
-static void __devinit
+static int __devinit
sisfb_post_xgi_ramsize(struct sis_video_info *ivideo)
{
unsigned int buswidth, ranksize, channelab, mapsize;
- int i, j, k, l;
+ int i, j, k, l, status;
u8 reg, sr14;
static const u8 dramsr13[12 * 5] = {
0x02, 0x0e, 0x0b, 0x80, 0x5d,
@@ -4673,7 +4678,7 @@ sisfb_post_xgi_ramsize(struct sis_video_info *ivideo)
SiS_SetReg(SISSR, 0x13, 0x35);
SiS_SetReg(SISSR, 0x14, 0x41);
/* TODO */
- return;
+ return -ENOMEM;
}
/* Non-interleaving */
@@ -4835,6 +4840,7 @@ bail_out:
j = (ivideo->chip == XGI_20) ? 5 : 9;
k = (ivideo->chip == XGI_20) ? 12 : 4;
+ status = -EIO;
for(i = 0; i < k; i++) {
@@ -4868,11 +4874,15 @@ bail_out:
SiS_SetRegANDOR(SISSR, 0x14, 0x0f, (reg & 0xf0));
sisfb_post_xgi_delay(ivideo, 1);
- if(sisfb_post_xgi_rwtest(ivideo, j, ((reg >> 4) + channelab - 2 + 20), mapsize))
+ if (sisfb_post_xgi_rwtest(ivideo, j, ((reg >> 4) + channelab - 2 + 20), mapsize)) {
+ status = 0;
break;
+ }
}
iounmap(ivideo->video_vbase);
+
+ return status;
}
static void __devinit
@@ -4931,6 +4941,175 @@ sisfb_post_xgi_setclocks(struct sis_video_info *ivideo, u8 regb)
sisfb_post_xgi_delay(ivideo, 0x43);
}
+static void __devinit
+sisfb_post_xgi_ddr2_mrs_default(struct sis_video_info *ivideo, u8 regb)
+{
+ unsigned char *bios = ivideo->bios_abase;
+ u8 v1;
+
+ SiS_SetReg(SISSR, 0x28, 0x64);
+ SiS_SetReg(SISSR, 0x29, 0x63);
+ sisfb_post_xgi_delay(ivideo, 15);
+ SiS_SetReg(SISSR, 0x18, 0x00);
+ SiS_SetReg(SISSR, 0x19, 0x20);
+ SiS_SetReg(SISSR, 0x16, 0x00);
+ SiS_SetReg(SISSR, 0x16, 0x80);
+ SiS_SetReg(SISSR, 0x18, 0xc5);
+ SiS_SetReg(SISSR, 0x19, 0x23);
+ SiS_SetReg(SISSR, 0x16, 0x00);
+ SiS_SetReg(SISSR, 0x16, 0x80);
+ sisfb_post_xgi_delay(ivideo, 1);
+ SiS_SetReg(SISCR, 0x97, 0x11);
+ sisfb_post_xgi_setclocks(ivideo, regb);
+ sisfb_post_xgi_delay(ivideo, 0x46);
+ SiS_SetReg(SISSR, 0x18, 0xc5);
+ SiS_SetReg(SISSR, 0x19, 0x23);
+ SiS_SetReg(SISSR, 0x16, 0x00);
+ SiS_SetReg(SISSR, 0x16, 0x80);
+ sisfb_post_xgi_delay(ivideo, 1);
+ SiS_SetReg(SISSR, 0x1b, 0x04);
+ sisfb_post_xgi_delay(ivideo, 1);
+ SiS_SetReg(SISSR, 0x1b, 0x00);
+ sisfb_post_xgi_delay(ivideo, 1);
+ v1 = 0x31;
+ if (ivideo->haveXGIROM) {
+ v1 = bios[0xf0];
+ }
+ SiS_SetReg(SISSR, 0x18, v1);
+ SiS_SetReg(SISSR, 0x19, 0x06);
+ SiS_SetReg(SISSR, 0x16, 0x04);
+ SiS_SetReg(SISSR, 0x16, 0x84);
+ sisfb_post_xgi_delay(ivideo, 1);
+}
+
+static void __devinit
+sisfb_post_xgi_ddr2_mrs_xg21(struct sis_video_info *ivideo)
+{
+ sisfb_post_xgi_setclocks(ivideo, 1);
+
+ SiS_SetReg(SISCR, 0x97, 0x11);
+ sisfb_post_xgi_delay(ivideo, 0x46);
+
+ SiS_SetReg(SISSR, 0x18, 0x00); /* EMRS2 */
+ SiS_SetReg(SISSR, 0x19, 0x80);
+ SiS_SetReg(SISSR, 0x16, 0x05);
+ SiS_SetReg(SISSR, 0x16, 0x85);
+
+ SiS_SetReg(SISSR, 0x18, 0x00); /* EMRS3 */
+ SiS_SetReg(SISSR, 0x19, 0xc0);
+ SiS_SetReg(SISSR, 0x16, 0x05);
+ SiS_SetReg(SISSR, 0x16, 0x85);
+
+ SiS_SetReg(SISSR, 0x18, 0x00); /* EMRS1 */
+ SiS_SetReg(SISSR, 0x19, 0x40);
+ SiS_SetReg(SISSR, 0x16, 0x05);
+ SiS_SetReg(SISSR, 0x16, 0x85);
+
+ SiS_SetReg(SISSR, 0x18, 0x42); /* MRS1 */
+ SiS_SetReg(SISSR, 0x19, 0x02);
+ SiS_SetReg(SISSR, 0x16, 0x05);
+ SiS_SetReg(SISSR, 0x16, 0x85);
+ sisfb_post_xgi_delay(ivideo, 1);
+
+ SiS_SetReg(SISSR, 0x1b, 0x04);
+ sisfb_post_xgi_delay(ivideo, 1);
+
+ SiS_SetReg(SISSR, 0x1b, 0x00);
+ sisfb_post_xgi_delay(ivideo, 1);
+
+ SiS_SetReg(SISSR, 0x18, 0x42); /* MRS1 */
+ SiS_SetReg(SISSR, 0x19, 0x00);
+ SiS_SetReg(SISSR, 0x16, 0x05);
+ SiS_SetReg(SISSR, 0x16, 0x85);
+ sisfb_post_xgi_delay(ivideo, 1);
+}
+
+static void __devinit
+sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
+{
+ unsigned char *bios = ivideo->bios_abase;
+ static const u8 cs158[8] = {
+ 0x88, 0xaa, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ static const u8 cs160[8] = {
+ 0x44, 0x77, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ static const u8 cs168[8] = {
+ 0x48, 0x78, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ u8 reg;
+ u8 v1;
+ u8 v2;
+ u8 v3;
+
+ SiS_SetReg(SISCR, 0xb0, 0x80); /* DDR2 dual frequency mode */
+ SiS_SetReg(SISCR, 0x82, 0x77);
+ SiS_SetReg(SISCR, 0x86, 0x00);
+ reg = SiS_GetReg(SISCR, 0x86);
+ SiS_SetReg(SISCR, 0x86, 0x88);
+ reg = SiS_GetReg(SISCR, 0x86);
+ v1 = cs168[regb]; v2 = cs160[regb]; v3 = cs158[regb];
+ if (ivideo->haveXGIROM) {
+ v1 = bios[regb + 0x168];
+ v2 = bios[regb + 0x160];
+ v3 = bios[regb + 0x158];
+ }
+ SiS_SetReg(SISCR, 0x86, v1);
+ SiS_SetReg(SISCR, 0x82, 0x77);
+ SiS_SetReg(SISCR, 0x85, 0x00);
+ reg = SiS_GetReg(SISCR, 0x85);
+ SiS_SetReg(SISCR, 0x85, 0x88);
+ reg = SiS_GetReg(SISCR, 0x85);
+ SiS_SetReg(SISCR, 0x85, v2);
+ SiS_SetReg(SISCR, 0x82, v3);
+ SiS_SetReg(SISCR, 0x98, 0x01);
+ SiS_SetReg(SISCR, 0x9a, 0x02);
+ if (sisfb_xgi_is21(ivideo))
+ sisfb_post_xgi_ddr2_mrs_xg21(ivideo);
+ else
+ sisfb_post_xgi_ddr2_mrs_default(ivideo, regb);
+}
+
+static u8 __devinit
+sisfb_post_xgi_ramtype(struct sis_video_info *ivideo)
+{
+ unsigned char *bios = ivideo->bios_abase;
+ u8 ramtype;
+ u8 reg;
+ u8 v1;
+
+ ramtype = 0x00; v1 = 0x10;
+ if (ivideo->haveXGIROM) {
+ ramtype = bios[0x62];
+ v1 = bios[0x1d2];
+ }
+ if (!(ramtype & 0x80)) {
+ if (sisfb_xgi_is21(ivideo)) {
+ SiS_SetRegAND(SISCR, 0xb4, 0xfd); /* GPIO control */
+ SiS_SetRegOR(SISCR, 0x4a, 0x80); /* GPIOH EN */
+ reg = SiS_GetReg(SISCR, 0x48);
+ SiS_SetRegOR(SISCR, 0xb4, 0x02);
+ ramtype = reg & 0x01; /* GPIOH */
+ } else if (ivideo->chip == XGI_20) {
+ SiS_SetReg(SISCR, 0x97, v1);
+ reg = SiS_GetReg(SISCR, 0x97);
+ if (reg & 0x10) {
+ ramtype = (reg & 0x01) << 1;
+ }
+ } else {
+ reg = SiS_GetReg(SISSR, 0x39);
+ ramtype = reg & 0x02;
+ if (!(ramtype)) {
+ reg = SiS_GetReg(SISSR, 0x3a);
+ ramtype = (reg >> 1) & 0x01;
+ }
+ }
+ }
+ ramtype &= 0x07;
+
+ return ramtype;
+}
+
static int __devinit
sisfb_post_xgi(struct pci_dev *pdev)
{
@@ -5213,9 +5392,23 @@ sisfb_post_xgi(struct pci_dev *pdev)
SiS_SetReg(SISCR, 0x77, v1);
}
- /* RAM type */
-
- regb = 0; /* ! */
+ /* RAM type:
+ *
+ * 0 == DDR1, 1 == DDR2, 2..7 == reserved?
+ *
+ * The code seems to be written so that regb should equal ramtype;
+ * so far, however, it has been hardcoded to 0. Enable other values only
+ * on XGI Z9, as it passes the POST, and add a warning for others.
+ */
+ ramtype = sisfb_post_xgi_ramtype(ivideo);
+ if (!sisfb_xgi_is21(ivideo) && ramtype) {
+ dev_warn(&pdev->dev,
+ "RAM type something else than expected: %d\n",
+ ramtype);
+ regb = 0;
+ } else {
+ regb = ramtype;
+ }
v1 = 0xff;
if(ivideo->haveXGIROM) {
@@ -5367,7 +5560,10 @@ sisfb_post_xgi(struct pci_dev *pdev)
}
}
- SiS_SetReg(SISSR, 0x17, 0x00);
+ if (regb == 1)
+ SiS_SetReg(SISSR, 0x17, 0x80); /* DDR2 */
+ else
+ SiS_SetReg(SISSR, 0x17, 0x00); /* DDR1 */
SiS_SetReg(SISSR, 0x1a, 0x87);
if(ivideo->chip == XGI_20) {
@@ -5375,31 +5571,6 @@ sisfb_post_xgi(struct pci_dev *pdev)
SiS_SetReg(SISSR, 0x1c, 0x00);
}
- ramtype = 0x00; v1 = 0x10;
- if(ivideo->haveXGIROM) {
- ramtype = bios[0x62];
- v1 = bios[0x1d2];
- }
- if(!(ramtype & 0x80)) {
- if(ivideo->chip == XGI_20) {
- SiS_SetReg(SISCR, 0x97, v1);
- reg = SiS_GetReg(SISCR, 0x97);
- if(reg & 0x10) {
- ramtype = (reg & 0x01) << 1;
- }
- } else {
- reg = SiS_GetReg(SISSR, 0x39);
- ramtype = reg & 0x02;
- if(!(ramtype)) {
- reg = SiS_GetReg(SISSR, 0x3a);
- ramtype = (reg >> 1) & 0x01;
- }
- }
- }
- ramtype &= 0x07;
-
- regb = 0; /* ! */
-
switch(ramtype) {
case 0:
sisfb_post_xgi_setclocks(ivideo, regb);
@@ -5485,61 +5656,7 @@ sisfb_post_xgi(struct pci_dev *pdev)
SiS_SetReg(SISSR, 0x1b, 0x00);
break;
case 1:
- SiS_SetReg(SISCR, 0x82, 0x77);
- SiS_SetReg(SISCR, 0x86, 0x00);
- reg = SiS_GetReg(SISCR, 0x86);
- SiS_SetReg(SISCR, 0x86, 0x88);
- reg = SiS_GetReg(SISCR, 0x86);
- v1 = cs168[regb]; v2 = cs160[regb]; v3 = cs158[regb];
- if(ivideo->haveXGIROM) {
- v1 = bios[regb + 0x168];
- v2 = bios[regb + 0x160];
- v3 = bios[regb + 0x158];
- }
- SiS_SetReg(SISCR, 0x86, v1);
- SiS_SetReg(SISCR, 0x82, 0x77);
- SiS_SetReg(SISCR, 0x85, 0x00);
- reg = SiS_GetReg(SISCR, 0x85);
- SiS_SetReg(SISCR, 0x85, 0x88);
- reg = SiS_GetReg(SISCR, 0x85);
- SiS_SetReg(SISCR, 0x85, v2);
- SiS_SetReg(SISCR, 0x82, v3);
- SiS_SetReg(SISCR, 0x98, 0x01);
- SiS_SetReg(SISCR, 0x9a, 0x02);
-
- SiS_SetReg(SISSR, 0x28, 0x64);
- SiS_SetReg(SISSR, 0x29, 0x63);
- sisfb_post_xgi_delay(ivideo, 15);
- SiS_SetReg(SISSR, 0x18, 0x00);
- SiS_SetReg(SISSR, 0x19, 0x20);
- SiS_SetReg(SISSR, 0x16, 0x00);
- SiS_SetReg(SISSR, 0x16, 0x80);
- SiS_SetReg(SISSR, 0x18, 0xc5);
- SiS_SetReg(SISSR, 0x19, 0x23);
- SiS_SetReg(SISSR, 0x16, 0x00);
- SiS_SetReg(SISSR, 0x16, 0x80);
- sisfb_post_xgi_delay(ivideo, 1);
- SiS_SetReg(SISCR, 0x97, 0x11);
- sisfb_post_xgi_setclocks(ivideo, regb);
- sisfb_post_xgi_delay(ivideo, 0x46);
- SiS_SetReg(SISSR, 0x18, 0xc5);
- SiS_SetReg(SISSR, 0x19, 0x23);
- SiS_SetReg(SISSR, 0x16, 0x00);
- SiS_SetReg(SISSR, 0x16, 0x80);
- sisfb_post_xgi_delay(ivideo, 1);
- SiS_SetReg(SISSR, 0x1b, 0x04);
- sisfb_post_xgi_delay(ivideo, 1);
- SiS_SetReg(SISSR, 0x1b, 0x00);
- sisfb_post_xgi_delay(ivideo, 1);
- v1 = 0x31;
- if(ivideo->haveXGIROM) {
- v1 = bios[0xf0];
- }
- SiS_SetReg(SISSR, 0x18, v1);
- SiS_SetReg(SISSR, 0x19, 0x06);
- SiS_SetReg(SISSR, 0x16, 0x04);
- SiS_SetReg(SISSR, 0x16, 0x84);
- sisfb_post_xgi_delay(ivideo, 1);
+ sisfb_post_xgi_ddr2(ivideo, regb);
break;
default:
sisfb_post_xgi_setclocks(ivideo, regb);
@@ -5648,6 +5765,7 @@ sisfb_post_xgi(struct pci_dev *pdev)
SiS_SetReg(SISSR, 0x14, bios[regb + 0xe0 + 8]);
} else {
+ int err;
/* Set default mode, don't clear screen */
ivideo->SiS_Pr.SiS_UseOEM = false;
@@ -5661,10 +5779,16 @@ sisfb_post_xgi(struct pci_dev *pdev)
/* Disable read-cache */
SiS_SetRegAND(SISSR, 0x21, 0xdf);
- sisfb_post_xgi_ramsize(ivideo);
+ err = sisfb_post_xgi_ramsize(ivideo);
/* Enable read-cache */
SiS_SetRegOR(SISSR, 0x21, 0x20);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: RAM size detection failed: %d\n",
+ __func__, err);
+ return 0;
+ }
}
#if 0
@@ -5777,6 +5901,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
ivideo->chip = chipinfo->chip;
+ ivideo->chip_real_id = chipinfo->chip;
ivideo->sisvga_engine = chipinfo->vgaengine;
ivideo->hwcursor_size = chipinfo->hwcursor_size;
ivideo->CRT2_write_enable = chipinfo->CRT2_write_enable;
@@ -6010,6 +6135,18 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sisfb_detect_custom_timing(ivideo);
}
+#ifdef CONFIG_FB_SIS_315
+ if (ivideo->chip == XGI_20) {
+ /* Check if our Z7 chip is actually Z9 */
+ SiS_SetRegOR(SISCR, 0x4a, 0x40); /* GPIOG EN */
+ reg = SiS_GetReg(SISCR, 0x48);
+ if (reg & 0x02) { /* GPIOG */
+ ivideo->chip_real_id = XGI_21;
+ dev_info(&pdev->dev, "Z9 detected\n");
+ }
+ }
+#endif
+
/* POST card in case this has not been done by the BIOS */
if( (!ivideo->sisvga_enabled)
#if !defined(__i386__) && !defined(__x86_64__)
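The sis_main.c changes above route RAM-type detection through sisfb_post_xgi_ramtype() and trust a non-zero result (DDR2) only when the chip was re-identified as XGI Z9 (XGI_21, added to vgatypes.h below); anything else warns and keeps the old hardcoded regb = 0. A minimal sketch of that selection policy, with invented sketch_* names in place of the sisfb structures:

#include <stdio.h>

enum sketch_chip { SKETCH_XGI_20 = 75, SKETCH_XGI_21 };

struct sketch_video_info {
	enum sketch_chip chip_real_id;	/* SKETCH_XGI_21 once the Z9 GPIO strap has been read */
};

/* ramtype: 0 == DDR1, 1 == DDR2, as in the comment in the hunk above. */
static int sketch_pick_regb(const struct sketch_video_info *iv, int ramtype)
{
	if (iv->chip_real_id != SKETCH_XGI_21 && ramtype != 0) {
		fprintf(stderr, "RAM type other than expected: %d\n", ramtype);
		return 0;	/* keep the previously hardcoded value */
	}
	return ramtype;		/* DDR2 path is taken only on XGI Z9 */
}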
diff --git a/drivers/video/sis/vgatypes.h b/drivers/video/sis/vgatypes.h
index 12c0dfaf2518..e3f9976cfef0 100644
--- a/drivers/video/sis/vgatypes.h
+++ b/drivers/video/sis/vgatypes.h
@@ -87,6 +87,7 @@ typedef enum _SIS_CHIP_TYPE {
SIS_341,
SIS_342,
XGI_20 = 75,
+ XGI_21,
XGI_40,
MAX_SIS_CHIP
} SIS_CHIP_TYPE;
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index bcb44a594ebc..46d1a64fe80d 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -41,6 +41,26 @@
#include <linux/sm501.h>
#include <linux/sm501-regs.h>
+#include "edid.h"
+
+static char *fb_mode = "640x480-16@60";
+static unsigned long default_bpp = 16;
+
+static struct fb_videomode __devinitdata sm501_default_mode = {
+ .refresh = 60,
+ .xres = 640,
+ .yres = 480,
+ .pixclock = 20833,
+ .left_margin = 142,
+ .right_margin = 13,
+ .upper_margin = 21,
+ .lower_margin = 1,
+ .hsync_len = 69,
+ .vsync_len = 3,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+};
+
#define NR_PALETTE 256
enum sm501_controller {
@@ -77,6 +97,7 @@ struct sm501fb_info {
void __iomem *regs2d; /* 2d remapped registers */
void __iomem *fbmem; /* remapped framebuffer */
size_t fbmem_len; /* length of remapped region */
+ u8 *edid_data;
};
/* per-framebuffer private data */
@@ -117,7 +138,7 @@ static inline int v_total(struct fb_var_screeninfo *var)
static inline void sm501fb_sync_regs(struct sm501fb_info *info)
{
- readl(info->regs);
+ smc501_readl(info->regs);
}
/* sm501_alloc_mem
@@ -262,7 +283,7 @@ static void sm501fb_setup_gamma(struct sm501fb_info *fbi,
/* set gamma values */
for (offset = 0; offset < 256 * 4; offset += 4) {
- writel(value, fbi->regs + palette + offset);
+ smc501_writel(value, fbi->regs + palette + offset);
value += 0x010101; /* Advance RGB by 1,1,1.*/
}
}
@@ -476,7 +497,8 @@ static int sm501fb_set_par_common(struct fb_info *info,
/* set start of framebuffer to the screen */
- writel(par->screen.sm_addr | SM501_ADDR_FLIP, fbi->regs + head_addr);
+ smc501_writel(par->screen.sm_addr | SM501_ADDR_FLIP,
+ fbi->regs + head_addr);
/* program CRT clock */
@@ -519,7 +541,7 @@ static void sm501fb_set_par_geometry(struct fb_info *info,
reg = info->fix.line_length;
reg |= ((var->xres * var->bits_per_pixel)/8) << 16;
- writel(reg, fbi->regs + (par->head == HEAD_CRT ?
+ smc501_writel(reg, fbi->regs + (par->head == HEAD_CRT ?
SM501_DC_CRT_FB_OFFSET : SM501_DC_PANEL_FB_OFFSET));
/* program horizontal total */
@@ -527,27 +549,27 @@ static void sm501fb_set_par_geometry(struct fb_info *info,
reg = (h_total(var) - 1) << 16;
reg |= (var->xres - 1);
- writel(reg, base + SM501_OFF_DC_H_TOT);
+ smc501_writel(reg, base + SM501_OFF_DC_H_TOT);
/* program horizontal sync */
reg = var->hsync_len << 16;
reg |= var->xres + var->right_margin - 1;
- writel(reg, base + SM501_OFF_DC_H_SYNC);
+ smc501_writel(reg, base + SM501_OFF_DC_H_SYNC);
/* program vertical total */
reg = (v_total(var) - 1) << 16;
reg |= (var->yres - 1);
- writel(reg, base + SM501_OFF_DC_V_TOT);
+ smc501_writel(reg, base + SM501_OFF_DC_V_TOT);
/* program vertical sync */
reg = var->vsync_len << 16;
reg |= var->yres + var->lower_margin - 1;
- writel(reg, base + SM501_OFF_DC_V_SYNC);
+ smc501_writel(reg, base + SM501_OFF_DC_V_SYNC);
}
/* sm501fb_pan_crt
@@ -566,15 +588,15 @@ static int sm501fb_pan_crt(struct fb_var_screeninfo *var,
xoffs = var->xoffset * bytes_pixel;
- reg = readl(fbi->regs + SM501_DC_CRT_CONTROL);
+ reg = smc501_readl(fbi->regs + SM501_DC_CRT_CONTROL);
reg &= ~SM501_DC_CRT_CONTROL_PIXEL_MASK;
reg |= ((xoffs & 15) / bytes_pixel) << 4;
- writel(reg, fbi->regs + SM501_DC_CRT_CONTROL);
+ smc501_writel(reg, fbi->regs + SM501_DC_CRT_CONTROL);
reg = (par->screen.sm_addr + xoffs +
var->yoffset * info->fix.line_length);
- writel(reg | SM501_ADDR_FLIP, fbi->regs + SM501_DC_CRT_FB_ADDR);
+ smc501_writel(reg | SM501_ADDR_FLIP, fbi->regs + SM501_DC_CRT_FB_ADDR);
sm501fb_sync_regs(fbi);
return 0;
@@ -593,10 +615,10 @@ static int sm501fb_pan_pnl(struct fb_var_screeninfo *var,
unsigned long reg;
reg = var->xoffset | (var->xres_virtual << 16);
- writel(reg, fbi->regs + SM501_DC_PANEL_FB_WIDTH);
+ smc501_writel(reg, fbi->regs + SM501_DC_PANEL_FB_WIDTH);
reg = var->yoffset | (var->yres_virtual << 16);
- writel(reg, fbi->regs + SM501_DC_PANEL_FB_HEIGHT);
+ smc501_writel(reg, fbi->regs + SM501_DC_PANEL_FB_HEIGHT);
sm501fb_sync_regs(fbi);
return 0;
@@ -622,7 +644,7 @@ static int sm501fb_set_par_crt(struct fb_info *info)
/* enable CRT DAC - note 0 is on!*/
sm501_misc_control(fbi->dev->parent, 0, SM501_MISC_DAC_POWER);
- control = readl(fbi->regs + SM501_DC_CRT_CONTROL);
+ control = smc501_readl(fbi->regs + SM501_DC_CRT_CONTROL);
control &= (SM501_DC_CRT_CONTROL_PIXEL_MASK |
SM501_DC_CRT_CONTROL_GAMMA |
@@ -684,7 +706,7 @@ static int sm501fb_set_par_crt(struct fb_info *info)
out_update:
dev_dbg(fbi->dev, "new control is %08lx\n", control);
- writel(control, fbi->regs + SM501_DC_CRT_CONTROL);
+ smc501_writel(control, fbi->regs + SM501_DC_CRT_CONTROL);
sm501fb_sync_regs(fbi);
return 0;
@@ -696,18 +718,18 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
void __iomem *ctrl_reg = fbi->regs + SM501_DC_PANEL_CONTROL;
struct sm501_platdata_fbsub *pd = fbi->pdata->fb_pnl;
- control = readl(ctrl_reg);
+ control = smc501_readl(ctrl_reg);
if (to && (control & SM501_DC_PANEL_CONTROL_VDD) == 0) {
/* enable panel power */
control |= SM501_DC_PANEL_CONTROL_VDD; /* FPVDDEN */
- writel(control, ctrl_reg);
+ smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
control |= SM501_DC_PANEL_CONTROL_DATA; /* DATA */
- writel(control, ctrl_reg);
+ smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
@@ -719,7 +741,7 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
else
control |= SM501_DC_PANEL_CONTROL_BIAS;
- writel(control, ctrl_reg);
+ smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
@@ -730,7 +752,7 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
else
control |= SM501_DC_PANEL_CONTROL_FPEN;
- writel(control, ctrl_reg);
+ smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
@@ -742,7 +764,7 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
else
control &= ~SM501_DC_PANEL_CONTROL_FPEN;
- writel(control, ctrl_reg);
+ smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
@@ -753,18 +775,18 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
else
control &= ~SM501_DC_PANEL_CONTROL_BIAS;
- writel(control, ctrl_reg);
+ smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
control &= ~SM501_DC_PANEL_CONTROL_DATA;
- writel(control, ctrl_reg);
+ smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
control &= ~SM501_DC_PANEL_CONTROL_VDD;
- writel(control, ctrl_reg);
+ smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
@@ -799,7 +821,7 @@ static int sm501fb_set_par_pnl(struct fb_info *info)
/* update control register */
- control = readl(fbi->regs + SM501_DC_PANEL_CONTROL);
+ control = smc501_readl(fbi->regs + SM501_DC_PANEL_CONTROL);
control &= (SM501_DC_PANEL_CONTROL_GAMMA |
SM501_DC_PANEL_CONTROL_VDD |
SM501_DC_PANEL_CONTROL_DATA |
@@ -833,16 +855,16 @@ static int sm501fb_set_par_pnl(struct fb_info *info)
BUG();
}
- writel(0x0, fbi->regs + SM501_DC_PANEL_PANNING_CONTROL);
+ smc501_writel(0x0, fbi->regs + SM501_DC_PANEL_PANNING_CONTROL);
/* panel plane top left and bottom right location */
- writel(0x00, fbi->regs + SM501_DC_PANEL_TL_LOC);
+ smc501_writel(0x00, fbi->regs + SM501_DC_PANEL_TL_LOC);
reg = var->xres - 1;
reg |= (var->yres - 1) << 16;
- writel(reg, fbi->regs + SM501_DC_PANEL_BR_LOC);
+ smc501_writel(reg, fbi->regs + SM501_DC_PANEL_BR_LOC);
/* program panel control register */
@@ -855,7 +877,7 @@ static int sm501fb_set_par_pnl(struct fb_info *info)
if ((var->sync & FB_SYNC_VERT_HIGH_ACT) == 0)
control |= SM501_DC_PANEL_CONTROL_VSP;
- writel(control, fbi->regs + SM501_DC_PANEL_CONTROL);
+ smc501_writel(control, fbi->regs + SM501_DC_PANEL_CONTROL);
sm501fb_sync_regs(fbi);
/* ensure the panel interface is not tristated at this point */
@@ -924,7 +946,7 @@ static int sm501fb_setcolreg(unsigned regno,
val |= (green >> 8) << 8;
val |= blue >> 8;
- writel(val, base + (regno * 4));
+ smc501_writel(val, base + (regno * 4));
}
break;
@@ -980,7 +1002,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info)
dev_dbg(fbi->dev, "%s(mode=%d, %p)\n", __func__, blank_mode, info);
- ctrl = readl(fbi->regs + SM501_DC_CRT_CONTROL);
+ ctrl = smc501_readl(fbi->regs + SM501_DC_CRT_CONTROL);
switch (blank_mode) {
case FB_BLANK_POWERDOWN:
@@ -1004,7 +1026,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info)
}
- writel(ctrl, fbi->regs + SM501_DC_CRT_CONTROL);
+ smc501_writel(ctrl, fbi->regs + SM501_DC_CRT_CONTROL);
sm501fb_sync_regs(fbi);
return 0;
@@ -1041,12 +1063,14 @@ static int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
if (cursor->image.depth > 1)
return -EINVAL;
- hwc_addr = readl(base + SM501_OFF_HWC_ADDR);
+ hwc_addr = smc501_readl(base + SM501_OFF_HWC_ADDR);
if (cursor->enable)
- writel(hwc_addr | SM501_HWC_EN, base + SM501_OFF_HWC_ADDR);
+ smc501_writel(hwc_addr | SM501_HWC_EN,
+ base + SM501_OFF_HWC_ADDR);
else
- writel(hwc_addr & ~SM501_HWC_EN, base + SM501_OFF_HWC_ADDR);
+ smc501_writel(hwc_addr & ~SM501_HWC_EN,
+ base + SM501_OFF_HWC_ADDR);
/* set data */
if (cursor->set & FB_CUR_SETPOS) {
@@ -1060,7 +1084,7 @@ static int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
//y += cursor->image.height;
- writel(x | (y << 16), base + SM501_OFF_HWC_LOC);
+ smc501_writel(x | (y << 16), base + SM501_OFF_HWC_LOC);
}
if (cursor->set & FB_CUR_SETCMAP) {
@@ -1080,8 +1104,8 @@ static int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
dev_dbg(fbi->dev, "fgcol %08lx, bgcol %08lx\n", fg, bg);
- writel(bg, base + SM501_OFF_HWC_COLOR_1_2);
- writel(fg, base + SM501_OFF_HWC_COLOR_3);
+ smc501_writel(bg, base + SM501_OFF_HWC_COLOR_1_2);
+ smc501_writel(fg, base + SM501_OFF_HWC_COLOR_3);
}
if (cursor->set & FB_CUR_SETSIZE ||
@@ -1102,7 +1126,7 @@ static int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
__func__, cursor->image.width, cursor->image.height);
for (op = 0; op < (64*64*2)/8; op+=4)
- writel(0x0, dst + op);
+ smc501_writel(0x0, dst + op);
for (y = 0; y < cursor->image.height; y++) {
for (x = 0; x < cursor->image.width; x++) {
@@ -1141,7 +1165,7 @@ static ssize_t sm501fb_crtsrc_show(struct device *dev,
struct sm501fb_info *info = dev_get_drvdata(dev);
unsigned long ctrl;
- ctrl = readl(info->regs + SM501_DC_CRT_CONTROL);
+ ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
ctrl &= SM501_DC_CRT_CONTROL_SEL;
return snprintf(buf, PAGE_SIZE, "%s\n", ctrl ? "crt" : "panel");
@@ -1172,7 +1196,7 @@ static ssize_t sm501fb_crtsrc_store(struct device *dev,
dev_info(dev, "setting crt source to head %d\n", head);
- ctrl = readl(info->regs + SM501_DC_CRT_CONTROL);
+ ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
if (head == HEAD_CRT) {
ctrl |= SM501_DC_CRT_CONTROL_SEL;
@@ -1184,7 +1208,7 @@ static ssize_t sm501fb_crtsrc_store(struct device *dev,
ctrl &= ~SM501_DC_CRT_CONTROL_TE;
}
- writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
+ smc501_writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
sm501fb_sync_regs(info);
return len;
@@ -1205,7 +1229,8 @@ static int sm501fb_show_regs(struct sm501fb_info *info, char *ptr,
unsigned int reg;
for (reg = start; reg < (len + start); reg += 4)
- ptr += sprintf(ptr, "%08x = %08x\n", reg, readl(mem + reg));
+ ptr += sprintf(ptr, "%08x = %08x\n", reg,
+ smc501_readl(mem + reg));
return ptr - buf;
}
@@ -1257,7 +1282,7 @@ static int sm501fb_sync(struct fb_info *info)
/* wait for the 2d engine to be ready */
while ((count > 0) &&
- (readl(fbi->regs + SM501_SYSTEM_CONTROL) &
+ (smc501_readl(fbi->regs + SM501_SYSTEM_CONTROL) &
SM501_SYSCTRL_2D_ENGINE_STATUS) != 0)
count--;
@@ -1312,45 +1337,46 @@ static void sm501fb_copyarea(struct fb_info *info, const struct fb_copyarea *are
return;
/* set the base addresses */
- writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
- writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE);
+ smc501_writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
+ smc501_writel(par->screen.sm_addr,
+ fbi->regs2d + SM501_2D_DESTINATION_BASE);
/* set the window width */
- writel((info->var.xres << 16) | info->var.xres,
+ smc501_writel((info->var.xres << 16) | info->var.xres,
fbi->regs2d + SM501_2D_WINDOW_WIDTH);
/* set window stride */
- writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
+ smc501_writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
fbi->regs2d + SM501_2D_PITCH);
/* set data format */
switch (info->var.bits_per_pixel) {
case 8:
- writel(0, fbi->regs2d + SM501_2D_STRETCH);
+ smc501_writel(0, fbi->regs2d + SM501_2D_STRETCH);
break;
case 16:
- writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
+ smc501_writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
break;
case 32:
- writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
+ smc501_writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
break;
}
/* 2d compare mask */
- writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
+ smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
/* 2d mask */
- writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
+ smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
/* source and destination x y */
- writel((sx << 16) | sy, fbi->regs2d + SM501_2D_SOURCE);
- writel((dx << 16) | dy, fbi->regs2d + SM501_2D_DESTINATION);
+ smc501_writel((sx << 16) | sy, fbi->regs2d + SM501_2D_SOURCE);
+ smc501_writel((dx << 16) | dy, fbi->regs2d + SM501_2D_DESTINATION);
/* w/h */
- writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
+ smc501_writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
/* do area move */
- writel(0x800000cc | rtl, fbi->regs2d + SM501_2D_CONTROL);
+ smc501_writel(0x800000cc | rtl, fbi->regs2d + SM501_2D_CONTROL);
}
static void sm501fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -1372,47 +1398,49 @@ static void sm501fb_fillrect(struct fb_info *info, const struct fb_fillrect *rec
return;
/* set the base addresses */
- writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
- writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE);
+ smc501_writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
+ smc501_writel(par->screen.sm_addr,
+ fbi->regs2d + SM501_2D_DESTINATION_BASE);
/* set the window width */
- writel((info->var.xres << 16) | info->var.xres,
+ smc501_writel((info->var.xres << 16) | info->var.xres,
fbi->regs2d + SM501_2D_WINDOW_WIDTH);
/* set window stride */
- writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
+ smc501_writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
fbi->regs2d + SM501_2D_PITCH);
/* set data format */
switch (info->var.bits_per_pixel) {
case 8:
- writel(0, fbi->regs2d + SM501_2D_STRETCH);
+ smc501_writel(0, fbi->regs2d + SM501_2D_STRETCH);
break;
case 16:
- writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
+ smc501_writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
break;
case 32:
- writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
+ smc501_writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
break;
}
/* 2d compare mask */
- writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
+ smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
/* 2d mask */
- writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
+ smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
/* colour */
- writel(rect->color, fbi->regs2d + SM501_2D_FOREGROUND);
+ smc501_writel(rect->color, fbi->regs2d + SM501_2D_FOREGROUND);
/* x y */
- writel((rect->dx << 16) | rect->dy, fbi->regs2d + SM501_2D_DESTINATION);
+ smc501_writel((rect->dx << 16) | rect->dy,
+ fbi->regs2d + SM501_2D_DESTINATION);
/* w/h */
- writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
+ smc501_writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
/* do rectangle fill */
- writel(0x800100cc, fbi->regs2d + SM501_2D_CONTROL);
+ smc501_writel(0x800100cc, fbi->regs2d + SM501_2D_CONTROL);
}
@@ -1470,11 +1498,12 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
/* initialise the colour registers */
- writel(par->cursor.sm_addr, par->cursor_regs + SM501_OFF_HWC_ADDR);
+ smc501_writel(par->cursor.sm_addr,
+ par->cursor_regs + SM501_OFF_HWC_ADDR);
- writel(0x00, par->cursor_regs + SM501_OFF_HWC_LOC);
- writel(0x00, par->cursor_regs + SM501_OFF_HWC_COLOR_1_2);
- writel(0x00, par->cursor_regs + SM501_OFF_HWC_COLOR_3);
+ smc501_writel(0x00, par->cursor_regs + SM501_OFF_HWC_LOC);
+ smc501_writel(0x00, par->cursor_regs + SM501_OFF_HWC_COLOR_1_2);
+ smc501_writel(0x00, par->cursor_regs + SM501_OFF_HWC_COLOR_3);
sm501fb_sync_regs(info);
return 0;
@@ -1581,7 +1610,7 @@ static int sm501fb_start(struct sm501fb_info *info,
/* clear palette ram - undefined at power on */
for (k = 0; k < (256 * 3); k++)
- writel(0, info->regs + SM501_DC_PANEL_PALETTE + (k * 4));
+ smc501_writel(0, info->regs + SM501_DC_PANEL_PALETTE + (k * 4));
/* enable display controller */
sm501_unit_power(dev->parent, SM501_GATE_DISPLAY, 1);
@@ -1649,20 +1678,20 @@ static int sm501fb_init_fb(struct fb_info *fb,
switch (head) {
case HEAD_CRT:
pd = info->pdata->fb_crt;
- ctrl = readl(info->regs + SM501_DC_CRT_CONTROL);
+ ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
enable = (ctrl & SM501_DC_CRT_CONTROL_ENABLE) ? 1 : 0;
/* ensure we set the correct source register */
if (info->pdata->fb_route != SM501_FB_CRT_PANEL) {
ctrl |= SM501_DC_CRT_CONTROL_SEL;
- writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
+ smc501_writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
}
break;
case HEAD_PANEL:
pd = info->pdata->fb_pnl;
- ctrl = readl(info->regs + SM501_DC_PANEL_CONTROL);
+ ctrl = smc501_readl(info->regs + SM501_DC_PANEL_CONTROL);
enable = (ctrl & SM501_DC_PANEL_CONTROL_EN) ? 1 : 0;
break;
@@ -1680,7 +1709,7 @@ static int sm501fb_init_fb(struct fb_info *fb,
if (head == HEAD_CRT && info->pdata->fb_route == SM501_FB_CRT_PANEL) {
ctrl &= ~SM501_DC_CRT_CONTROL_SEL;
- writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
+ smc501_writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
enable = 0;
}
@@ -1700,6 +1729,15 @@ static int sm501fb_init_fb(struct fb_info *fb,
FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
+#if defined(CONFIG_OF)
+#ifdef __BIG_ENDIAN
+ if (of_get_property(info->dev->parent->of_node, "little-endian", NULL))
+ fb->flags |= FBINFO_FOREIGN_ENDIAN;
+#else
+ if (of_get_property(info->dev->parent->of_node, "big-endian", NULL))
+ fb->flags |= FBINFO_FOREIGN_ENDIAN;
+#endif
+#endif
/* fixed data */
fb->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -1717,9 +1755,16 @@ static int sm501fb_init_fb(struct fb_info *fb,
fb->var.vmode = FB_VMODE_NONINTERLACED;
fb->var.bits_per_pixel = 16;
+ if (info->edid_data) {
+ /* Now build modedb from EDID */
+ fb_edid_to_monspecs(info->edid_data, &fb->monspecs);
+ fb_videomode_to_modelist(fb->monspecs.modedb,
+ fb->monspecs.modedb_len,
+ &fb->modelist);
+ }
+
if (enable && (pd->flags & SM501FB_FLAG_USE_INIT_MODE) && 0) {
/* TODO read the mode from the current display */
-
} else {
if (pd->def_mode) {
dev_info(info->dev, "using supplied mode\n");
@@ -1729,12 +1774,37 @@ static int sm501fb_init_fb(struct fb_info *fb,
fb->var.xres_virtual = fb->var.xres;
fb->var.yres_virtual = fb->var.yres;
} else {
- ret = fb_find_mode(&fb->var, fb,
+ if (info->edid_data) {
+ ret = fb_find_mode(&fb->var, fb, fb_mode,
+ fb->monspecs.modedb,
+ fb->monspecs.modedb_len,
+ &sm501_default_mode, default_bpp);
+ /* edid_data is no longer needed, free it */
+ kfree(info->edid_data);
+ } else {
+ ret = fb_find_mode(&fb->var, fb,
NULL, NULL, 0, NULL, 8);
+ }
- if (ret == 0 || ret == 4) {
- dev_err(info->dev,
- "failed to get initial mode\n");
+ switch (ret) {
+ case 1:
+ dev_info(info->dev, "using mode specified in "
+ "@mode\n");
+ break;
+ case 2:
+ dev_info(info->dev, "using mode specified in "
+ "@mode with ignored refresh rate\n");
+ break;
+ case 3:
+				dev_info(info->dev, "using default mode\n");
+ break;
+ case 4:
+ dev_info(info->dev, "using mode from list\n");
+ break;
+ default:
+ dev_info(info->dev, "ret = %d\n", ret);
+ dev_info(info->dev, "failed to find mode\n");
return -EINVAL;
}
}
@@ -1875,8 +1945,32 @@ static int __devinit sm501fb_probe(struct platform_device *pdev)
}
if (info->pdata == NULL) {
- dev_info(dev, "using default configuration data\n");
+ int found = 0;
+#if defined(CONFIG_OF)
+ struct device_node *np = pdev->dev.parent->of_node;
+ const u8 *prop;
+ const char *cp;
+ int len;
+
info->pdata = &sm501fb_def_pdata;
+ if (np) {
+ /* Get EDID */
+ cp = of_get_property(np, "mode", &len);
+ if (cp)
+ strcpy(fb_mode, cp);
+ prop = of_get_property(np, "edid", &len);
+ if (prop && len == EDID_LENGTH) {
+ info->edid_data = kmemdup(prop, EDID_LENGTH,
+ GFP_KERNEL);
+ if (info->edid_data)
+ found = 1;
+ }
+ }
+#endif
+ if (!found) {
+ dev_info(dev, "using default configuration data\n");
+ info->pdata = &sm501fb_def_pdata;
+ }
}
/* probe for the presence of each panel */
@@ -2085,7 +2179,7 @@ static int sm501fb_suspend(struct platform_device *pdev, pm_message_t state)
struct sm501fb_info *info = platform_get_drvdata(pdev);
/* store crt control to resume with */
- info->pm_crt_ctrl = readl(info->regs + SM501_DC_CRT_CONTROL);
+ info->pm_crt_ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
sm501fb_suspend_fb(info, HEAD_CRT);
sm501fb_suspend_fb(info, HEAD_PANEL);
@@ -2109,10 +2203,10 @@ static int sm501fb_resume(struct platform_device *pdev)
/* restore the items we want to be saved for crt control */
- crt_ctrl = readl(info->regs + SM501_DC_CRT_CONTROL);
+ crt_ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
crt_ctrl &= ~SM501_CRT_CTRL_SAVE;
crt_ctrl |= info->pm_crt_ctrl & SM501_CRT_CTRL_SAVE;
- writel(crt_ctrl, info->regs + SM501_DC_CRT_CONTROL);
+ smc501_writel(crt_ctrl, info->regs + SM501_DC_CRT_CONTROL);
sm501fb_resume_fb(info, HEAD_CRT);
sm501fb_resume_fb(info, HEAD_PANEL);
@@ -2149,6 +2243,11 @@ static void __exit sm501fb_cleanup(void)
module_init(sm501fb_init);
module_exit(sm501fb_cleanup);
+module_param_named(mode, fb_mode, charp, 0);
+MODULE_PARM_DESC(mode,
+ "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
+module_param_named(bpp, default_bpp, ulong, 0);
+MODULE_PARM_DESC(bpp, "Specify bits-per-pixel if not specified in mode");
MODULE_AUTHOR("Ben Dooks, Vincent Sanders");
MODULE_DESCRIPTION("SM501 Framebuffer driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c
index fdb45674e2f6..33df9ec91795 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/svgalib.c
@@ -20,12 +20,12 @@
/* Write a CRT register value spread across multiple registers */
-void svga_wcrt_multi(const struct vga_regset *regset, u32 value) {
-
+void svga_wcrt_multi(void __iomem *regbase, const struct vga_regset *regset, u32 value)
+{
u8 regval, bitval, bitnum;
while (regset->regnum != VGA_REGSET_END_VAL) {
- regval = vga_rcrt(NULL, regset->regnum);
+ regval = vga_rcrt(regbase, regset->regnum);
bitnum = regset->lowbit;
while (bitnum <= regset->highbit) {
bitval = 1 << bitnum;
@@ -34,18 +34,18 @@ void svga_wcrt_multi(const struct vga_regset *regset, u32 value) {
bitnum ++;
value = value >> 1;
}
- vga_wcrt(NULL, regset->regnum, regval);
+ vga_wcrt(regbase, regset->regnum, regval);
regset ++;
}
}
/* Write a sequencer register value spread across multiple registers */
-void svga_wseq_multi(const struct vga_regset *regset, u32 value) {
-
+void svga_wseq_multi(void __iomem *regbase, const struct vga_regset *regset, u32 value)
+{
u8 regval, bitval, bitnum;
while (regset->regnum != VGA_REGSET_END_VAL) {
- regval = vga_rseq(NULL, regset->regnum);
+ regval = vga_rseq(regbase, regset->regnum);
bitnum = regset->lowbit;
while (bitnum <= regset->highbit) {
bitval = 1 << bitnum;
@@ -54,7 +54,7 @@ void svga_wseq_multi(const struct vga_regset *regset, u32 value) {
bitnum ++;
value = value >> 1;
}
- vga_wseq(NULL, regset->regnum, regval);
+ vga_wseq(regbase, regset->regnum, regval);
regset ++;
}
}
@@ -75,95 +75,95 @@ static unsigned int svga_regset_size(const struct vga_regset *regset)
/* Set graphics controller registers to sane values */
-void svga_set_default_gfx_regs(void)
+void svga_set_default_gfx_regs(void __iomem *regbase)
{
/* All standard GFX registers (GR00 - GR08) */
- vga_wgfx(NULL, VGA_GFX_SR_VALUE, 0x00);
- vga_wgfx(NULL, VGA_GFX_SR_ENABLE, 0x00);
- vga_wgfx(NULL, VGA_GFX_COMPARE_VALUE, 0x00);
- vga_wgfx(NULL, VGA_GFX_DATA_ROTATE, 0x00);
- vga_wgfx(NULL, VGA_GFX_PLANE_READ, 0x00);
- vga_wgfx(NULL, VGA_GFX_MODE, 0x00);
-/* vga_wgfx(NULL, VGA_GFX_MODE, 0x20); */
-/* vga_wgfx(NULL, VGA_GFX_MODE, 0x40); */
- vga_wgfx(NULL, VGA_GFX_MISC, 0x05);
-/* vga_wgfx(NULL, VGA_GFX_MISC, 0x01); */
- vga_wgfx(NULL, VGA_GFX_COMPARE_MASK, 0x0F);
- vga_wgfx(NULL, VGA_GFX_BIT_MASK, 0xFF);
+ vga_wgfx(regbase, VGA_GFX_SR_VALUE, 0x00);
+ vga_wgfx(regbase, VGA_GFX_SR_ENABLE, 0x00);
+ vga_wgfx(regbase, VGA_GFX_COMPARE_VALUE, 0x00);
+ vga_wgfx(regbase, VGA_GFX_DATA_ROTATE, 0x00);
+ vga_wgfx(regbase, VGA_GFX_PLANE_READ, 0x00);
+ vga_wgfx(regbase, VGA_GFX_MODE, 0x00);
+/* vga_wgfx(regbase, VGA_GFX_MODE, 0x20); */
+/* vga_wgfx(regbase, VGA_GFX_MODE, 0x40); */
+ vga_wgfx(regbase, VGA_GFX_MISC, 0x05);
+/* vga_wgfx(regbase, VGA_GFX_MISC, 0x01); */
+ vga_wgfx(regbase, VGA_GFX_COMPARE_MASK, 0x0F);
+ vga_wgfx(regbase, VGA_GFX_BIT_MASK, 0xFF);
}
/* Set attribute controller registers to sane values */
-void svga_set_default_atc_regs(void)
+void svga_set_default_atc_regs(void __iomem *regbase)
{
u8 count;
- vga_r(NULL, 0x3DA);
- vga_w(NULL, VGA_ATT_W, 0x00);
+ vga_r(regbase, 0x3DA);
+ vga_w(regbase, VGA_ATT_W, 0x00);
/* All standard ATC registers (AR00 - AR14) */
for (count = 0; count <= 0xF; count ++)
- svga_wattr(count, count);
+ svga_wattr(regbase, count, count);
- svga_wattr(VGA_ATC_MODE, 0x01);
-/* svga_wattr(VGA_ATC_MODE, 0x41); */
- svga_wattr(VGA_ATC_OVERSCAN, 0x00);
- svga_wattr(VGA_ATC_PLANE_ENABLE, 0x0F);
- svga_wattr(VGA_ATC_PEL, 0x00);
- svga_wattr(VGA_ATC_COLOR_PAGE, 0x00);
+ svga_wattr(regbase, VGA_ATC_MODE, 0x01);
+/* svga_wattr(regbase, VGA_ATC_MODE, 0x41); */
+ svga_wattr(regbase, VGA_ATC_OVERSCAN, 0x00);
+ svga_wattr(regbase, VGA_ATC_PLANE_ENABLE, 0x0F);
+ svga_wattr(regbase, VGA_ATC_PEL, 0x00);
+ svga_wattr(regbase, VGA_ATC_COLOR_PAGE, 0x00);
- vga_r(NULL, 0x3DA);
- vga_w(NULL, VGA_ATT_W, 0x20);
+ vga_r(regbase, 0x3DA);
+ vga_w(regbase, VGA_ATT_W, 0x20);
}
/* Set sequencer registers to sane values */
-void svga_set_default_seq_regs(void)
+void svga_set_default_seq_regs(void __iomem *regbase)
{
/* Standard sequencer registers (SR01 - SR04), SR00 is not set */
- vga_wseq(NULL, VGA_SEQ_CLOCK_MODE, VGA_SR01_CHAR_CLK_8DOTS);
- vga_wseq(NULL, VGA_SEQ_PLANE_WRITE, VGA_SR02_ALL_PLANES);
- vga_wseq(NULL, VGA_SEQ_CHARACTER_MAP, 0x00);
-/* vga_wseq(NULL, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM | VGA_SR04_SEQ_MODE | VGA_SR04_CHN_4M); */
- vga_wseq(NULL, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM | VGA_SR04_SEQ_MODE);
+ vga_wseq(regbase, VGA_SEQ_CLOCK_MODE, VGA_SR01_CHAR_CLK_8DOTS);
+ vga_wseq(regbase, VGA_SEQ_PLANE_WRITE, VGA_SR02_ALL_PLANES);
+ vga_wseq(regbase, VGA_SEQ_CHARACTER_MAP, 0x00);
+/* vga_wseq(regbase, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM | VGA_SR04_SEQ_MODE | VGA_SR04_CHN_4M); */
+ vga_wseq(regbase, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM | VGA_SR04_SEQ_MODE);
}
/* Set CRTC registers to sane values */
-void svga_set_default_crt_regs(void)
+void svga_set_default_crt_regs(void __iomem *regbase)
{
/* Standard CRT registers CR03 CR08 CR09 CR14 CR17 */
- svga_wcrt_mask(0x03, 0x80, 0x80); /* Enable vertical retrace EVRA */
- vga_wcrt(NULL, VGA_CRTC_PRESET_ROW, 0);
- svga_wcrt_mask(VGA_CRTC_MAX_SCAN, 0, 0x1F);
- vga_wcrt(NULL, VGA_CRTC_UNDERLINE, 0);
- vga_wcrt(NULL, VGA_CRTC_MODE, 0xE3);
+ svga_wcrt_mask(regbase, 0x03, 0x80, 0x80); /* Enable vertical retrace EVRA */
+ vga_wcrt(regbase, VGA_CRTC_PRESET_ROW, 0);
+ svga_wcrt_mask(regbase, VGA_CRTC_MAX_SCAN, 0, 0x1F);
+ vga_wcrt(regbase, VGA_CRTC_UNDERLINE, 0);
+ vga_wcrt(regbase, VGA_CRTC_MODE, 0xE3);
}
-void svga_set_textmode_vga_regs(void)
+void svga_set_textmode_vga_regs(void __iomem *regbase)
{
- /* svga_wseq_mask(0x1, 0x00, 0x01); */ /* Switch 8/9 pixel per char */
- vga_wseq(NULL, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM);
- vga_wseq(NULL, VGA_SEQ_PLANE_WRITE, 0x03);
+ /* svga_wseq_mask(regbase, 0x1, 0x00, 0x01); */ /* Switch 8/9 pixel per char */
+ vga_wseq(regbase, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM);
+ vga_wseq(regbase, VGA_SEQ_PLANE_WRITE, 0x03);
- vga_wcrt(NULL, VGA_CRTC_MAX_SCAN, 0x0f); /* 0x4f */
- vga_wcrt(NULL, VGA_CRTC_UNDERLINE, 0x1f);
- svga_wcrt_mask(VGA_CRTC_MODE, 0x23, 0x7f);
+ vga_wcrt(regbase, VGA_CRTC_MAX_SCAN, 0x0f); /* 0x4f */
+ vga_wcrt(regbase, VGA_CRTC_UNDERLINE, 0x1f);
+ svga_wcrt_mask(regbase, VGA_CRTC_MODE, 0x23, 0x7f);
- vga_wcrt(NULL, VGA_CRTC_CURSOR_START, 0x0d);
- vga_wcrt(NULL, VGA_CRTC_CURSOR_END, 0x0e);
- vga_wcrt(NULL, VGA_CRTC_CURSOR_HI, 0x00);
- vga_wcrt(NULL, VGA_CRTC_CURSOR_LO, 0x00);
+ vga_wcrt(regbase, VGA_CRTC_CURSOR_START, 0x0d);
+ vga_wcrt(regbase, VGA_CRTC_CURSOR_END, 0x0e);
+ vga_wcrt(regbase, VGA_CRTC_CURSOR_HI, 0x00);
+ vga_wcrt(regbase, VGA_CRTC_CURSOR_LO, 0x00);
- vga_wgfx(NULL, VGA_GFX_MODE, 0x10); /* Odd/even memory mode */
- vga_wgfx(NULL, VGA_GFX_MISC, 0x0E); /* Misc graphics register - text mode enable */
- vga_wgfx(NULL, VGA_GFX_COMPARE_MASK, 0x00);
+ vga_wgfx(regbase, VGA_GFX_MODE, 0x10); /* Odd/even memory mode */
+ vga_wgfx(regbase, VGA_GFX_MISC, 0x0E); /* Misc graphics register - text mode enable */
+ vga_wgfx(regbase, VGA_GFX_COMPARE_MASK, 0x00);
- vga_r(NULL, 0x3DA);
- vga_w(NULL, VGA_ATT_W, 0x00);
+ vga_r(regbase, 0x3DA);
+ vga_w(regbase, VGA_ATT_W, 0x00);
- svga_wattr(0x10, 0x0C); /* Attribute Mode Control Register - text mode, blinking and line graphics */
- svga_wattr(0x13, 0x08); /* Horizontal Pixel Panning Register */
+ svga_wattr(regbase, 0x10, 0x0C); /* Attribute Mode Control Register - text mode, blinking and line graphics */
+ svga_wattr(regbase, 0x13, 0x08); /* Horizontal Pixel Panning Register */
- vga_r(NULL, 0x3DA);
- vga_w(NULL, VGA_ATT_W, 0x20);
+ vga_r(regbase, 0x3DA);
+ vga_w(regbase, VGA_ATT_W, 0x20);
}
#if 0
@@ -299,7 +299,7 @@ void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit)
}
/* Set cursor in text (tileblit) mode */
-void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor)
+void svga_tilecursor(void __iomem *regbase, struct fb_info *info, struct fb_tilecursor *cursor)
{
u8 cs = 0x0d;
u8 ce = 0x0e;
@@ -310,7 +310,7 @@ void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor)
if (! cursor -> mode)
return;
- svga_wcrt_mask(0x0A, 0x20, 0x20); /* disable cursor */
+ svga_wcrt_mask(regbase, 0x0A, 0x20, 0x20); /* disable cursor */
if (cursor -> shape == FB_TILE_CURSOR_NONE)
return;
@@ -334,11 +334,11 @@ void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor)
}
/* set cursor position */
- vga_wcrt(NULL, 0x0E, pos >> 8);
- vga_wcrt(NULL, 0x0F, pos & 0xFF);
+ vga_wcrt(regbase, 0x0E, pos >> 8);
+ vga_wcrt(regbase, 0x0F, pos & 0xFF);
- vga_wcrt(NULL, 0x0B, ce); /* set cursor end */
- vga_wcrt(NULL, 0x0A, cs); /* set cursor start and enable it */
+ vga_wcrt(regbase, 0x0B, ce); /* set cursor end */
+ vga_wcrt(regbase, 0x0A, cs); /* set cursor start and enable it */
}
int svga_get_tilemax(struct fb_info *info)
@@ -507,8 +507,9 @@ int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screenin
}
/* Set CRT timing registers */
-void svga_set_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var,
- u32 hmul, u32 hdiv, u32 vmul, u32 vdiv, u32 hborder, int node)
+void svga_set_timings(void __iomem *regbase, const struct svga_timing_regs *tm,
+ struct fb_var_screeninfo *var,
+ u32 hmul, u32 hdiv, u32 vmul, u32 vdiv, u32 hborder, int node)
{
u8 regval;
u32 value;
@@ -516,66 +517,66 @@ void svga_set_timings(const struct svga_timing_regs *tm, struct fb_var_screeninf
value = var->xres + var->left_margin + var->right_margin + var->hsync_len;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal total : %d\n", node, value);
- svga_wcrt_multi(tm->h_total_regs, (value / 8) - 5);
+ svga_wcrt_multi(regbase, tm->h_total_regs, (value / 8) - 5);
value = var->xres;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal display : %d\n", node, value);
- svga_wcrt_multi(tm->h_display_regs, (value / 8) - 1);
+ svga_wcrt_multi(regbase, tm->h_display_regs, (value / 8) - 1);
value = var->xres;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal blank start: %d\n", node, value);
- svga_wcrt_multi(tm->h_blank_start_regs, (value / 8) - 1 + hborder);
+ svga_wcrt_multi(regbase, tm->h_blank_start_regs, (value / 8) - 1 + hborder);
value = var->xres + var->left_margin + var->right_margin + var->hsync_len;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal blank end : %d\n", node, value);
- svga_wcrt_multi(tm->h_blank_end_regs, (value / 8) - 1 - hborder);
+ svga_wcrt_multi(regbase, tm->h_blank_end_regs, (value / 8) - 1 - hborder);
value = var->xres + var->right_margin;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal sync start : %d\n", node, value);
- svga_wcrt_multi(tm->h_sync_start_regs, (value / 8));
+ svga_wcrt_multi(regbase, tm->h_sync_start_regs, (value / 8));
value = var->xres + var->right_margin + var->hsync_len;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal sync end : %d\n", node, value);
- svga_wcrt_multi(tm->h_sync_end_regs, (value / 8));
+ svga_wcrt_multi(regbase, tm->h_sync_end_regs, (value / 8));
value = var->yres + var->upper_margin + var->lower_margin + var->vsync_len;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical total : %d\n", node, value);
- svga_wcrt_multi(tm->v_total_regs, value - 2);
+ svga_wcrt_multi(regbase, tm->v_total_regs, value - 2);
value = var->yres;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical display : %d\n", node, value);
- svga_wcrt_multi(tm->v_display_regs, value - 1);
+ svga_wcrt_multi(regbase, tm->v_display_regs, value - 1);
value = var->yres;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical blank start : %d\n", node, value);
- svga_wcrt_multi(tm->v_blank_start_regs, value);
+ svga_wcrt_multi(regbase, tm->v_blank_start_regs, value);
value = var->yres + var->upper_margin + var->lower_margin + var->vsync_len;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical blank end : %d\n", node, value);
- svga_wcrt_multi(tm->v_blank_end_regs, value - 2);
+ svga_wcrt_multi(regbase, tm->v_blank_end_regs, value - 2);
value = var->yres + var->lower_margin;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical sync start : %d\n", node, value);
- svga_wcrt_multi(tm->v_sync_start_regs, value);
+ svga_wcrt_multi(regbase, tm->v_sync_start_regs, value);
value = var->yres + var->lower_margin + var->vsync_len;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical sync end : %d\n", node, value);
- svga_wcrt_multi(tm->v_sync_end_regs, value);
+ svga_wcrt_multi(regbase, tm->v_sync_end_regs, value);
/* Set horizontal and vertical sync pulse polarity in misc register */
- regval = vga_r(NULL, VGA_MIS_R);
+ regval = vga_r(regbase, VGA_MIS_R);
if (var->sync & FB_SYNC_HOR_HIGH_ACT) {
pr_debug("fb%d: positive horizontal sync\n", node);
regval = regval & ~0x80;
@@ -590,7 +591,7 @@ void svga_set_timings(const struct svga_timing_regs *tm, struct fb_var_screeninf
pr_debug("fb%d: negative vertical sync\n\n", node);
regval = regval | 0x40;
}
- vga_w(NULL, VGA_MIS_W, regval);
+ vga_w(regbase, VGA_MIS_W, regval);
}
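Note on the svgalib hunks above: the helpers no longer hard-code NULL into vga_r()/vga_w()/vga_wcrt()/vga_wseq() but take a regbase supplied by the caller, so the same code works whether the VGA registers sit at the legacy I/O ports or are remapped into MMIO space. Illustrative only, with a hypothetical name, of how such a dispatch can look; the real helpers live in include/video/vga.h.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical example, not the in-tree helper. */
static inline void example_vga_w(void __iomem *regbase, unsigned short port, u8 val)
{
	if (regbase)
		writeb(val, regbase + port);	/* registers relocated into MMIO */
	else
		outb(val, port);		/* classic VGA port I/O */
}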
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index 855b71993f61..07c66e946634 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -480,6 +480,7 @@ out_dealloc_cmap:
out_unmap_regs:
tcx_unmap_regs(op, info, par);
+ framebuffer_release(info);
out_err:
return err;
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index dfef88c803d4..9710bf8caeae 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -250,8 +250,7 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
*/
static int tmiofb_hw_stop(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data = mfd_get_data(dev);
struct fb_info *info = platform_get_drvdata(dev);
struct tmiofb_par *par = info->par;
@@ -268,7 +267,7 @@ static int tmiofb_hw_stop(struct platform_device *dev)
*/
static int tmiofb_hw_init(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct fb_info *info = platform_get_drvdata(dev);
struct tmiofb_par *par = info->par;
const struct resource *nlcr = &cell->resources[0];
@@ -312,8 +311,7 @@ static int tmiofb_hw_init(struct platform_device *dev)
*/
static void tmiofb_hw_mode(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data = mfd_get_data(dev);
struct fb_info *info = platform_get_drvdata(dev);
struct fb_videomode *mode = info->mode;
struct tmiofb_par *par = info->par;
@@ -559,9 +557,8 @@ static int tmiofb_ioctl(struct fb_info *fbi,
static struct fb_videomode *
tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
{
- struct mfd_cell *cell =
- info->device->platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data =
+ mfd_get_data(to_platform_device(info->device));
struct fb_videomode *best = NULL;
int i;
@@ -581,9 +578,8 @@ static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct fb_videomode *mode;
- struct mfd_cell *cell =
- info->device->platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data =
+ mfd_get_data(to_platform_device(info->device));
mode = tmiofb_find_mode(info, var);
if (!mode || var->bits_per_pixel > 16)
@@ -683,8 +679,8 @@ static struct fb_ops tmiofb_ops = {
static int __devinit tmiofb_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+ struct tmio_fb_data *data = mfd_get_data(dev);
struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -811,7 +807,7 @@ err_ioremap_ccr:
static int __devexit tmiofb_remove(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct fb_info *info = platform_get_drvdata(dev);
int irq = platform_get_irq(dev, 0);
struct tmiofb_par *par;
@@ -941,7 +937,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
#ifdef CONFIG_FB_TMIO_ACCELL
struct tmiofb_par *par = info->par;
#endif
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int retval = 0;
console_lock();
@@ -973,7 +969,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
static int tmiofb_resume(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int retval = 0;
console_lock();
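Note on the tmiofb conversion above (and the ds1wm one further down): the driver stops assuming that dev.platform_data points at the mfd_cell and instead fetches the cell and its driver data through mfd_get_cell()/mfd_get_data(). Rough shape of the cell accessor, assuming the MFD core stores the cell pointer on the platform device when it registers the sub-device; the real declarations are in include/linux/mfd/core.h and the field name below is an assumption.

#include <linux/platform_device.h>
#include <linux/mfd/core.h>

/* Sketch only; the example_ prefix marks it as illustrative. */
static inline const struct mfd_cell *example_mfd_get_cell(struct platform_device *pdev)
{
	return pdev->mfd_cell;	/* attached by the MFD core at registration time */
}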
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 5180a215d781..7f8472cc993b 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -1552,8 +1552,7 @@ static void __devinit uvesafb_init_mtrr(struct fb_info *info)
int rc;
/* Find the largest power-of-two */
- while (temp_size & (temp_size - 1))
- temp_size &= (temp_size - 1);
+ temp_size = roundup_pow_of_two(temp_size);
/* Try and find a power of two to add */
do {
@@ -1566,6 +1565,28 @@ static void __devinit uvesafb_init_mtrr(struct fb_info *info)
#endif /* CONFIG_MTRR */
}
+static void __devinit uvesafb_ioremap(struct fb_info *info)
+{
+#ifdef CONFIG_X86
+ switch (mtrr) {
+ case 1: /* uncachable */
+ info->screen_base = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
+ break;
+ case 2: /* write-back */
+ info->screen_base = ioremap_cache(info->fix.smem_start, info->fix.smem_len);
+ break;
+ case 3: /* write-combining */
+ info->screen_base = ioremap_wc(info->fix.smem_start, info->fix.smem_len);
+ break;
+ case 4: /* write-through */
+ default:
+ info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
+ break;
+ }
+#else
+ info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
+#endif /* CONFIG_X86 */
+}
static ssize_t uvesafb_show_vbe_ver(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1736,15 +1757,22 @@ static int __devinit uvesafb_probe(struct platform_device *dev)
uvesafb_init_info(info, mode);
+ if (!request_region(0x3c0, 32, "uvesafb")) {
+ printk(KERN_ERR "uvesafb: request region 0x3c0-0x3e0 failed\n");
+ err = -EIO;
+ goto out_mode;
+ }
+
if (!request_mem_region(info->fix.smem_start, info->fix.smem_len,
"uvesafb")) {
printk(KERN_ERR "uvesafb: cannot reserve video memory at "
"0x%lx\n", info->fix.smem_start);
err = -EIO;
- goto out_mode;
+ goto out_reg;
}
- info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
+ uvesafb_init_mtrr(info);
+ uvesafb_ioremap(info);
if (!info->screen_base) {
printk(KERN_ERR
@@ -1755,20 +1783,13 @@ static int __devinit uvesafb_probe(struct platform_device *dev)
goto out_mem;
}
- if (!request_region(0x3c0, 32, "uvesafb")) {
- printk(KERN_ERR "uvesafb: request region 0x3c0-0x3e0 failed\n");
- err = -EIO;
- goto out_unmap;
- }
-
- uvesafb_init_mtrr(info);
platform_set_drvdata(dev, info);
if (register_framebuffer(info) < 0) {
printk(KERN_ERR
"uvesafb: failed to register framebuffer device\n");
err = -EINVAL;
- goto out_reg;
+ goto out_unmap;
}
printk(KERN_INFO "uvesafb: framebuffer at 0x%lx, mapped to 0x%p, "
@@ -1785,12 +1806,12 @@ static int __devinit uvesafb_probe(struct platform_device *dev)
return 0;
-out_reg:
- release_region(0x3c0, 32);
out_unmap:
iounmap(info->screen_base);
out_mem:
release_mem_region(info->fix.smem_start, info->fix.smem_len);
+out_reg:
+ release_region(0x3c0, 32);
out_mode:
if (!list_empty(&info->modelist))
fb_destroy_modelist(&info->modelist);
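Note on the uvesafb probe reshuffle above: the 0x3c0-0x3df VGA I/O region is now claimed before video memory is mapped, the mapping itself picks a caching attribute from the mtrr parameter via uvesafb_ioremap(), and the error path unwinds in strict reverse order of acquisition. A compact sketch of that acquire/release ordering, with illustrative names and parameters rather than the driver's actual code.

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *example_map_fb(unsigned long start, unsigned long len)
{
	void __iomem *base;

	if (!request_region(0x3c0, 32, "example"))
		return NULL;
	if (!request_mem_region(start, len, "example"))
		goto out_reg;
	base = ioremap_wc(start, len);	/* write-combining, as mtrr=3 selects above */
	if (!base)
		goto out_mem;
	return base;

out_mem:
	release_mem_region(start, len);
out_reg:
	release_region(0x3c0, 32);
	return NULL;
}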
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index 931a567f9aff..970e43d13f52 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -891,8 +891,7 @@ static int vmlfb_set_par(struct fb_info *info)
int ret;
mutex_lock(&vml_mutex);
- list_del(&vinfo->head);
- list_add(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
+ list_move(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
ret = vmlfb_set_par_locked(vinfo);
mutex_unlock(&vml_mutex);
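Note on the vermilion change above: list_move() replaces an open-coded list_del()/list_add() pair and performs the same operation, unlinking the entry from whatever list it is on and splicing it in at the new head. Equivalent sketch for reference.

#include <linux/list.h>

/* Functionally what list_move(entry, head) does. */
static inline void example_list_move(struct list_head *entry, struct list_head *head)
{
	list_del(entry);	/* unlink from the current list */
	list_add(entry, head);	/* re-insert right after the new head */
}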
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index 6a069d047914..a99bbe86db13 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -303,19 +303,6 @@ static int __init vesafb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = screen_info.lfb_base;
info->apertures->ranges[0].size = size_total;
- info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
- if (!info->screen_base) {
- printk(KERN_ERR
- "vesafb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
- vesafb_fix.smem_len, vesafb_fix.smem_start);
- err = -EIO;
- goto err;
- }
-
- printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, "
- "using %dk, total %dk\n",
- vesafb_fix.smem_start, info->screen_base,
- size_remap/1024, size_total/1024);
printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
@@ -438,8 +425,7 @@ static int __init vesafb_probe(struct platform_device *dev)
int rc;
/* Find the largest power-of-two */
- while (temp_size & (temp_size - 1))
- temp_size &= (temp_size - 1);
+ temp_size = roundup_pow_of_two(temp_size);
/* Try and find a power of two to add */
do {
@@ -451,6 +437,34 @@ static int __init vesafb_probe(struct platform_device *dev)
}
#endif
+ switch (mtrr) {
+ case 1: /* uncachable */
+ info->screen_base = ioremap_nocache(vesafb_fix.smem_start, vesafb_fix.smem_len);
+ break;
+ case 2: /* write-back */
+ info->screen_base = ioremap_cache(vesafb_fix.smem_start, vesafb_fix.smem_len);
+ break;
+ case 3: /* write-combining */
+ info->screen_base = ioremap_wc(vesafb_fix.smem_start, vesafb_fix.smem_len);
+ break;
+ case 4: /* write-through */
+ default:
+ info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
+ break;
+ }
+ if (!info->screen_base) {
+ printk(KERN_ERR
+ "vesafb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+ vesafb_fix.smem_len, vesafb_fix.smem_start);
+ err = -EIO;
+ goto err;
+ }
+
+ printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, "
+ "using %dk, total %dk\n",
+ vesafb_fix.smem_start, info->screen_base,
+ size_remap/1024, size_total/1024);
+
info->fbops = &vesafb_ops;
info->var = vesafb_defined;
info->fix = vesafb_fix;
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h
index d66f963e930e..137996dc547e 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/via/viafbdev.h
@@ -94,9 +94,6 @@ extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
extern int viafb_hotplug;
-extern int strict_strtoul(const char *cp, unsigned int base,
- unsigned long *res);
-
u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information
*plvds_chip_info, u8 index);
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index a2965ab92cfb..f9b3e3dc2421 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -121,13 +121,19 @@ MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, d
/* ------------------------------------------------------------------------- */
+static void vt8623fb_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor)
+{
+ struct vt8623fb_info *par = info->par;
+
+ svga_tilecursor(par->state.vgabase, info, cursor);
+}
static struct fb_tile_ops vt8623fb_tile_ops = {
.fb_settile = svga_settile,
.fb_tilecopy = svga_tilecopy,
.fb_tilefill = svga_tilefill,
.fb_tileblit = svga_tileblit,
- .fb_tilecursor = svga_tilecursor,
+ .fb_tilecursor = vt8623fb_tilecursor,
.fb_get_tilemax = svga_get_tilemax,
};
@@ -253,6 +259,7 @@ static void vt8623fb_fillrect(struct fb_info *info, const struct fb_fillrect *re
static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
{
+ struct vt8623fb_info *par = info->par;
u16 m, n, r;
u8 regval;
int rv;
@@ -264,18 +271,18 @@ static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
}
/* Set VGA misc register */
- regval = vga_r(NULL, VGA_MIS_R);
- vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
+ regval = vga_r(par->state.vgabase, VGA_MIS_R);
+ vga_w(par->state.vgabase, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
/* Set clock registers */
- vga_wseq(NULL, 0x46, (n | (r << 6)));
- vga_wseq(NULL, 0x47, m);
+ vga_wseq(par->state.vgabase, 0x46, (n | (r << 6)));
+ vga_wseq(par->state.vgabase, 0x47, m);
udelay(1000);
/* PLL reset */
- svga_wseq_mask(0x40, 0x02, 0x02);
- svga_wseq_mask(0x40, 0x00, 0x02);
+ svga_wseq_mask(par->state.vgabase, 0x40, 0x02, 0x02);
+ svga_wseq_mask(par->state.vgabase, 0x40, 0x00, 0x02);
}
@@ -285,7 +292,10 @@ static int vt8623fb_open(struct fb_info *info, int user)
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
+ void __iomem *vgabase = par->state.vgabase;
+
memset(&(par->state), 0, sizeof(struct vgastate));
+ par->state.vgabase = vgabase;
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
par->state.num_crtc = 0xA2;
par->state.num_seq = 0x50;
@@ -373,6 +383,7 @@ static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
static int vt8623fb_set_par(struct fb_info *info)
{
u32 mode, offset_value, fetch_value, screen_size;
+ struct vt8623fb_info *par = info->par;
u32 bpp = info->var.bits_per_pixel;
if (bpp != 0) {
@@ -414,82 +425,82 @@ static int vt8623fb_set_par(struct fb_info *info)
info->var.activate = FB_ACTIVATE_NOW;
/* Unlock registers */
- svga_wseq_mask(0x10, 0x01, 0x01);
- svga_wcrt_mask(0x11, 0x00, 0x80);
- svga_wcrt_mask(0x47, 0x00, 0x01);
+ svga_wseq_mask(par->state.vgabase, 0x10, 0x01, 0x01);
+ svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x80);
+ svga_wcrt_mask(par->state.vgabase, 0x47, 0x00, 0x01);
/* Device, screen and sync off */
- svga_wseq_mask(0x01, 0x20, 0x20);
- svga_wcrt_mask(0x36, 0x30, 0x30);
- svga_wcrt_mask(0x17, 0x00, 0x80);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x36, 0x30, 0x30);
+ svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
/* Set default values */
- svga_set_default_gfx_regs();
- svga_set_default_atc_regs();
- svga_set_default_seq_regs();
- svga_set_default_crt_regs();
- svga_wcrt_multi(vt8623_line_compare_regs, 0xFFFFFFFF);
- svga_wcrt_multi(vt8623_start_address_regs, 0);
+ svga_set_default_gfx_regs(par->state.vgabase);
+ svga_set_default_atc_regs(par->state.vgabase);
+ svga_set_default_seq_regs(par->state.vgabase);
+ svga_set_default_crt_regs(par->state.vgabase);
+ svga_wcrt_multi(par->state.vgabase, vt8623_line_compare_regs, 0xFFFFFFFF);
+ svga_wcrt_multi(par->state.vgabase, vt8623_start_address_regs, 0);
- svga_wcrt_multi(vt8623_offset_regs, offset_value);
- svga_wseq_multi(vt8623_fetch_count_regs, fetch_value);
+ svga_wcrt_multi(par->state.vgabase, vt8623_offset_regs, offset_value);
+ svga_wseq_multi(par->state.vgabase, vt8623_fetch_count_regs, fetch_value);
/* Clear H/V Skew */
- svga_wcrt_mask(0x03, 0x00, 0x60);
- svga_wcrt_mask(0x05, 0x00, 0x60);
+ svga_wcrt_mask(par->state.vgabase, 0x03, 0x00, 0x60);
+ svga_wcrt_mask(par->state.vgabase, 0x05, 0x00, 0x60);
if (info->var.vmode & FB_VMODE_DOUBLE)
- svga_wcrt_mask(0x09, 0x80, 0x80);
+ svga_wcrt_mask(par->state.vgabase, 0x09, 0x80, 0x80);
else
- svga_wcrt_mask(0x09, 0x00, 0x80);
+ svga_wcrt_mask(par->state.vgabase, 0x09, 0x00, 0x80);
- svga_wseq_mask(0x1E, 0xF0, 0xF0); // DI/DVP bus
- svga_wseq_mask(0x2A, 0x0F, 0x0F); // DI/DVP bus
- svga_wseq_mask(0x16, 0x08, 0xBF); // FIFO read threshold
- vga_wseq(NULL, 0x17, 0x1F); // FIFO depth
- vga_wseq(NULL, 0x18, 0x4E);
- svga_wseq_mask(0x1A, 0x08, 0x08); // enable MMIO ?
+ svga_wseq_mask(par->state.vgabase, 0x1E, 0xF0, 0xF0); // DI/DVP bus
+ svga_wseq_mask(par->state.vgabase, 0x2A, 0x0F, 0x0F); // DI/DVP bus
+ svga_wseq_mask(par->state.vgabase, 0x16, 0x08, 0xBF); // FIFO read threshold
+ vga_wseq(par->state.vgabase, 0x17, 0x1F); // FIFO depth
+ vga_wseq(par->state.vgabase, 0x18, 0x4E);
+ svga_wseq_mask(par->state.vgabase, 0x1A, 0x08, 0x08); // enable MMIO ?
- vga_wcrt(NULL, 0x32, 0x00);
- vga_wcrt(NULL, 0x34, 0x00);
- vga_wcrt(NULL, 0x6A, 0x80);
- vga_wcrt(NULL, 0x6A, 0xC0);
+ vga_wcrt(par->state.vgabase, 0x32, 0x00);
+ vga_wcrt(par->state.vgabase, 0x34, 0x00);
+ vga_wcrt(par->state.vgabase, 0x6A, 0x80);
+ vga_wcrt(par->state.vgabase, 0x6A, 0xC0);
- vga_wgfx(NULL, 0x20, 0x00);
- vga_wgfx(NULL, 0x21, 0x00);
- vga_wgfx(NULL, 0x22, 0x00);
+ vga_wgfx(par->state.vgabase, 0x20, 0x00);
+ vga_wgfx(par->state.vgabase, 0x21, 0x00);
+ vga_wgfx(par->state.vgabase, 0x22, 0x00);
/* Set SR15 according to number of bits per pixel */
mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
switch (mode) {
case 0:
pr_debug("fb%d: text mode\n", info->node);
- svga_set_textmode_vga_regs();
- svga_wseq_mask(0x15, 0x00, 0xFE);
- svga_wcrt_mask(0x11, 0x60, 0x70);
+ svga_set_textmode_vga_regs(par->state.vgabase);
+ svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
+ svga_wcrt_mask(par->state.vgabase, 0x11, 0x60, 0x70);
break;
case 1:
pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
- vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
- svga_wseq_mask(0x15, 0x20, 0xFE);
- svga_wcrt_mask(0x11, 0x00, 0x70);
+ vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
+ svga_wseq_mask(par->state.vgabase, 0x15, 0x20, 0xFE);
+ svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
break;
case 2:
pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
- svga_wseq_mask(0x15, 0x00, 0xFE);
- svga_wcrt_mask(0x11, 0x00, 0x70);
+ svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
+ svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
break;
case 3:
pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
- svga_wseq_mask(0x15, 0x22, 0xFE);
+ svga_wseq_mask(par->state.vgabase, 0x15, 0x22, 0xFE);
break;
case 4:
pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
- svga_wseq_mask(0x15, 0xB6, 0xFE);
+ svga_wseq_mask(par->state.vgabase, 0x15, 0xB6, 0xFE);
break;
case 5:
pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
- svga_wseq_mask(0x15, 0xAE, 0xFE);
+ svga_wseq_mask(par->state.vgabase, 0x15, 0xAE, 0xFE);
break;
default:
printk(KERN_ERR "vt8623fb: unsupported mode - bug\n");
@@ -497,16 +508,16 @@ static int vt8623fb_set_par(struct fb_info *info)
}
vt8623_set_pixclock(info, info->var.pixclock);
- svga_set_timings(&vt8623_timing_regs, &(info->var), 1, 1,
+ svga_set_timings(par->state.vgabase, &vt8623_timing_regs, &(info->var), 1, 1,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
1, info->node);
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
- svga_wcrt_mask(0x17, 0x80, 0x80);
- svga_wcrt_mask(0x36, 0x00, 0x30);
- svga_wseq_mask(0x01, 0x00, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
+ svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
return 0;
}
@@ -569,31 +580,33 @@ static int vt8623fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
static int vt8623fb_blank(int blank_mode, struct fb_info *info)
{
+ struct vt8623fb_info *par = info->par;
+
switch (blank_mode) {
case FB_BLANK_UNBLANK:
pr_debug("fb%d: unblank\n", info->node);
- svga_wcrt_mask(0x36, 0x00, 0x30);
- svga_wseq_mask(0x01, 0x00, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
break;
case FB_BLANK_NORMAL:
pr_debug("fb%d: blank\n", info->node);
- svga_wcrt_mask(0x36, 0x00, 0x30);
- svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_HSYNC_SUSPEND:
pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
- svga_wcrt_mask(0x36, 0x10, 0x30);
- svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x36, 0x10, 0x30);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_VSYNC_SUSPEND:
pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
- svga_wcrt_mask(0x36, 0x20, 0x30);
- svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x36, 0x20, 0x30);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_POWERDOWN:
pr_debug("fb%d: DPMS off (no sync)\n", info->node);
- svga_wcrt_mask(0x36, 0x30, 0x30);
- svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(par->state.vgabase, 0x36, 0x30, 0x30);
+ svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
}
@@ -603,6 +616,7 @@ static int vt8623fb_blank(int blank_mode, struct fb_info *info)
static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ struct vt8623fb_info *par = info->par;
unsigned int offset;
/* Calculate the offset */
@@ -616,7 +630,7 @@ static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *i
}
/* Set the offset */
- svga_wcrt_multi(vt8623_start_address_regs, offset);
+ svga_wcrt_multi(par->state.vgabase, vt8623_start_address_regs, offset);
return 0;
}
@@ -647,6 +661,8 @@ static struct fb_ops vt8623fb_ops = {
static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ struct pci_bus_region bus_reg;
+ struct resource vga_res;
struct fb_info *info;
struct vt8623fb_info *par;
unsigned int memsize1, memsize2;
@@ -705,9 +721,18 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
goto err_iomap_2;
}
+ bus_reg.start = 0;
+ bus_reg.end = 64 * 1024;
+
+ vga_res.flags = IORESOURCE_IO;
+
+ pcibios_bus_to_resource(dev, &vga_res, &bus_reg);
+
+ par->state.vgabase = (void __iomem *) vga_res.start;
+
/* Find how many physical memory there is on card */
- memsize1 = (vga_rseq(NULL, 0x34) + 1) >> 1;
- memsize2 = vga_rseq(NULL, 0x39) << 2;
+ memsize1 = (vga_rseq(par->state.vgabase, 0x34) + 1) >> 1;
+ memsize2 = vga_rseq(par->state.vgabase, 0x39) << 2;
if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
info->screen_size = memsize1 << 20;
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 80b3b123dd7f..7c608c5ccf84 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,7 +60,7 @@ config W1_MASTER_GPIO
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
- depends on ARCH_OMAP2430 || ARCH_OMAP3
+ depends on SOC_OMAP2430 || ARCH_OMAP3
help
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 6b85e7fefa43..95921b77cf86 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -90,7 +90,7 @@ struct ds1wm_data {
void __iomem *map;
int bus_shift; /* # of shifts to calc register offsets */
struct platform_device *pdev;
- struct mfd_cell *cell;
+ const struct mfd_cell *cell;
int irq;
int active_high;
int slave_present;
@@ -216,7 +216,7 @@ static int ds1wm_find_divisor(int gclk)
static void ds1wm_up(struct ds1wm_data *ds1wm_data)
{
int divisor;
- struct ds1wm_driver_data *plat = ds1wm_data->cell->driver_data;
+ struct ds1wm_driver_data *plat = mfd_get_data(ds1wm_data->pdev);
if (ds1wm_data->cell->enable)
ds1wm_data->cell->enable(ds1wm_data->pdev);
@@ -330,16 +330,11 @@ static int ds1wm_probe(struct platform_device *pdev)
struct ds1wm_data *ds1wm_data;
struct ds1wm_driver_data *plat;
struct resource *res;
- struct mfd_cell *cell;
int ret;
if (!pdev)
return -ENODEV;
- cell = pdev->dev.platform_data;
- if (!cell)
- return -ENODEV;
-
ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
if (!ds1wm_data)
return -ENOMEM;
@@ -356,13 +351,13 @@ static int ds1wm_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err0;
}
- plat = cell->driver_data;
+ plat = mfd_get_data(pdev);
/* calculate bus shift from mem resource */
ds1wm_data->bus_shift = resource_size(res) >> 3;
ds1wm_data->pdev = pdev;
- ds1wm_data->cell = cell;
+ ds1wm_data->cell = mfd_get_cell(pdev);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 908160e938dc..b69d71482554 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -533,6 +533,16 @@ config I6300ESB_WDT
To compile this driver as a module, choose M here: the
module will be called i6300esb.
+config INTEL_SCU_WATCHDOG
+ bool "Intel SCU Watchdog for Mobile Platforms"
+ depends on WATCHDOG
+ depends on INTEL_SCU_IPC
+ ---help---
+	  Hardware driver for the watchdog timer built into the Intel SCU
+ for Intel Mobile Platforms.
+
+ To compile this driver as a module, choose M here.
+
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
@@ -580,7 +590,7 @@ config IT87_WDT
depends on X86 && EXPERIMENTAL
---help---
This is the driver for the hardware watchdog on the ITE IT8702,
- IT8712, IT8716, IT8718, IT8720, IT8726, IT8712 Super I/O chips.
+ IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 Super I/O chips.
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
amount of time.
@@ -589,18 +599,20 @@ config IT87_WDT
be called it87_wdt.
config HP_WATCHDOG
- tristate "HP Proliant iLO2+ Hardware Watchdog Timer"
+ tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on X86
+ default m
help
A software monitoring watchdog and NMI sourcing driver. This driver
will detect lockups and provide a stack trace. This is a driver that
- will only load on a HP ProLiant system with a minimum of iLO2 support.
+ will only load on an HP ProLiant system with a minimum of iLO2 support.
To compile this driver as a module, choose M here: the module will be
called hpwdt.
config HPWDT_NMI_DECODING
bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on HP_WATCHDOG
+ default y
help
When an NMI occurs this feature will make the necessary BIOS calls to
log the cause of the NMI.
@@ -903,6 +915,12 @@ config INDYDOG
timer expired and no process has written to /dev/watchdog during
that time.
+config JZ4740_WDT
+ tristate "Ingenic jz4740 SoC hardware watchdog"
+ depends on MACH_JZ4740
+ help
+ Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
+
config WDT_MTX1
tristate "MTX-1 Hardware Watchdog"
depends on MIPS_MTX1
@@ -1111,6 +1129,16 @@ config WATCHDOG_RIO
# XTENSA Architecture
+# Xen Architecture
+
+config XEN_WDT
+ tristate "Xen Watchdog support"
+ depends on XEN
+ help
+ Say Y here to support the hypervisor watchdog capability provided
+ by Xen 4.0 and newer. The watchdog timeout period is normally one
+ minute but can be changed with a boot-time parameter.
+
#
# ISA-based Watchdog Cards
#
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 20e44c4782b3..d520bf9c3355 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
+obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
# M32R Architecture
@@ -114,6 +115,7 @@ obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
+obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
@@ -148,6 +150,9 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
# XTENSA Architecture
+# Xen
+obj-$(CONFIG_XEN_WDT) += xen_wdt.o
+
# Architecture Independant
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index fa4d36033552..f16dcbd475fb 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this,
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id ali_pci_tbl[] __used = {
+static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = {
{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, },
@@ -362,12 +362,12 @@ static int __init ali_find_watchdog(void)
*/
static const struct file_operations ali_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
.write = ali_write,
.unlocked_ioctl = ali_ioctl,
- .open = ali_open,
- .release = ali_release,
+ .open = ali_open,
+ .release = ali_release,
};
static struct miscdevice ali_miscdev = {
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 4b7a2b4138ed..46f4b85b46de 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -430,7 +430,7 @@ err_out:
module_init(alim7101_wdt_init);
module_exit(alim7101_wdt_unload);
-static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = {
+static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ }
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 5f245522397b..bd44417c84d4 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -150,8 +150,8 @@ static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
}
static const struct watchdog_info bcm47xx_wdt_info = {
- .identity = DRV_NAME,
- .options = WDIOF_SETTIMEOUT |
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 9042a95fc98c..b9fa9b71583a 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -63,7 +63,7 @@ static DEFINE_SPINLOCK(bfin_wdt_spinlock);
/**
* bfin_wdt_keepalive - Keep the Userspace Watchdog Alive
*
- * The Userspace watchdog got a KeepAlive: schedule the next timeout.
+ * The Userspace watchdog got a KeepAlive: schedule the next timeout.
*/
static int bfin_wdt_keepalive(void)
{
@@ -337,7 +337,7 @@ static int bfin_wdt_resume(struct platform_device *pdev)
static const struct file_operations bfin_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .write = bfin_wdt_write,
+ .write = bfin_wdt_write,
.unlocked_ioctl = bfin_wdt_ioctl,
.open = bfin_wdt_open,
.release = bfin_wdt_release,
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 7e7ec9c35b6a..337265b47305 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -4,7 +4,7 @@
* Author: Matthew McClintock
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
- * Copyright 2005, 2008, 2010 Freescale Semiconductor Inc.
+ * Copyright 2005, 2008, 2010-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -221,9 +221,8 @@ static int booke_wdt_open(struct inode *inode, struct file *file)
if (booke_wdt_enabled == 0) {
booke_wdt_enabled = 1;
on_each_cpu(__booke_wdt_enable, NULL, 0);
- printk(KERN_INFO
- "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
- booke_wdt_period);
+ pr_debug("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
+ period_to_sec(booke_wdt_period));
}
spin_unlock(&booke_wdt_lock);
@@ -240,6 +239,7 @@ static int booke_wdt_release(struct inode *inode, struct file *file)
*/
on_each_cpu(__booke_wdt_disable, NULL, 0);
booke_wdt_enabled = 0;
+ pr_debug("booke_wdt: watchdog disabled\n");
#endif
clear_bit(0, &wdt_is_active);
@@ -271,21 +271,20 @@ static int __init booke_wdt_init(void)
{
int ret = 0;
- printk(KERN_INFO "PowerPC Book-E Watchdog Timer Loaded\n");
+ pr_info("booke_wdt: powerpc book-e watchdog driver loaded\n");
ident.firmware_version = cur_cpu_spec->pvr_value;
ret = misc_register(&booke_wdt_miscdev);
if (ret) {
- printk(KERN_CRIT "Cannot register miscdev on minor=%d: %d\n",
- WATCHDOG_MINOR, ret);
+ pr_err("booke_wdt: cannot register device (minor=%u, ret=%i)\n",
+ WATCHDOG_MINOR, ret);
return ret;
}
spin_lock(&booke_wdt_lock);
if (booke_wdt_enabled == 1) {
- printk(KERN_INFO
- "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
- booke_wdt_period);
+ pr_info("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
+ period_to_sec(booke_wdt_period));
on_each_cpu(__booke_wdt_enable, NULL, 0);
}
spin_unlock(&booke_wdt_lock);
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 65911678453d..1e013e8457b7 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -5,10 +5,10 @@
* interface and Solaris-compatible ioctls as best it is
* able.
*
- * NOTE: CP1400 systems appear to have a defective intr_mask
- * register on the PLD, preventing the disabling of
- * timer interrupts. We use a timer to periodically
- * reset 'stopped' watchdogs on affected platforms.
+ * NOTE: CP1400 systems appear to have a defective intr_mask
+ * register on the PLD, preventing the disabling of
+ * timer interrupts. We use a timer to periodically
+ * reset 'stopped' watchdogs on affected platforms.
*
* Copyright (c) 2000 Eric Brower (ebrower@usa.net)
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
@@ -107,13 +107,13 @@ static struct cpwd *cpwd_device;
* -------------------
* |- counter val -|
* -------------------
- * dcntr - Current 16-bit downcounter value.
- * When downcounter reaches '0' watchdog expires.
- * Reading this register resets downcounter with
- * 'limit' value.
- * limit - 16-bit countdown value in 1/10th second increments.
- * Writing this register begins countdown with input value.
- * Reading from this register does not affect counter.
+ * dcntr - Current 16-bit downcounter value.
+ * When downcounter reaches '0' watchdog expires.
+ * Reading this register resets downcounter with
+ * 'limit' value.
+ * limit - 16-bit countdown value in 1/10th second increments.
+ * Writing this register begins countdown with input value.
+ * Reading from this register does not affect counter.
* NOTES: After watchdog reset, dcntr and limit contain '1'
*
* status register (byte access):
@@ -123,7 +123,7 @@ static struct cpwd *cpwd_device;
* |- UNUSED -| EXP | RUN |
* ---------------------------
* status- Bit 0 - Watchdog is running
- * Bit 1 - Watchdog has expired
+ * Bit 1 - Watchdog has expired
*
*** PLD register block definition (struct wd_pld_regblk)
*
@@ -197,7 +197,7 @@ static u8 cpwd_readb(void __iomem *addr)
* Because of the CP1400 defect this should only be
 * called during initialization or by wd_[start|stop]timer()
*
- * index - sub-device index, or -1 for 'all'
+ * index - sub-device index, or -1 for 'all'
* enable - non-zero to enable interrupts, zero to disable
*/
static void cpwd_toggleintr(struct cpwd *p, int index, int enable)
@@ -317,13 +317,13 @@ static int cpwd_getstatus(struct cpwd *p, int index)
} else {
/* Fudge WD_EXPIRED status for defective CP1400--
* IF timer is running
- * AND brokenstop is set
- * AND an interrupt has been serviced
+ * AND brokenstop is set
+ * AND an interrupt has been serviced
* we are WD_EXPIRED.
*
* IF timer is running
- * AND brokenstop is set
- * AND no interrupt has been serviced
+ * AND brokenstop is set
+ * AND no interrupt has been serviced
* we are WD_FREERUN.
*/
if (p->broken &&
@@ -613,7 +613,7 @@ static int __devinit cpwd_probe(struct platform_device *op)
if (p->broken) {
init_timer(&cpwd_timer);
- cpwd_timer.function = cpwd_brokentimer;
+ cpwd_timer.function = cpwd_brokentimer;
cpwd_timer.data = (unsigned long) p;
cpwd_timer.expires = WD_BTIMEOUT;
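Since the register description above states that the limit register counts in 1/10 second increments, converting a timeout given in whole seconds is a single multiply; a purely illustrative snippet, not part of this patch:

	/* 60 s timeout -> limit = 600 (0x258) in 1/10 s units */
	u16 limit = timeout_secs * 10;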
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 3f3dc093ad68..f1d1da662fbe 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -201,7 +201,7 @@ static void eurwdt_ping(void)
static ssize_t eurwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- if (count) {
+ if (count) {
if (!nowayout) {
size_t i;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 204a5603c4ae..8cb26855bfed 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -52,7 +52,7 @@ static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
static unsigned long __iomem *hpwdt_timer_con;
-static struct pci_device_id hpwdt_devices[] = {
+static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = {
{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */
{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */
{0}, /* terminate list */
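Several drivers in this series convert their ID tables from a plain struct pci_device_id array to DEFINE_PCI_DEVICE_TABLE. At the time, the macro in <linux/pci.h> was roughly the following (sketch from memory; the exact section attribute may differ): it makes the table const and lets it be discarded when device hotplug support is compiled out.

	#define DEFINE_PCI_DEVICE_TABLE(_table) \
		const struct pci_device_id _table[] __devinitconst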
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index bb9750a03942..db45091ef434 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -334,7 +334,7 @@ static struct miscdevice esb_miscdev = {
/*
* Data for PCI driver interface
*/
-static struct pci_device_id esb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
{ 0, }, /* End of list */
};
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 2c6c2b4ad8bf..35a0d12dad73 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -247,7 +247,7 @@ static struct {
{NULL, 0}
};
-#define ITCO_PCI_DEVICE(dev, data) \
+#define ITCO_PCI_DEVICE(dev, data) \
.vendor = PCI_VENDOR_ID_INTEL, \
.device = dev, \
.subvendor = PCI_ANY_ID, \
@@ -262,7 +262,7 @@ static struct {
* pci_driver, because the I/O Controller Hub has also other
* functions that probably will be registered by other drivers.
*/
-static struct pci_device_id iTCO_wdt_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2)},
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
new file mode 100644
index 000000000000..919bdd16136f
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -0,0 +1,572 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/fs.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/sfi.h>
+#include <linux/types.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/mrst.h>
+
+#include "intel_scu_watchdog.h"
+
+/* Bounds number of times we will retry loading time count */
+/* This retry is a work around for a silicon bug. */
+#define MAX_RETRY 16
+
+#define IPC_SET_WATCHDOG_TIMER 0xF8
+
+static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
+module_param(timer_margin, int, 0);
+MODULE_PARM_DESC(timer_margin,
+ "Watchdog timer margin"
+ "Time between interrupt and resetting the system"
+ "The range is from 1 to 160"
+ "This is the time for all keep alives to arrive");
+
+static int timer_set = DEFAULT_TIME;
+module_param(timer_set, int, 0);
+MODULE_PARM_DESC(timer_set,
+ "Default Watchdog timer setting"
+ "Complete cycle time"
+ "The range is from 1 to 170"
+ "This is the time for all keep alives to arrive");
+
+/* After watchdog device is closed, check force_boot. If:
+ * force_boot == 0, then force boot on next watchdog interrupt after close,
+ * force_boot == 1, then force boot immediately when device is closed.
+ */
+static int force_boot;
+module_param(force_boot, int, 0);
+MODULE_PARM_DESC(force_boot,
+ "A value of 1 means that the driver will reboot"
+ "the system immediately if the /dev/watchdog device is closed"
+ "A value of 0 means that when /dev/watchdog device is closed"
+ "the watchdog timer will be refreshed for one more interval"
+ "of length: timer_set. At the end of this interval, the"
+ "watchdog timer will reset the system."
+ );
+
+/* there is only one device in the system now; this can be made into
+ * an array in the future if we have more than one device */
+
+static struct intel_scu_watchdog_dev watchdog_device;
+
+/* Forces restart, if force_reboot is set */
+static void watchdog_fire(void)
+{
+ if (force_boot) {
+ printk(KERN_CRIT PFX "Initiating system reboot.\n");
+ emergency_restart();
+ printk(KERN_CRIT PFX "Reboot didn't ?????\n");
+ }
+
+ else {
+ printk(KERN_CRIT PFX "Immediate Reboot Disabled\n");
+ printk(KERN_CRIT PFX
+ "System will reset when watchdog timer times out!\n");
+ }
+}
+
+static int check_timer_margin(int new_margin)
+{
+ if ((new_margin < MIN_TIME_CYCLE) ||
+ (new_margin > MAX_TIME - timer_set)) {
+ pr_debug("Watchdog timer: value of new_margin %d is out of the range %d to %d\n",
+ new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * IPC operations
+ */
+static int watchdog_set_ipc(int soft_threshold, int threshold)
+{
+ u32 *ipc_wbuf;
+ u8 cbuf[16] = { '\0' };
+ int ipc_ret = 0;
+
+ ipc_wbuf = (u32 *)&cbuf;
+ ipc_wbuf[0] = soft_threshold;
+ ipc_wbuf[1] = threshold;
+
+ ipc_ret = intel_scu_ipc_command(
+ IPC_SET_WATCHDOG_TIMER,
+ 0,
+ ipc_wbuf,
+ 2,
+ NULL,
+ 0);
+
+ if (ipc_ret != 0)
+ pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);
+
+ return ipc_ret;
+};
+
+/*
+ * Intel_SCU operations
+ */
+
+/* timer interrupt handler */
+static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
+{
+ int int_status;
+ int_status = ioread32(watchdog_device.timer_interrupt_status_addr);
+
+ pr_debug("Watchdog timer: irq, int_status: %x\n", int_status);
+
+ if (int_status != 0)
+ return IRQ_NONE;
+
+ /* has the timer been started? If not, then this is spurious */
+ if (watchdog_device.timer_started == 0) {
+ pr_debug("Watchdog timer: spurious interrupt received\n");
+ return IRQ_HANDLED;
+ }
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* set the timer to the threshold */
+ iowrite32(watchdog_device.threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* allow the timer to run */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ return IRQ_HANDLED;
+}
+
+static int intel_scu_keepalive(void)
+{
+
+ /* read eoi register - clears interrupt */
+ ioread32(watchdog_device.timer_clear_interrupt_addr);
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* set the timer to the soft_threshold */
+ iowrite32(watchdog_device.soft_threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* allow the timer to run */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ return 0;
+}
+
+static int intel_scu_stop(void)
+{
+ iowrite32(0, watchdog_device.timer_control_addr);
+ return 0;
+}
+
+static int intel_scu_set_heartbeat(u32 t)
+{
+ int ipc_ret;
+ int retry_count;
+ u32 soft_value;
+ u32 hw_pre_value;
+ u32 hw_value;
+
+ watchdog_device.timer_set = t;
+ watchdog_device.threshold =
+ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+ watchdog_device.soft_threshold =
+ (watchdog_device.timer_set - timer_margin)
+ * watchdog_device.timer_tbl_ptr->freq_hz;
+
+ pr_debug("Watchdog timer: set_heartbeat: timer freq is %d\n",
+ watchdog_device.timer_tbl_ptr->freq_hz);
+ pr_debug("Watchdog timer: set_heartbeat: timer_set is %x (hex)\n",
+ watchdog_device.timer_set);
+ pr_debug("Watchdog timer: set_hearbeat: timer_margin is %x (hex)\n",
+ timer_margin);
+ pr_debug("Watchdog timer: set_heartbeat: threshold is %x (hex)\n",
+ watchdog_device.threshold);
+ pr_debug("Watchdog timer: set_heartbeat: soft_threshold is %x (hex)\n",
+ watchdog_device.soft_threshold);
+
+ /* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
+ /* watchdog timing come out right. */
+ watchdog_device.threshold =
+ watchdog_device.threshold / FREQ_ADJUSTMENT;
+ watchdog_device.soft_threshold =
+ watchdog_device.soft_threshold / FREQ_ADJUSTMENT;
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* send the threshold and soft_threshold via IPC to the processor */
+ ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
+ watchdog_device.threshold);
+
+ if (ipc_ret != 0) {
+ /* Make sure the watchdog timer is stopped */
+ intel_scu_stop();
+ return ipc_ret;
+ }
+
+ /* Soft Threshold set loop. Early versions of silicon did */
+ /* not always set this count correctly. This loop checks */
+ /* the value and retries if it was not set correctly. */
+
+ retry_count = 0;
+ soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
+ do {
+
+ /* Make sure timer is stopped */
+ intel_scu_stop();
+
+ if (MAX_RETRY < retry_count++) {
+ /* Unable to set timer value */
+ pr_err("Watchdog timer: Unable to set timer\n");
+ return -ENODEV;
+ }
+
+ /* set the timer to the soft threshold */
+ iowrite32(watchdog_device.soft_threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* read count value before starting timer */
+ hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
+ hw_pre_value = hw_pre_value & 0xFFFF0000;
+
+ /* Start the timer */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ /* read the value the time loaded into its count reg */
+ hw_value = ioread32(watchdog_device.timer_load_count_addr);
+ hw_value = hw_value & 0xFFFF0000;
+
+
+ } while (soft_value != hw_value);
+
+ watchdog_device.timer_started = 1;
+
+ return 0;
+}
+
+/*
+ * /dev/watchdog handling
+ */
+
+static int intel_scu_open(struct inode *inode, struct file *file)
+{
+
+ /* Set flag to indicate that watchdog device is open */
+ if (test_and_set_bit(0, &watchdog_device.driver_open))
+ return -EBUSY;
+
+ /* Check for reopen of driver. Reopens are not allowed */
+ if (watchdog_device.driver_closed)
+ return -EPERM;
+
+ return nonseekable_open(inode, file);
+}
+
+static int intel_scu_release(struct inode *inode, struct file *file)
+{
+ /*
+ * This watchdog should not be closed after the timer
+ * has been started with the WDIOC_SETTIMEOUT ioctl.
+ * If force_boot is set watchdog_fire() will cause an
+ * immediate reset. If force_boot is not set, the watchdog
+ * timer is refreshed for one more interval. At the end
+ * of that interval, the watchdog timer will reset the system.
+ */
+
+ if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
+ pr_debug("Watchdog timer: intel_scu_release, without open\n");
+ return -ENOTTY;
+ }
+
+ if (!watchdog_device.timer_started) {
+ /* Just close, since timer has not been started */
+ pr_debug("Watchdog timer: closed, without starting timer\n");
+ return 0;
+ }
+
+ printk(KERN_CRIT PFX
+ "Unexpected close of /dev/watchdog!\n");
+
+ /* Since the timer was started, prevent future reopens */
+ watchdog_device.driver_closed = 1;
+
+ /* Refresh the timer for one more interval */
+ intel_scu_keepalive();
+
+ /* Reboot system (if force_boot is set) */
+ watchdog_fire();
+
+ /* We should only reach this point if force_boot is not set */
+ return 0;
+}
+
+static ssize_t intel_scu_write(struct file *file,
+ char const *data,
+ size_t len,
+ loff_t *ppos)
+{
+
+ if (watchdog_device.timer_started)
+ /* Watchdog already started, keep it alive */
+ intel_scu_keepalive();
+ else
+ /* Start watchdog with timer value set by init */
+ intel_scu_set_heartbeat(watchdog_device.timer_set);
+
+ return len;
+}
+
+static long intel_scu_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ u32 __user *p = argp;
+ u32 new_margin;
+
+
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT
+ | WDIOF_KEEPALIVEPING,
+ .firmware_version = 0, /* @todo Get from SCU via
+ ipc_get_scu_fw_version()? */
+ .identity = "Intel_SCU IOH Watchdog" /* len < 32 */
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp,
+ &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+ case WDIOC_KEEPALIVE:
+ intel_scu_keepalive();
+
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_margin, p))
+ return -EFAULT;
+
+ if (check_timer_margin(new_margin))
+ return -EINVAL;
+
+ if (intel_scu_set_heartbeat(new_margin))
+ return -EINVAL;
+ return 0;
+ case WDIOC_GETTIMEOUT:
+ return put_user(watchdog_device.soft_threshold, p);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+/*
+ * Notifier for system down
+ */
+static int intel_scu_notify_sys(struct notifier_block *this,
+ unsigned long code,
+ void *another_unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ /* Turn off the watchdog timer. */
+ intel_scu_stop();
+ return NOTIFY_DONE;
+}
+
+/*
+ * Kernel Interfaces
+ */
+static const struct file_operations intel_scu_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = intel_scu_write,
+ .unlocked_ioctl = intel_scu_ioctl,
+ .open = intel_scu_open,
+ .release = intel_scu_release,
+};
+
+static int __init intel_scu_watchdog_init(void)
+{
+ int ret;
+ u32 __iomem *tmp_addr;
+
+ /*
+ * We don't strictly need this check, as the SFI timer lookup would fail
+ * anyway, but doing it here lets us exit with a clearer reason and no noise.
+ *
+ * If this is not an Intel MID device then it does not have this watchdog.
+ */
+ if (!mrst_identify_cpu())
+ return -ENODEV;
+
+ /* Check boot parameters to verify that their initial values */
+ /* are in range. */
+ /* Check value of timer_set boot parameter */
+ if ((timer_set < MIN_TIME_CYCLE) ||
+ (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
+ pr_err("Watchdog timer: value of timer_set %x (hex) "
+ "is out of range from %x to %x (hex)\n",
+ timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
+ return -EINVAL;
+ }
+
+ /* Check value of timer_margin boot parameter */
+ if (check_timer_margin(timer_margin))
+ return -EINVAL;
+
+ watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);
+
+ if (watchdog_device.timer_tbl_ptr == NULL) {
+ pr_debug("Watchdog timer - Intel SCU watchdog: timer is not available\n");
+ return -ENODEV;
+ }
+ /* make sure the timer exists */
+ if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
+ pr_debug("Watchdog timer - Intel SCU watchdog - timer %d does not have valid physical memory\n",
+ sfi_mtimer_num);
+ return -ENODEV;
+ }
+
+ if (watchdog_device.timer_tbl_ptr->irq == 0) {
+ pr_debug("Watchdog timer: timer %d invalid irq\n",
+ sfi_mtimer_num);
+ return -ENODEV;
+ }
+
+ tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
+ 20);
+
+ if (tmp_addr == NULL) {
+ pr_debug("Watchdog timer: timer unable to ioremap\n");
+ return -ENOMEM;
+ }
+
+ watchdog_device.timer_load_count_addr = tmp_addr++;
+ watchdog_device.timer_current_value_addr = tmp_addr++;
+ watchdog_device.timer_control_addr = tmp_addr++;
+ watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
+ watchdog_device.timer_interrupt_status_addr = tmp_addr++;
+
+ /* Set the default time values in device structure */
+
+ watchdog_device.timer_set = timer_set;
+ watchdog_device.threshold =
+ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+ watchdog_device.soft_threshold =
+ (watchdog_device.timer_set - timer_margin)
+ * watchdog_device.timer_tbl_ptr->freq_hz;
+
+
+ watchdog_device.intel_scu_notifier.notifier_call =
+ intel_scu_notify_sys;
+
+ ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
+ if (ret) {
+ pr_err("Watchdog timer: cannot register notifier %d)\n", ret);
+ goto register_reboot_error;
+ }
+
+ watchdog_device.miscdev.minor = WATCHDOG_MINOR;
+ watchdog_device.miscdev.name = "watchdog";
+ watchdog_device.miscdev.fops = &intel_scu_fops;
+
+ ret = misc_register(&watchdog_device.miscdev);
+ if (ret) {
+ pr_err("Watchdog timer: cannot register miscdev %d err =%d\n",
+ WATCHDOG_MINOR, ret);
+ goto misc_register_error;
+ }
+
+ ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
+ watchdog_timer_interrupt,
+ IRQF_SHARED, "watchdog",
+ &watchdog_device.timer_load_count_addr);
+ if (ret) {
+ pr_err("Watchdog timer: error requesting irq %d\n", ret);
+ goto request_irq_error;
+ }
+ /* Make sure timer is disabled before returning */
+ intel_scu_stop();
+ return 0;
+
+/* error cleanup */
+
+request_irq_error:
+ misc_deregister(&watchdog_device.miscdev);
+misc_register_error:
+ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+register_reboot_error:
+ intel_scu_stop();
+ iounmap(watchdog_device.timer_load_count_addr);
+ return ret;
+}
+
+static void __exit intel_scu_watchdog_exit(void)
+{
+
+ misc_deregister(&watchdog_device.miscdev);
+ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+ /* disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+ iounmap(watchdog_device.timer_load_count_addr);
+}
+
+late_initcall(intel_scu_watchdog_init);
+module_exit(intel_scu_watchdog_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_VERSION(WDT_VER);
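The new driver exposes the standard /dev/watchdog character interface: the first open arms the timer, writes and WDIOC_KEEPALIVE refresh it, and (per intel_scu_release() above) a close only refreshes it one last time unless force_boot is set. A minimal userspace consumer, shown purely for illustration and not part of this patch:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int fd = open("/dev/watchdog", O_WRONLY);
		int timeout = 60;

		if (fd < 0)
			return 1;
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* validated by check_timer_margin() */
		for (;;) {
			ioctl(fd, WDIOC_KEEPALIVE, 0);	/* or write(fd, "k", 1) */
			sleep(timeout / 2);
		}
		/* never reached: this driver forbids reopening after a close */
	}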
diff --git a/drivers/watchdog/intel_scu_watchdog.h b/drivers/watchdog/intel_scu_watchdog.h
new file mode 100644
index 000000000000..d2b074a82db6
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.h
@@ -0,0 +1,66 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#ifndef __INTEL_SCU_WATCHDOG_H
+#define __INTEL_SCU_WATCHDOG_H
+
+#define PFX "Intel_SCU: "
+#define WDT_VER "0.3"
+
+/* minimum time between interrupts */
+#define MIN_TIME_CYCLE 1
+
+/* Time from warning to reboot is 2 seconds */
+#define DEFAULT_SOFT_TO_HARD_MARGIN 2
+
+#define MAX_TIME 170
+
+#define DEFAULT_TIME 5
+
+#define MAX_SOFT_TO_HARD_MARGIN (MAX_TIME-MIN_TIME_CYCLE)
+
+/* Adjustment to clock tick frequency to make timing come out right */
+#define FREQ_ADJUSTMENT 8
+
+struct intel_scu_watchdog_dev {
+ ulong driver_open;
+ ulong driver_closed;
+ u32 timer_started;
+ u32 timer_set;
+ u32 threshold;
+ u32 soft_threshold;
+ u32 __iomem *timer_load_count_addr;
+ u32 __iomem *timer_current_value_addr;
+ u32 __iomem *timer_control_addr;
+ u32 __iomem *timer_clear_interrupt_addr;
+ u32 __iomem *timer_interrupt_status_addr;
+ struct sfi_timer_table_entry *timer_tbl_ptr;
+ struct notifier_block intel_scu_notifier;
+ struct miscdevice miscdev;
+};
+
+extern int sfi_mtimer_num;
+
+/* extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); */
+#endif /* __INTEL_SCU_WATCHDOG_H */
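With the defaults defined here (DEFAULT_TIME = 5, DEFAULT_SOFT_TO_HARD_MARGIN = 2, FREQ_ADJUSTMENT = 8), intel_scu_set_heartbeat() in the .c file above loads threshold = timer_margin * freq_hz / 8 and soft_threshold = (timer_set - timer_margin) * freq_hz / 8. A worked example with a purely hypothetical timer rate of 1 MHz (the real freq_hz comes from the SFI timer table):

	/* hypothetical freq_hz = 1000000 */
	threshold      = 2 * 1000000 / 8;	/* 250000 ticks: margin from warning IRQ to reset */
	soft_threshold = (5 - 2) * 1000000 / 8;	/* 375000 ticks: interval until the warning IRQ  */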
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index b32c6c045b1a..6143f52ba6b8 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -69,7 +69,7 @@ static unsigned short address;
#define IT8712F_DEVID 0x8712
#define LDN_GPIO 0x07 /* GPIO and Watch Dog Timer */
-#define LDN_GAME 0x09 /* Game Port */
+#define LDN_GAME 0x09 /* Game Port */
#define WDT_CONTROL 0x71 /* WDT Register: Control */
#define WDT_CONFIG 0x72 /* WDT Register: Configuration */
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index dad29245a6a7..b1bc72f9a209 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -12,7 +12,7 @@
* http://www.ite.com.tw/
*
* Support of the watchdog timers, which are available on
- * IT8702, IT8712, IT8716, IT8718, IT8720 and IT8726.
+ * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721 and IT8726.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -45,7 +45,7 @@
#include <asm/system.h>
-#define WATCHDOG_VERSION "1.13"
+#define WATCHDOG_VERSION "1.14"
#define WATCHDOG_NAME "IT87 WDT"
#define PFX WATCHDOG_NAME ": "
#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n"
@@ -54,7 +54,7 @@
/* Defaults for Module Parameter */
#define DEFAULT_NOGAMEPORT 0
#define DEFAULT_EXCLUSIVE 1
-#define DEFAULT_TIMEOUT 60
+#define DEFAULT_TIMEOUT 60
#define DEFAULT_TESTMODE 0
#define DEFAULT_NOWAYOUT WATCHDOG_NOWAYOUT
@@ -70,9 +70,9 @@
/* Configuration Registers and Functions */
#define LDNREG 0x07
#define CHIPID 0x20
-#define CHIPREV 0x22
+#define CHIPREV 0x22
#define ACTREG 0x30
-#define BASEREG 0x60
+#define BASEREG 0x60
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
@@ -82,10 +82,11 @@
#define IT8716_ID 0x8716
#define IT8718_ID 0x8718
#define IT8720_ID 0x8720
+#define IT8721_ID 0x8721
#define IT8726_ID 0x8726 /* the data sheet wrongly suggests 0x8716 */
/* GPIO Configuration Registers LDN=0x07 */
-#define WDTCTRL 0x71
+#define WDTCTRL 0x71
#define WDTCFG 0x72
#define WDTVALLSB 0x73
#define WDTVALMSB 0x74
@@ -94,7 +95,7 @@
#define WDT_CIRINT 0x80
#define WDT_MOUSEINT 0x40
#define WDT_KYBINT 0x20
-#define WDT_GAMEPORT 0x10 /* not in it8718, it8720 */
+#define WDT_GAMEPORT 0x10 /* not in it8718, it8720, it8721 */
#define WDT_FORCE 0x02
#define WDT_ZERO 0x01
@@ -102,11 +103,11 @@
#define WDT_TOV1 0x80
#define WDT_KRST 0x40
#define WDT_TOVE 0x20
-#define WDT_PWROK 0x10
+#define WDT_PWROK 0x10 /* not in it8721 */
#define WDT_INT_MASK 0x0f
/* CIR Configuration Register LDN=0x0a */
-#define CIR_ILS 0x70
+#define CIR_ILS 0x70
/* The default Base address is not always available, we use this */
#define CIR_BASE 0x0208
@@ -134,7 +135,7 @@
#define WDTS_USE_GP 4
#define WDTS_EXPECTED 5
-static unsigned int base, gpact, ciract, max_units;
+static unsigned int base, gpact, ciract, max_units, chip_type;
static unsigned long wdt_status;
static DEFINE_SPINLOCK(spinlock);
@@ -215,7 +216,7 @@ static inline void superio_outw(int val, int reg)
/* Internal function, should be called after superio_select(GPIO) */
static void wdt_update_timeout(void)
{
- unsigned char cfg = WDT_KRST | WDT_PWROK;
+ unsigned char cfg = WDT_KRST;
int tm = timeout;
if (testmode)
@@ -226,6 +227,9 @@ static void wdt_update_timeout(void)
else
tm /= 60;
+ if (chip_type != IT8721_ID)
+ cfg |= WDT_PWROK;
+
superio_outb(cfg, WDTCFG);
superio_outb(tm, WDTVALLSB);
if (max_units > 255)
@@ -555,7 +559,6 @@ static int __init it87_wdt_init(void)
{
int rc = 0;
int try_gameport = !nogameport;
- u16 chip_type;
u8 chip_rev;
unsigned long flags;
@@ -581,6 +584,7 @@ static int __init it87_wdt_init(void)
break;
case IT8718_ID:
case IT8720_ID:
+ case IT8721_ID:
max_units = 65535;
try_gameport = 0;
break;
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
new file mode 100644
index 000000000000..684ba01fb540
--- /dev/null
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
+ * JZ4740 Watchdog driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <asm/mach-jz4740/timer.h>
+
+#define JZ_REG_WDT_TIMER_DATA 0x0
+#define JZ_REG_WDT_COUNTER_ENABLE 0x4
+#define JZ_REG_WDT_TIMER_COUNTER 0x8
+#define JZ_REG_WDT_TIMER_CONTROL 0xC
+
+#define JZ_WDT_CLOCK_PCLK 0x1
+#define JZ_WDT_CLOCK_RTC 0x2
+#define JZ_WDT_CLOCK_EXT 0x4
+
+#define WDT_IN_USE 0
+#define WDT_OK_TO_CLOSE 1
+
+#define JZ_WDT_CLOCK_DIV_SHIFT 3
+
+#define JZ_WDT_CLOCK_DIV_1 (0 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_4 (1 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_16 (2 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_64 (3 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_256 (4 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_1024 (5 << JZ_WDT_CLOCK_DIV_SHIFT)
+
+#define DEFAULT_HEARTBEAT 5
+#define MAX_HEARTBEAT 2048
+
+static struct {
+ void __iomem *base;
+ struct resource *mem;
+ struct clk *rtc_clk;
+ unsigned long status;
+} jz4740_wdt;
+
+static int heartbeat = DEFAULT_HEARTBEAT;
+
+
+static void jz4740_wdt_service(void)
+{
+ writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER);
+}
+
+static void jz4740_wdt_set_heartbeat(int new_heartbeat)
+{
+ unsigned int rtc_clk_rate;
+ unsigned int timeout_value;
+ unsigned short clock_div = JZ_WDT_CLOCK_DIV_1;
+
+ heartbeat = new_heartbeat;
+
+ rtc_clk_rate = clk_get_rate(jz4740_wdt.rtc_clk);
+
+ timeout_value = rtc_clk_rate * heartbeat;
+ while (timeout_value > 0xffff) {
+ if (clock_div == JZ_WDT_CLOCK_DIV_1024) {
+ /* Requested timeout too high;
+ * use highest possible value. */
+ timeout_value = 0xffff;
+ break;
+ }
+ timeout_value >>= 2;
+ clock_div += (1 << JZ_WDT_CLOCK_DIV_SHIFT);
+ }
+
+ writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+ writew(clock_div, jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+ writew((u16)timeout_value, jz4740_wdt.base + JZ_REG_WDT_TIMER_DATA);
+ writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER);
+ writew(clock_div | JZ_WDT_CLOCK_RTC,
+ jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+ writeb(0x1, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static void jz4740_wdt_enable(void)
+{
+ jz4740_timer_enable_watchdog();
+ jz4740_wdt_set_heartbeat(heartbeat);
+}
+
+static void jz4740_wdt_disable(void)
+{
+ jz4740_timer_disable_watchdog();
+ writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static int jz4740_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_IN_USE, &jz4740_wdt.status))
+ return -EBUSY;
+
+ jz4740_wdt_enable();
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t jz4740_wdt_write(struct file *file, const char *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ size_t i;
+
+ clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+
+ if (c == 'V')
+ set_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+ }
+ jz4740_wdt_service();
+ }
+
+ return len;
+}
+
+static const struct watchdog_info ident = {
+ .options = WDIOF_KEEPALIVEPING,
+ .identity = "jz4740 Watchdog",
+};
+
+static long jz4740_wdt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOTTY;
+ int heartbeat_seconds;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user((struct watchdog_info *)arg, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(0, (int *)arg);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ jz4740_wdt_service();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(heartbeat_seconds, (int __user *)arg))
+ return -EFAULT;
+
+ jz4740_wdt_set_heartbeat(heartbeat_seconds);
+ return 0;
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(heartbeat, (int *)arg);
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int jz4740_wdt_release(struct inode *inode, struct file *file)
+{
+ jz4740_wdt_service();
+
+ if (test_and_clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status))
+ jz4740_wdt_disable();
+
+ clear_bit(WDT_IN_USE, &jz4740_wdt.status);
+ return 0;
+}
+
+static const struct file_operations jz4740_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = jz4740_wdt_write,
+ .unlocked_ioctl = jz4740_wdt_ioctl,
+ .open = jz4740_wdt_open,
+ .release = jz4740_wdt_release,
+};
+
+static struct miscdevice jz4740_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &jz4740_wdt_fops,
+};
+
+static int __devinit jz4740_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0, size;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "failed to get memory region resource\n");
+ return -ENXIO;
+ }
+
+ size = resource_size(res);
+ jz4740_wdt.mem = request_mem_region(res->start, size, pdev->name);
+ if (jz4740_wdt.mem == NULL) {
+ dev_err(dev, "failed to get memory region\n");
+ return -EBUSY;
+ }
+
+ jz4740_wdt.base = ioremap_nocache(res->start, size);
+ if (jz4740_wdt.base == NULL) {
+ dev_err(dev, "failed to map memory region\n");
+ ret = -EBUSY;
+ goto err_release_region;
+ }
+
+ jz4740_wdt.rtc_clk = clk_get(NULL, "rtc");
+ if (IS_ERR(jz4740_wdt.rtc_clk)) {
+ dev_err(dev, "cannot find RTC clock\n");
+ ret = PTR_ERR(jz4740_wdt.rtc_clk);
+ goto err_iounmap;
+ }
+
+ ret = misc_register(&jz4740_wdt_miscdev);
+ if (ret < 0) {
+ dev_err(dev, "cannot register misc device\n");
+ goto err_disable_clk;
+ }
+
+ return 0;
+
+err_disable_clk:
+ clk_put(jz4740_wdt.rtc_clk);
+err_iounmap:
+ iounmap(jz4740_wdt.base);
+err_release_region:
+ release_mem_region(jz4740_wdt.mem->start,
+ resource_size(jz4740_wdt.mem));
+ return ret;
+}
+
+
+static int __devexit jz4740_wdt_remove(struct platform_device *pdev)
+{
+ jz4740_wdt_disable();
+ misc_deregister(&jz4740_wdt_miscdev);
+ clk_put(jz4740_wdt.rtc_clk);
+
+ iounmap(jz4740_wdt.base);
+ jz4740_wdt.base = NULL;
+
+ release_mem_region(jz4740_wdt.mem->start,
+ resource_size(jz4740_wdt.mem));
+ jz4740_wdt.mem = NULL;
+
+ return 0;
+}
+
+
+static struct platform_driver jz4740_wdt_driver = {
+ .probe = jz4740_wdt_probe,
+ .remove = __devexit_p(jz4740_wdt_remove),
+ .driver = {
+ .name = "jz4740-wdt",
+ .owner = THIS_MODULE,
+ },
+};
+
+
+static int __init jz4740_wdt_init(void)
+{
+ return platform_driver_register(&jz4740_wdt_driver);
+}
+module_init(jz4740_wdt_init);
+
+static void __exit jz4740_wdt_exit(void)
+{
+ platform_driver_unregister(&jz4740_wdt_driver);
+}
+module_exit(jz4740_wdt_exit);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("jz4740 Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat period in seconds from 1 to "
+ __MODULE_STRING(MAX_HEARTBEAT) ", default "
+ __MODULE_STRING(DEFAULT_HEARTBEAT));
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:jz4740-wdt");
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 928035069396..1332b838cc58 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -54,7 +54,7 @@
/* indexes */ /* size */
#define ZFL_VERSION 0x02 /* 16 */
-#define CONTROL 0x10 /* 16 */
+#define CONTROL 0x10 /* 16 */
#define STATUS 0x12 /* 8 */
#define COUNTER_1 0x0C /* 16 */
#define COUNTER_2 0x0E /* 8 */
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 3053ff05ca41..7a82ce5a6337 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -41,7 +41,7 @@ static int nowayout = WATCHDOG_NOWAYOUT;
* to ping the watchdog.
*/
#define MAX6369_WDSET (7 << 0)
-#define MAX6369_WDI (1 << 3)
+#define MAX6369_WDI (1 << 3)
static DEFINE_SPINLOCK(io_lock);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index ea438ad53055..6709d723e017 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -2,9 +2,9 @@
* mpc8xxx_wdt.c - MPC8xx/MPC83xx/MPC86xx watchdog userspace interface
*
* Authors: Dave Updegraff <dave@cray.org>
- * Kumar Gala <galak@kernel.crashing.org>
- * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
- * ..and from sc520_wdt
+ * Kumar Gala <galak@kernel.crashing.org>
+ * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
+ * ..and from sc520_wdt
* Copyright (c) 2008 MontaVista Software, Inc.
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index b8ec7aca3c8e..2b4af222b5f2 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -172,7 +172,7 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (wdt->expect_close == 42)
mpcore_wdt_stop(wdt);
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 08e8a6ab74e1..5ec5ac1f7878 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -190,19 +190,19 @@ static ssize_t mtx1_wdt_write(struct file *file, const char *buf,
}
static const struct file_operations mtx1_wdt_fops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = mtx1_wdt_ioctl,
- .open = mtx1_wdt_open,
- .write = mtx1_wdt_write,
- .release = mtx1_wdt_release,
+ .open = mtx1_wdt_open,
+ .write = mtx1_wdt_write,
+ .release = mtx1_wdt_release,
};
static struct miscdevice mtx1_wdt_misc = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &mtx1_wdt_fops,
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &mtx1_wdt_fops,
};
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 1a50aa7079bf..267377a5a83e 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -289,7 +289,7 @@ static struct miscdevice nv_tco_miscdev = {
* register a pci_driver, because someone else might one day
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id tco_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 3dd4971160ef..2b4acb86c191 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -124,6 +124,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
u32 pre_margin = GET_WLDR_VAL(timer_margin);
void __iomem *base = wdev->base;
+ pm_runtime_get_sync(wdev->dev);
+
/* just count up at 32 KHz */
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
@@ -131,6 +133,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
__raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR);
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
+
+ pm_runtime_put_sync(wdev->dev);
}
/*
@@ -160,6 +164,8 @@ static int omap_wdt_open(struct inode *inode, struct file *file)
omap_wdt_ping(wdev); /* trigger loading of new timeout value */
omap_wdt_enable(wdev);
+ pm_runtime_put_sync(wdev->dev);
+
return nonseekable_open(inode, file);
}
@@ -171,6 +177,7 @@ static int omap_wdt_release(struct inode *inode, struct file *file)
* Shut off the timer unless NOWAYOUT is defined.
*/
#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
@@ -190,9 +197,11 @@ static ssize_t omap_wdt_write(struct file *file, const char __user *data,
/* Refresh LOAD_TIME. */
if (len) {
+ pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
+ pm_runtime_put_sync(wdev->dev);
}
return len;
}
@@ -224,15 +233,18 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
return put_user(omap_prcm_get_reset_sources(),
(int __user *)arg);
case WDIOC_KEEPALIVE:
+ pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
+ pm_runtime_put_sync(wdev->dev);
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_margin, (int __user *)arg))
return -EFAULT;
omap_wdt_adjust_timeout(new_margin);
+ pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_disable(wdev);
omap_wdt_set_timeout(wdev);
@@ -240,6 +252,7 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
+ pm_runtime_put_sync(wdev->dev);
/* Fall */
case WDIOC_GETTIMEOUT:
return put_user(timer_margin, (int __user *)arg);
@@ -345,8 +358,11 @@ static void omap_wdt_shutdown(struct platform_device *pdev)
{
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
- if (wdev->omap_wdt_users)
+ if (wdev->omap_wdt_users) {
+ pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
+ pm_runtime_put_sync(wdev->dev);
+ }
}
static int __devexit omap_wdt_remove(struct platform_device *pdev)
@@ -381,8 +397,11 @@ static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
- if (wdev->omap_wdt_users)
+ if (wdev->omap_wdt_users) {
+ pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
+ pm_runtime_put_sync(wdev->dev);
+ }
return 0;
}
@@ -392,8 +411,10 @@ static int omap_wdt_resume(struct platform_device *pdev)
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
if (wdev->omap_wdt_users) {
+ pm_runtime_get_sync(wdev->dev);
omap_wdt_enable(wdev);
omap_wdt_ping(wdev);
+ pm_runtime_put_sync(wdev->dev);
}
return 0;
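The omap_wdt changes above bracket every register access with a runtime PM reference so the watchdog's interface and functional clocks are guaranteed to be running. The pattern, shown here only as a generic sketch (wdev->dev is the struct device the driver already keeps):

	pm_runtime_get_sync(wdev->dev);		/* ensure clocks/power are on */
	/* ... access OMAP_WATCHDOG_* registers ... */
	pm_runtime_put_sync(wdev->dev);		/* drop the reference */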
diff --git a/drivers/watchdog/omap_wdt.h b/drivers/watchdog/omap_wdt.h
index fc02ec6a0386..09b774cf75b9 100644
--- a/drivers/watchdog/omap_wdt.h
+++ b/drivers/watchdog/omap_wdt.h
@@ -44,7 +44,7 @@
* months before firing. These limits work without scaling,
* with the 60 second default assumed by most tools and docs.
*/
-#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */
+#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */
#define TIMER_MARGIN_DEFAULT 60 /* 60 secs */
#define TIMER_MARGIN_MIN 1
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 3a56bc360924..139d773300c6 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -514,7 +514,7 @@ static struct miscdevice pc87413_miscdev = {
/* -- Module init functions -------------------------------------*/
/**
- * pc87413_init: module's "constructor"
+ * pc87413_init: module's "constructor"
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 64374d636f09..b8d14f88f0b5 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -817,7 +817,7 @@ static void __devexit pcipcwd_card_exit(struct pci_dev *pdev)
cards_found--;
}
-static struct pci_device_id pcipcwd_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = {
{ PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }, /* End of list */
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index bf5b97c546eb..c7cf4cbf8ab3 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -4,7 +4,7 @@
* Watchdog driver for PNX4008 board
*
* Authors: Dmitry Chigirev <source@mvista.com>,
- * Vitaly Wool <vitalywool@gmail.com>
+ * Vitaly Wool <vitalywool@gmail.com>
* Based on sa1100 driver,
* Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
*
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 3939e53f5f98..d8e725082fdc 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -37,6 +37,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/mfd/rdc321x.h>
+#include <linux/mfd/core.h>
#define RDC_WDT_MASK 0x80000000 /* Mask */
#define RDC_WDT_EN 0x00800000 /* Enable bit */
@@ -231,7 +232,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
struct resource *r;
struct rdc321x_wdt_pdata *pdata;
- pdata = platform_get_drvdata(pdev);
+ pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index ae53662c29bc..25b39bf35925 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -224,7 +224,7 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42)
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index 68e2e2d6f73d..514ec23050f7 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -114,7 +114,7 @@ static char expect_close;
* C | 6.5s 65s 650s 1300s
* D | 7s 70s 700s 1400s
* E | 7.5s 75s 750s 1500s
- * F | 8s 80s 800s 1600s
+ * F | 8s 80s 800s 1600s
*
* Another way to say the same things is:
* For N=1, Timeout = (M+1) * 0.5s
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 28f1214457bd..3066a5127ca8 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -220,7 +220,7 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>");
MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. "
"Note that there is no way to probe for this device -- "
- "so only use it if you are *sure* you are runnning on this specific "
+ "so only use it if you are *sure* you are running on this specific "
"SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 79906255eeb6..d5d399464599 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -41,7 +41,7 @@ static DEFINE_MUTEX(wdt_lock);
#define IFACE_ON_COMMAND 1
#define REBOOT_COMMAND 2
-#define WATCHDOG_NAME "SBC-FITPC2 Watchdog"
+#define WATCHDOG_NAME "SBC-FITPC2 Watchdog"
static void wdt_send_data(unsigned char command, unsigned char data)
{
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 8a1f0bc3e271..df88cfa05f35 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -434,11 +434,11 @@ static long wb_smsc_wdt_ioctl(struct file *file,
} uarg;
static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING |
+ .options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 0,
- .identity = "SMsC 37B787 Watchdog",
+ .identity = "SMsC 37B787 Watchdog",
};
uarg.i = (int __user *)arg;
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 833f49f43d43..100b114e3c3c 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -151,7 +151,7 @@ static int softdog_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42) {
softdog_stop();
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 808372883e88..1bc493848ed4 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -259,7 +259,7 @@ static struct miscdevice sp5100_tco_miscdev = {
* register a pci_driver, because someone else might
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id sp5100_tco_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = {
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
PCI_ANY_ID, },
{ 0, }, /* End of list */
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 9127eda2145b..0a0efe713bc8 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -278,7 +278,7 @@ static struct miscdevice sp805_wdt_miscdev = {
};
static int __devinit
-sp805_wdt_probe(struct amba_device *adev, struct amba_id *id)
+sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 18cdeb4c4258..5a90a4a871dd 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -68,7 +68,7 @@ struct platform_device *ts72xx_wdt_pdev;
* to control register):
* value description
* -------------------------
- * 0x00 watchdog disabled
+ * 0x00 watchdog disabled
* 0x01 250ms
* 0x02 500ms
* 0x03 1s
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index df2a64dc9672..be9c4d839e15 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -87,10 +87,10 @@ static int w83697ug_select_wd_register(void)
outb_p(0x87, WDT_EFER); /* Enter extended function mode */
outb_p(0x87, WDT_EFER); /* Again according to manual */
- outb(0x20, WDT_EFER); /* check chip version */
+ outb(0x20, WDT_EFER); /* check chip version */
version = inb(WDT_EFDR);
- if (version == 0x68) { /* W83697UG */
+ if (version == 0x68) { /* W83697UG */
printk(KERN_INFO PFX "Watchdog chip version 0x%02x = "
"W83697UG/UF found at 0x%04x\n", version, wdt_io);
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 552a4381e78f..bb03e151a1d0 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -581,7 +581,7 @@ static void __exit wdt_exit(void)
}
/**
- * wdt_init:
+ * wdt_init:
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 5c2521fc836c..a2f01c9f5c34 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -281,7 +281,7 @@ static int wdt977_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42) {
wdt977_stop();
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 6130c88fa5ac..172dad6c7693 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -31,7 +31,7 @@
* Jeff Garzik : PCI cleanups
* Tigran Aivazian : Restructured wdtpci_init_one() to handle
* failures
- * Joel Becker : Added WDIOC_GET/SETTIMEOUT
+ * Joel Becker : Added WDIOC_GET/SETTIMEOUT
* Zwane Mwaikambo : Magic char closing, locking changes,
* cleanups
* Matt Domsch : nowayout module option
@@ -727,7 +727,7 @@ static void __devexit wdtpci_remove_one(struct pci_dev *pdev)
}
-static struct pci_device_id wdtpci_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(wdtpci_pci_tbl) = {
{
.vendor = PCI_VENDOR_ID_ACCESSIO,
.device = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
@@ -764,7 +764,7 @@ static void __exit wdtpci_cleanup(void)
/**
- * wdtpci_init:
+ * wdtpci_init:
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c
new file mode 100644
index 000000000000..49bd9d395562
--- /dev/null
+++ b/drivers/watchdog/xen_wdt.c
@@ -0,0 +1,359 @@
+/*
+ * Xen Watchdog Driver
+ *
+ * (c) Copyright 2010 Novell, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define DRV_NAME "wdt"
+#define DRV_VERSION "0.01"
+#define PFX DRV_NAME ": "
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <xen/xen.h>
+#include <asm/xen/hypercall.h>
+#include <xen/interface/sched.h>
+
+static struct platform_device *platform_device;
+static DEFINE_SPINLOCK(wdt_lock);
+static struct sched_watchdog wdt;
+static __kernel_time_t wdt_expires;
+static bool is_active, expect_release;
+
+#define WATCHDOG_TIMEOUT 60 /* in seconds */
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
+ "(default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static inline __kernel_time_t set_timeout(void)
+{
+ wdt.timeout = timeout;
+ return ktime_to_timespec(ktime_get()).tv_sec + timeout;
+}
+
+static int xen_wdt_start(void)
+{
+ __kernel_time_t expires;
+ int err;
+
+ spin_lock(&wdt_lock);
+
+ expires = set_timeout();
+ if (!wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ else
+ err = -EBUSY;
+ if (err > 0) {
+ wdt.id = err;
+ wdt_expires = expires;
+ err = 0;
+ } else
+ BUG_ON(!err);
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_stop(void)
+{
+ int err = 0;
+
+ spin_lock(&wdt_lock);
+
+ wdt.timeout = 0;
+ if (wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ if (!err)
+ wdt.id = 0;
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_kick(void)
+{
+ __kernel_time_t expires;
+ int err;
+
+ spin_lock(&wdt_lock);
+
+ expires = set_timeout();
+ if (wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ else
+ err = -ENXIO;
+ if (!err)
+ wdt_expires = expires;
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_open(struct inode *inode, struct file *file)
+{
+ int err;
+
+ /* /dev/watchdog can only be opened once */
+ if (xchg(&is_active, true))
+ return -EBUSY;
+
+ err = xen_wdt_start();
+ if (err == -EBUSY)
+ err = xen_wdt_kick();
+ return err ?: nonseekable_open(inode, file);
+}
+
+static int xen_wdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_release)
+ xen_wdt_stop();
+ else {
+ printk(KERN_CRIT PFX
+ "unexpected close, not stopping watchdog!\n");
+ xen_wdt_kick();
+ }
+ is_active = false;
+ expect_release = false;
+ return 0;
+}
+
+static ssize_t xen_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ /* See if we got the magic character 'V' and reload the timer */
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* in case it was set long ago */
+ expect_release = false;
+
+ /* scan to see whether or not we got the magic
+ character */
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_release = true;
+ }
+ }
+
+ /* someone wrote to us, we should reload the timer */
+ xen_wdt_kick();
+ }
+ return len;
+}
+
+static long xen_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int new_options, retval = -EINVAL;
+ int new_timeout;
+ int __user *argp = (void __user *)arg;
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = DRV_NAME,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, argp);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_options, argp))
+ return -EFAULT;
+
+ if (new_options & WDIOS_DISABLECARD)
+ retval = xen_wdt_stop();
+ if (new_options & WDIOS_ENABLECARD) {
+ retval = xen_wdt_start();
+ if (retval == -EBUSY)
+ retval = xen_wdt_kick();
+ }
+ return retval;
+
+ case WDIOC_KEEPALIVE:
+ xen_wdt_kick();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_timeout, argp))
+ return -EFAULT;
+ if (!new_timeout)
+ return -EINVAL;
+ timeout = new_timeout;
+ xen_wdt_kick();
+ /* fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(timeout, argp);
+
+ case WDIOC_GETTIMELEFT:
+ retval = wdt_expires - ktime_to_timespec(ktime_get()).tv_sec;
+ return put_user(retval, argp);
+ }
+
+ return -ENOTTY;
+}
+
+static const struct file_operations xen_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = xen_wdt_write,
+ .unlocked_ioctl = xen_wdt_ioctl,
+ .open = xen_wdt_open,
+ .release = xen_wdt_release,
+};
+
+static struct miscdevice xen_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &xen_wdt_fops,
+};
+
+static int __devinit xen_wdt_probe(struct platform_device *dev)
+{
+ struct sched_watchdog wd = { .id = ~0 };
+ int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
+
+ switch (ret) {
+ case -EINVAL:
+ if (!timeout) {
+ timeout = WATCHDOG_TIMEOUT;
+ printk(KERN_INFO PFX
+ "timeout value invalid, using %d\n", timeout);
+ }
+
+ ret = misc_register(&xen_wdt_miscdev);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "cannot register miscdev on minor=%d (%d)\n",
+ WATCHDOG_MINOR, ret);
+ break;
+ }
+
+ printk(KERN_INFO PFX
+ "initialized (timeout=%ds, nowayout=%d)\n",
+ timeout, nowayout);
+ break;
+
+ case -ENOSYS:
+ printk(KERN_INFO PFX "not supported\n");
+ ret = -ENODEV;
+ break;
+
+ default:
+ printk(KERN_INFO PFX "bogus return value %d\n", ret);
+ break;
+ }
+
+ return ret;
+}
+
+static int __devexit xen_wdt_remove(struct platform_device *dev)
+{
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ xen_wdt_stop();
+
+ misc_deregister(&xen_wdt_miscdev);
+
+ return 0;
+}
+
+static void xen_wdt_shutdown(struct platform_device *dev)
+{
+ xen_wdt_stop();
+}
+
+static int xen_wdt_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return xen_wdt_stop();
+}
+
+static int xen_wdt_resume(struct platform_device *dev)
+{
+ return xen_wdt_start();
+}
+
+static struct platform_driver xen_wdt_driver = {
+ .probe = xen_wdt_probe,
+ .remove = __devexit_p(xen_wdt_remove),
+ .shutdown = xen_wdt_shutdown,
+ .suspend = xen_wdt_suspend,
+ .resume = xen_wdt_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ },
+};
+
+static int __init xen_wdt_init_module(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ printk(KERN_INFO PFX "Xen WatchDog Timer Driver v%s\n", DRV_VERSION);
+
+ err = platform_driver_register(&xen_wdt_driver);
+ if (err)
+ return err;
+
+ platform_device = platform_device_register_simple(DRV_NAME,
+ -1, NULL, 0);
+ if (IS_ERR(platform_device)) {
+ err = PTR_ERR(platform_device);
+ platform_driver_unregister(&xen_wdt_driver);
+ }
+
+ return err;
+}
+
+static void __exit xen_wdt_cleanup_module(void)
+{
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&xen_wdt_driver);
+ printk(KERN_INFO PFX "module unloaded\n");
+}
+
+module_init(xen_wdt_init_module);
+module_exit(xen_wdt_cleanup_module);
+
+MODULE_AUTHOR("Jan Beulich <jbeulich@novell.com>");
+MODULE_DESCRIPTION("Xen WatchDog Timer Driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 07bec09d1dad..a59638b37c1a 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -76,10 +76,20 @@ config XEN_XENBUS_FRONTEND
config XEN_GNTDEV
tristate "userspace grant access device driver"
depends on XEN
+ default m
select MMU_NOTIFIER
help
Allows userspace processes to use grants.
+config XEN_GRANT_DEV_ALLOC
+ tristate "User-space grant reference allocator driver"
+ depends on XEN
+ default m
+ help
+ Allows userspace processes to create pages with access granted
+ to other domains. This can be used to implement frontend drivers
+ or as part of an inter-domain shared memory channel.
+
config XEN_PLATFORM_PCI
tristate "xen platform pci device driver"
depends on XEN_PVHVM && PCI
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 5088cc2e6fe2..f420f1ff7f13 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,4 @@
-obj-y += grant-table.o features.o events.o manage.o
+obj-y += grant-table.o features.o events.o manage.o balloon.o
obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
@@ -7,9 +7,10 @@ CFLAGS_features.o := $(nostackp)
obj-$(CONFIG_BLOCK) += biomerge.o
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
-obj-$(CONFIG_XEN_BALLOON) += balloon.o
+obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
+obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
@@ -18,5 +19,6 @@ obj-$(CONFIG_XEN_DOM0) += pci.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
+xen-gntalloc-y := gntalloc.o
xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 718050ace08f..043af8ad6b60 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -1,6 +1,4 @@
/******************************************************************************
- * balloon.c
- *
* Xen balloon driver - enables returning/claiming memory to/from Xen.
*
* Copyright (c) 2003, B Dragovic
@@ -33,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
@@ -42,13 +39,11 @@
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/sysdev.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/e820.h>
@@ -58,35 +53,29 @@
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
-#include <xen/xenbus.h>
+#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
-#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
-
-#define BALLOON_CLASS_NAME "xen_memory"
+/*
+ * balloon_process() state:
+ *
+ * BP_DONE: done or nothing to do,
+ * BP_EAGAIN: error, go to sleep,
+ * BP_ECANCELED: error, balloon operation canceled.
+ */
-struct balloon_stats {
- /* We aim for 'current allocation' == 'target allocation'. */
- unsigned long current_pages;
- unsigned long target_pages;
- /*
- * Drivers may alter the memory reservation independently, but they
- * must inform the balloon driver so we avoid hitting the hard limit.
- */
- unsigned long driver_pages;
- /* Number of pages in high- and low-memory balloons. */
- unsigned long balloon_low;
- unsigned long balloon_high;
+enum bp_state {
+ BP_DONE,
+ BP_EAGAIN,
+ BP_ECANCELED
};
-static DEFINE_MUTEX(balloon_mutex);
-
-static struct sys_device balloon_sysdev;
-static int register_balloon(struct sys_device *sysdev);
+static DEFINE_MUTEX(balloon_mutex);
-static struct balloon_stats balloon_stats;
+struct balloon_stats balloon_stats;
+EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
@@ -104,8 +93,7 @@ static LIST_HEAD(ballooned_pages);
/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
-static DECLARE_WORK(balloon_worker, balloon_process);
-static struct timer_list balloon_timer;
+static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
@@ -140,14 +128,17 @@ static void balloon_append(struct page *page)
}
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-static struct page *balloon_retrieve(void)
+static struct page *balloon_retrieve(bool prefer_highmem)
{
struct page *page;
if (list_empty(&ballooned_pages))
return NULL;
- page = list_entry(ballooned_pages.next, struct page, lru);
+ if (prefer_highmem)
+ page = list_entry(ballooned_pages.prev, struct page, lru);
+ else
+ page = list_entry(ballooned_pages.next, struct page, lru);
list_del(&page->lru);
if (PageHighMem(page)) {
@@ -177,9 +168,29 @@ static struct page *balloon_next_page(struct page *page)
return list_entry(next, struct page, lru);
}
-static void balloon_alarm(unsigned long unused)
+static enum bp_state update_schedule(enum bp_state state)
{
- schedule_work(&balloon_worker);
+ if (state == BP_DONE) {
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.retry_count = 1;
+ return BP_DONE;
+ }
+
+ ++balloon_stats.retry_count;
+
+ if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
+ balloon_stats.retry_count > balloon_stats.max_retry_count) {
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.retry_count = 1;
+ return BP_ECANCELED;
+ }
+
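+ /* back off exponentially: 1s, 2s, 4s, ... capped at max_schedule_delay */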
+ balloon_stats.schedule_delay <<= 1;
+
+ if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
+ balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
+
+ return BP_EAGAIN;
}
static unsigned long current_target(void)
@@ -194,11 +205,11 @@ static unsigned long current_target(void)
return target;
}
-static int increase_reservation(unsigned long nr_pages)
+static enum bp_state increase_reservation(unsigned long nr_pages)
{
+ int rc;
unsigned long pfn, i;
struct page *page;
- long rc;
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
@@ -210,7 +221,10 @@ static int increase_reservation(unsigned long nr_pages)
page = balloon_first_page();
for (i = 0; i < nr_pages; i++) {
- BUG_ON(page == NULL);
+ if (!page) {
+ nr_pages = i;
+ break;
+ }
frame_list[i] = page_to_pfn(page);
page = balloon_next_page(page);
}
@@ -218,11 +232,11 @@ static int increase_reservation(unsigned long nr_pages)
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
- if (rc < 0)
- goto out;
+ if (rc <= 0)
+ return BP_EAGAIN;
for (i = 0; i < rc; i++) {
- page = balloon_retrieve();
+ page = balloon_retrieve(false);
BUG_ON(page == NULL);
pfn = page_to_pfn(page);
@@ -249,15 +263,14 @@ static int increase_reservation(unsigned long nr_pages)
balloon_stats.current_pages += rc;
- out:
- return rc < 0 ? rc : rc != nr_pages;
+ return BP_DONE;
}
-static int decrease_reservation(unsigned long nr_pages)
+static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
+ enum bp_state state = BP_DONE;
unsigned long pfn, i;
struct page *page;
- int need_sleep = 0;
int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
@@ -269,9 +282,9 @@ static int decrease_reservation(unsigned long nr_pages)
nr_pages = ARRAY_SIZE(frame_list);
for (i = 0; i < nr_pages; i++) {
- if ((page = alloc_page(GFP_BALLOON)) == NULL) {
+ if ((page = alloc_page(gfp)) == NULL) {
nr_pages = i;
- need_sleep = 1;
+ state = BP_EAGAIN;
break;
}
@@ -307,7 +320,7 @@ static int decrease_reservation(unsigned long nr_pages)
balloon_stats.current_pages -= nr_pages;
- return need_sleep;
+ return state;
}
/*
@@ -318,77 +331,101 @@ static int decrease_reservation(unsigned long nr_pages)
*/
static void balloon_process(struct work_struct *work)
{
- int need_sleep = 0;
+ enum bp_state state = BP_DONE;
long credit;
mutex_lock(&balloon_mutex);
do {
credit = current_target() - balloon_stats.current_pages;
+
if (credit > 0)
- need_sleep = (increase_reservation(credit) != 0);
+ state = increase_reservation(credit);
+
if (credit < 0)
- need_sleep = (decrease_reservation(-credit) != 0);
+ state = decrease_reservation(-credit, GFP_BALLOON);
+
+ state = update_schedule(state);
#ifndef CONFIG_PREEMPT
if (need_resched())
schedule();
#endif
- } while ((credit != 0) && !need_sleep);
+ } while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */
- if (current_target() != balloon_stats.current_pages)
- mod_timer(&balloon_timer, jiffies + HZ);
+ if (state == BP_EAGAIN)
+ schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
mutex_unlock(&balloon_mutex);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
-static void balloon_set_new_target(unsigned long target)
+void balloon_set_new_target(unsigned long target)
{
/* No need for lock. Not read-modify-write updates. */
balloon_stats.target_pages = target;
- schedule_work(&balloon_worker);
+ schedule_delayed_work(&balloon_worker, 0);
}
+EXPORT_SYMBOL_GPL(balloon_set_new_target);
-static struct xenbus_watch target_watch =
-{
- .node = "memory/target"
-};
-
-/* React to a change in the target key */
-static void watch_target(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+/**
+ * alloc_xenballooned_pages - get pages that have been ballooned out
+ * @nr_pages: Number of pages to get
+ * @pages: pages returned
+ * @return 0 on success, error otherwise
+ */
+int alloc_xenballooned_pages(int nr_pages, struct page** pages)
{
- unsigned long long new_target;
- int err;
-
- err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
- if (err != 1) {
- /* This is ok (for domain0 at least) - so just return */
- return;
+ int pgno = 0;
+ struct page* page;
+ mutex_lock(&balloon_mutex);
+ while (pgno < nr_pages) {
+ page = balloon_retrieve(true);
+ if (page) {
+ pages[pgno++] = page;
+ } else {
+ enum bp_state st;
+ st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER);
+ if (st != BP_DONE)
+ goto out_undo;
+ }
}
-
- /* The given memory/target value is in KiB, so it needs converting to
- * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
- */
- balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+ mutex_unlock(&balloon_mutex);
+ return 0;
+ out_undo:
+ while (pgno)
+ balloon_append(pages[--pgno]);
+ /* Free the memory back to the kernel soon */
+ schedule_delayed_work(&balloon_worker, 0);
+ mutex_unlock(&balloon_mutex);
+ return -ENOMEM;
}
+EXPORT_SYMBOL(alloc_xenballooned_pages);
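+
+/*
+ * Caller sketch (illustrative, error handling trimmed); gntdev below uses
+ * this pattern to back foreign grant mappings with ballooned-out frames:
+ *
+ *	struct page *pages[4];
+ *	int rc = alloc_xenballooned_pages(4, pages);
+ *	if (rc)
+ *		return rc;
+ *	...map grants into pages[]...
+ *	free_xenballooned_pages(4, pages);
+ */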
-static int balloon_init_watcher(struct notifier_block *notifier,
- unsigned long event,
- void *data)
+/**
+ * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
+ * @nr_pages: Number of pages
+ * @pages: pages to return
+ */
+void free_xenballooned_pages(int nr_pages, struct page** pages)
{
- int err;
+ int i;
- err = register_xenbus_watch(&target_watch);
- if (err)
- printk(KERN_ERR "Failed to set balloon watcher\n");
+ mutex_lock(&balloon_mutex);
- return NOTIFY_DONE;
-}
+ for (i = 0; i < nr_pages; i++) {
+ if (pages[i])
+ balloon_append(pages[i]);
+ }
+
+ /* The balloon may be too large now. Shrink it if needed. */
+ if (current_target() != balloon_stats.current_pages)
+ schedule_delayed_work(&balloon_worker, 0);
-static struct notifier_block xenstore_notifier;
+ mutex_unlock(&balloon_mutex);
+}
+EXPORT_SYMBOL(free_xenballooned_pages);
static int __init balloon_init(void)
{
@@ -398,7 +435,7 @@ static int __init balloon_init(void)
if (!xen_domain())
return -ENODEV;
- pr_info("xen_balloon: Initialising balloon driver.\n");
+ pr_info("xen/balloon: Initialising balloon driver.\n");
if (xen_pv_domain())
nr_pages = xen_start_info->nr_pages;
@@ -408,13 +445,11 @@ static int __init balloon_init(void)
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
- balloon_stats.driver_pages = 0UL;
-
- init_timer(&balloon_timer);
- balloon_timer.data = 0;
- balloon_timer.function = balloon_alarm;
- register_balloon(&balloon_sysdev);
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.max_schedule_delay = 32;
+ balloon_stats.retry_count = 1;
+ balloon_stats.max_retry_count = RETRY_UNLIMITED;
/*
* Initialise the balloon with excess memory space. We need
@@ -436,153 +471,9 @@ static int __init balloon_init(void)
__balloon_append(page);
}
- target_watch.callback = watch_target;
- xenstore_notifier.notifier_call = balloon_init_watcher;
-
- register_xenstore_notifier(&xenstore_notifier);
-
return 0;
}
subsys_initcall(balloon_init);
-static void balloon_exit(void)
-{
- /* XXX - release balloon here */
- return;
-}
-
-module_exit(balloon_exit);
-
-#define BALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
- char *buf) \
- { \
- return sprintf(buf, format, ##args); \
- } \
- static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
-
-BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
-BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
-BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
-BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
-
-static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
-}
-
-static ssize_t store_target_kb(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf,
- size_t count)
-{
- char *endchar;
- unsigned long long target_bytes;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;
-
- balloon_set_new_target(target_bytes >> PAGE_SHIFT);
-
- return count;
-}
-
-static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
- show_target_kb, store_target_kb);
-
-
-static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%llu\n",
- (unsigned long long)balloon_stats.target_pages
- << PAGE_SHIFT);
-}
-
-static ssize_t store_target(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf,
- size_t count)
-{
- char *endchar;
- unsigned long long target_bytes;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- target_bytes = memparse(buf, &endchar);
-
- balloon_set_new_target(target_bytes >> PAGE_SHIFT);
-
- return count;
-}
-
-static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
- show_target, store_target);
-
-
-static struct sysdev_attribute *balloon_attrs[] = {
- &attr_target_kb,
- &attr_target,
-};
-
-static struct attribute *balloon_info_attrs[] = {
- &attr_current_kb.attr,
- &attr_low_kb.attr,
- &attr_high_kb.attr,
- &attr_driver_kb.attr,
- NULL
-};
-
-static struct attribute_group balloon_info_group = {
- .name = "info",
- .attrs = balloon_info_attrs,
-};
-
-static struct sysdev_class balloon_sysdev_class = {
- .name = BALLOON_CLASS_NAME,
-};
-
-static int register_balloon(struct sys_device *sysdev)
-{
- int i, error;
-
- error = sysdev_class_register(&balloon_sysdev_class);
- if (error)
- return error;
-
- sysdev->id = 0;
- sysdev->cls = &balloon_sysdev_class;
-
- error = sysdev_register(sysdev);
- if (error) {
- sysdev_class_unregister(&balloon_sysdev_class);
- return error;
- }
-
- for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
- error = sysdev_create_file(sysdev, balloon_attrs[i]);
- if (error)
- goto fail;
- }
-
- error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
- if (error)
- goto fail;
-
- return 0;
-
- fail:
- while (--i >= 0)
- sysdev_remove_file(sysdev, balloon_attrs[i]);
- sysdev_unregister(sysdev);
- sysdev_class_unregister(&balloon_sysdev_class);
- return error;
-}
-
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 65f5068afd84..02b5a9c05cfa 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -56,6 +56,8 @@
*/
static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static LIST_HEAD(xen_irq_list_head);
+
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
@@ -85,7 +87,9 @@ enum xen_irq_type {
*/
struct irq_info
{
+ struct list_head list;
enum xen_irq_type type; /* type */
+ unsigned irq;
unsigned short evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
@@ -103,23 +107,10 @@ struct irq_info
#define PIRQ_NEEDS_EOI (1 << 0)
#define PIRQ_SHAREABLE (1 << 1)
-static struct irq_info *irq_info;
-static int *pirq_to_irq;
-
static int *evtchn_to_irq;
-struct cpu_evtchn_s {
- unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
-};
-static __initdata struct cpu_evtchn_s init_evtchn_mask = {
- .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
-};
-static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
-
-static inline unsigned long *cpu_evtchn_mask(int cpu)
-{
- return cpu_evtchn_mask_p[cpu].bits;
-}
+static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+ cpu_evtchn_mask);
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
@@ -128,46 +119,86 @@ static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
-/* Constructor for packed IRQ information. */
-static struct irq_info mk_unbound_info(void)
+/* Get info for IRQ */
+static struct irq_info *info_for_irq(unsigned irq)
+{
+ return get_irq_data(irq);
+}
+
+/* Constructors for packed IRQ information. */
+static void xen_irq_info_common_init(struct irq_info *info,
+ unsigned irq,
+ enum xen_irq_type type,
+ unsigned short evtchn,
+ unsigned short cpu)
{
- return (struct irq_info) { .type = IRQT_UNBOUND };
+
+ BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
+
+ info->type = type;
+ info->irq = irq;
+ info->evtchn = evtchn;
+ info->cpu = cpu;
+
+ evtchn_to_irq[evtchn] = irq;
}
-static struct irq_info mk_evtchn_info(unsigned short evtchn)
+static void xen_irq_info_evtchn_init(unsigned irq,
+ unsigned short evtchn)
{
- return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
- .cpu = 0 };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}
-static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
+static void xen_irq_info_ipi_init(unsigned cpu,
+ unsigned irq,
+ unsigned short evtchn,
+ enum ipi_vector ipi)
{
- return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
- .cpu = 0, .u.ipi = ipi };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
+
+ info->u.ipi = ipi;
+
+ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}
-static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+static void xen_irq_info_virq_init(unsigned cpu,
+ unsigned irq,
+ unsigned short evtchn,
+ unsigned short virq)
{
- return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
- .cpu = 0, .u.virq = virq };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
+
+ info->u.virq = virq;
+
+ per_cpu(virq_to_irq, cpu)[virq] = irq;
}
-static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
- unsigned short gsi, unsigned short vector)
+static void xen_irq_info_pirq_init(unsigned irq,
+ unsigned short evtchn,
+ unsigned short pirq,
+ unsigned short gsi,
+ unsigned short vector,
+ unsigned char flags)
{
- return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
- .cpu = 0,
- .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
+
+ info->u.pirq.pirq = pirq;
+ info->u.pirq.gsi = gsi;
+ info->u.pirq.vector = vector;
+ info->u.pirq.flags = flags;
}
/*
* Accessors for packed IRQ information.
*/
-static struct irq_info *info_for_irq(unsigned irq)
-{
- return &irq_info[irq];
-}
-
static unsigned int evtchn_from_irq(unsigned irq)
{
if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
@@ -212,26 +243,6 @@ static unsigned pirq_from_irq(unsigned irq)
return info->u.pirq.pirq;
}
-static unsigned gsi_from_irq(unsigned irq)
-{
- struct irq_info *info = info_for_irq(irq);
-
- BUG_ON(info == NULL);
- BUG_ON(info->type != IRQT_PIRQ);
-
- return info->u.pirq.gsi;
-}
-
-static unsigned vector_from_irq(unsigned irq)
-{
- struct irq_info *info = info_for_irq(irq);
-
- BUG_ON(info == NULL);
- BUG_ON(info->type != IRQT_PIRQ);
-
- return info->u.pirq.vector;
-}
-
static enum xen_irq_type type_from_irq(unsigned irq)
{
return info_for_irq(irq)->type;
@@ -267,7 +278,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
unsigned int idx)
{
return (sh->evtchn_pending[idx] &
- cpu_evtchn_mask(cpu)[idx] &
+ per_cpu(cpu_evtchn_mask, cpu)[idx] &
~sh->evtchn_mask[idx]);
}
@@ -280,28 +291,28 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif
- clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
- set_bit(chn, cpu_evtchn_mask(cpu));
+ clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
+ set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
- irq_info[irq].cpu = cpu;
+ info_for_irq(irq)->cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
int i;
#ifdef CONFIG_SMP
- struct irq_desc *desc;
+ struct irq_info *info;
/* By default all event channels notify CPU#0. */
- for_each_irq_desc(i, desc) {
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ struct irq_desc *desc = irq_to_desc(info->irq);
cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
}
#endif
for_each_possible_cpu(i)
- memset(cpu_evtchn_mask(i),
- (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
-
+ memset(per_cpu(cpu_evtchn_mask, i),
+ (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}
static inline void clear_evtchn(int port)
@@ -376,7 +387,28 @@ static void unmask_evtchn(int port)
put_cpu();
}
-static int xen_allocate_irq_dynamic(void)
+static void xen_irq_init(unsigned irq)
+{
+ struct irq_info *info;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+#ifdef CONFIG_SMP
+ /* By default all event channels notify CPU#0. */
+ cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+#endif
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ panic("Unable to allocate metadata for IRQ%d\n", irq);
+
+ info->type = IRQT_UNBOUND;
+
+ set_irq_data(irq, info);
+
+ list_add_tail(&info->list, &xen_irq_list_head);
+}
+
+static int __must_check xen_allocate_irq_dynamic(void)
{
int first = 0;
int irq;
@@ -393,22 +425,14 @@ static int xen_allocate_irq_dynamic(void)
first = get_nr_irqs_gsi();
#endif
-retry:
irq = irq_alloc_desc_from(first, -1);
- if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
- printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
- first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
- goto retry;
- }
-
- if (irq < 0)
- panic("No available IRQ to bind to: increase nr_irqs!\n");
+ xen_irq_init(irq);
return irq;
}
-static int xen_allocate_irq_gsi(unsigned gsi)
+static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
int irq;
@@ -423,17 +447,25 @@ static int xen_allocate_irq_gsi(unsigned gsi)
/* Legacy IRQ descriptors are already allocated by the arch. */
if (gsi < NR_IRQS_LEGACY)
- return gsi;
+ irq = gsi;
+ else
+ irq = irq_alloc_desc_at(gsi, -1);
- irq = irq_alloc_desc_at(gsi, -1);
- if (irq < 0)
- panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
+ xen_irq_init(irq);
return irq;
}
static void xen_free_irq(unsigned irq)
{
+ struct irq_info *info = get_irq_data(irq);
+
+ list_del(&info->list);
+
+ set_irq_data(irq, NULL);
+
+ kfree(info);
+
/* Legacy IRQ descriptors are managed by the arch. */
if (irq < NR_IRQS_LEGACY)
return;
@@ -563,51 +595,39 @@ static void ack_pirq(struct irq_data *data)
static int find_irq_by_gsi(unsigned gsi)
{
- int irq;
+ struct irq_info *info;
- for (irq = 0; irq < nr_irqs; irq++) {
- struct irq_info *info = info_for_irq(irq);
-
- if (info == NULL || info->type != IRQT_PIRQ)
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info->type != IRQT_PIRQ)
continue;
- if (gsi_from_irq(irq) == gsi)
- return irq;
+ if (info->u.pirq.gsi == gsi)
+ return info->irq;
}
return -1;
}
-int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
+int xen_allocate_pirq_gsi(unsigned gsi)
{
- return xen_map_pirq_gsi(gsi, gsi, shareable, name);
+ return gsi;
}
-/* xen_map_pirq_gsi might allocate irqs from the top down, as a
- * consequence don't assume that the irq number returned has a low value
- * or can be used as a pirq number unless you know otherwise.
- *
- * One notable exception is when xen_map_pirq_gsi is called passing an
- * hardware gsi as argument, in that case the irq number returned
- * matches the gsi number passed as second argument.
+/*
+ * Do not make any assumptions regarding the relationship between the
+ * IRQ number returned here and the Xen pirq argument.
*
* Note: We don't assign an event channel until the irq actually started
* up. Return an existing irq if we've already got one for the gsi.
*/
-int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
+int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ unsigned pirq, int shareable, char *name)
{
- int irq = 0;
+ int irq = -1;
struct physdev_irq irq_op;
spin_lock(&irq_mapping_update_lock);
- if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
- printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
- pirq > nr_irqs ? "pirq" :"",
- gsi > nr_irqs ? "gsi" : "");
- goto out;
- }
-
irq = find_irq_by_gsi(gsi);
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
@@ -616,6 +636,8 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
}
irq = xen_allocate_irq_gsi(gsi);
+ if (irq < 0)
+ goto out;
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
handle_level_irq, name);
@@ -633,9 +655,8 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
goto out;
}
- irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
- irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
- pirq_to_irq[pirq] = irq;
+ xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
+ shareable ? PIRQ_SHAREABLE : 0);
out:
spin_unlock(&irq_mapping_update_lock);
@@ -672,8 +693,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
handle_level_irq, name);
- irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
- pirq_to_irq[pirq] = irq;
+ xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0)
goto error_irq;
@@ -709,9 +729,6 @@ int xen_destroy_irq(int irq)
goto out;
}
}
- pirq_to_irq[info->u.pirq.pirq] = -1;
-
- irq_info[irq] = mk_unbound_info();
xen_free_irq(irq);
@@ -720,19 +737,26 @@ out:
return rc;
}
-int xen_vector_from_irq(unsigned irq)
+int xen_irq_from_pirq(unsigned pirq)
{
- return vector_from_irq(irq);
-}
+ int irq;
-int xen_gsi_from_irq(unsigned irq)
-{
- return gsi_from_irq(irq);
-}
+ struct irq_info *info;
-int xen_irq_from_pirq(unsigned pirq)
-{
- return pirq_to_irq[pirq];
+ spin_lock(&irq_mapping_update_lock);
+
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info == NULL || info->type != IRQT_PIRQ)
+ continue;
+ irq = info->irq;
+ if (info->u.pirq.pirq == pirq)
+ goto out;
+ }
+ irq = -1;
+out:
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
}
int bind_evtchn_to_irq(unsigned int evtchn)
@@ -745,14 +769,16 @@ int bind_evtchn_to_irq(unsigned int evtchn)
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
+ if (irq == -1)
+ goto out;
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_fasteoi_irq, "event");
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_evtchn_info(evtchn);
+ xen_irq_info_evtchn_init(irq, evtchn);
}
+out:
spin_unlock(&irq_mapping_update_lock);
return irq;
@@ -782,9 +808,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
BUG();
evtchn = bind_ipi.port;
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_ipi_info(evtchn, ipi);
- per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+ xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
}
@@ -821,6 +845,8 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
+ if (irq == -1)
+ goto out;
set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
handle_percpu_irq, "virq");
@@ -832,14 +858,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
BUG();
evtchn = bind_virq.port;
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_virq_info(evtchn, virq);
-
- per_cpu(virq_to_irq, cpu)[virq] = irq;
+ xen_irq_info_virq_init(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
}
+out:
spin_unlock(&irq_mapping_update_lock);
return irq;
@@ -876,11 +900,9 @@ static void unbind_from_irq(unsigned int irq)
evtchn_to_irq[evtchn] = -1;
}
- if (irq_info[irq].type != IRQT_UNBOUND) {
- irq_info[irq] = mk_unbound_info();
+ BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
- xen_free_irq(irq);
- }
+ xen_free_irq(irq);
spin_unlock(&irq_mapping_update_lock);
}
@@ -894,6 +916,8 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
int retval;
irq = bind_evtchn_to_irq(evtchn);
+ if (irq < 0)
+ return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -935,6 +959,8 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
int retval;
irq = bind_virq_to_irq(virq, cpu);
+ if (irq < 0)
+ return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -986,7 +1012,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
struct shared_info *sh = HYPERVISOR_shared_info;
int cpu = smp_processor_id();
- unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
+ unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
int i;
unsigned long flags;
static DEFINE_SPINLOCK(debug_lock);
@@ -1064,6 +1090,13 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+static DEFINE_PER_CPU(unsigned int, current_word_idx);
+static DEFINE_PER_CPU(unsigned int, current_bit_idx);
+
+/*
+ * Mask out the i least significant bits of w
+ */
+#define MASK_LSBS(w, i) (w & ((~0UL) << i))
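+/* e.g. MASK_LSBS(0xb6, 3) == 0xb0: bits 0..2 are cleared */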
/*
* Search the CPUs pending events bitmasks. For each one found, map
@@ -1076,6 +1109,9 @@ static DEFINE_PER_CPU(unsigned, xed_nesting_count);
*/
static void __xen_evtchn_do_upcall(void)
{
+ int start_word_idx, start_bit_idx;
+ int word_idx, bit_idx;
+ int i;
int cpu = get_cpu();
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1094,17 +1130,57 @@ static void __xen_evtchn_do_upcall(void)
wmb();
#endif
pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
- while (pending_words != 0) {
+
+ start_word_idx = __this_cpu_read(current_word_idx);
+ start_bit_idx = __this_cpu_read(current_bit_idx);
+
+ word_idx = start_word_idx;
+
+ for (i = 0; pending_words != 0; i++) {
unsigned long pending_bits;
- int word_idx = __ffs(pending_words);
- pending_words &= ~(1UL << word_idx);
+ unsigned long words;
- while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
- int bit_idx = __ffs(pending_bits);
- int port = (word_idx * BITS_PER_LONG) + bit_idx;
- int irq = evtchn_to_irq[port];
+ words = MASK_LSBS(pending_words, word_idx);
+
+ /*
+ * If we masked out all events, wrap to beginning.
+ */
+ if (words == 0) {
+ word_idx = 0;
+ bit_idx = 0;
+ continue;
+ }
+ word_idx = __ffs(words);
+
+ pending_bits = active_evtchns(cpu, s, word_idx);
+ bit_idx = 0; /* usually scan entire word from start */
+ if (word_idx == start_word_idx) {
+ /* We scan the starting word in two parts */
+ if (i == 0)
+ /* 1st time: start in the middle */
+ bit_idx = start_bit_idx;
+ else
+ /* 2nd time: mask bits done already */
+ bit_idx &= (1UL << start_bit_idx) - 1;
+ }
+
+ do {
+ unsigned long bits;
+ int port, irq;
struct irq_desc *desc;
+ bits = MASK_LSBS(pending_bits, bit_idx);
+
+ /* If we masked out all events, move on. */
+ if (bits == 0)
+ break;
+
+ bit_idx = __ffs(bits);
+
+ /* Process port. */
+ port = (word_idx * BITS_PER_LONG) + bit_idx;
+ irq = evtchn_to_irq[port];
+
mask_evtchn(port);
clear_evtchn(port);
@@ -1113,7 +1189,21 @@ static void __xen_evtchn_do_upcall(void)
if (desc)
generic_handle_irq_desc(irq, desc);
}
- }
+
+ bit_idx = (bit_idx + 1) % BITS_PER_LONG;
+
+ /* Next caller starts at last processed + 1 */
+ __this_cpu_write(current_word_idx,
+ bit_idx ? word_idx :
+ (word_idx+1) % BITS_PER_LONG);
+ __this_cpu_write(current_bit_idx, bit_idx);
+ } while (bit_idx != 0);
+
+ /* Scan start_word_idx twice; all others once. */
+ if ((word_idx != start_word_idx) || (i != 0))
+ pending_words &= ~(1UL << word_idx);
+
+ word_idx = (word_idx + 1) % BITS_PER_LONG;
}
BUG_ON(!irqs_disabled());
@@ -1163,8 +1253,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
so there should be a proper type */
BUG_ON(info->type == IRQT_UNBOUND);
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_evtchn_info(evtchn);
+ xen_irq_info_evtchn_init(irq, evtchn);
spin_unlock(&irq_mapping_update_lock);
@@ -1181,10 +1270,14 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
- /* events delivered via platform PCI interrupts are always
- * routed to vcpu 0 */
- if (!VALID_EVTCHN(evtchn) ||
- (xen_hvm_domain() && !xen_have_vector_callback))
+ if (!VALID_EVTCHN(evtchn))
+ return -1;
+
+ /*
+ * Events delivered via platform PCI interrupts are always
+ * routed to vcpu 0 and hence cannot be rebound.
+ */
+ if (xen_hvm_domain() && !xen_have_vector_callback)
return -1;
/* Send future instances of this interrupt to other vcpu. */
@@ -1271,19 +1364,22 @@ static int retrigger_dynirq(struct irq_data *data)
return ret;
}
-static void restore_cpu_pirqs(void)
+static void restore_pirqs(void)
{
int pirq, rc, irq, gsi;
struct physdev_map_pirq map_irq;
+ struct irq_info *info;
- for (pirq = 0; pirq < nr_irqs; pirq++) {
- irq = pirq_to_irq[pirq];
- if (irq == -1)
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info->type != IRQT_PIRQ)
continue;
+ pirq = info->u.pirq.pirq;
+ gsi = info->u.pirq.gsi;
+ irq = info->irq;
+
/* save/restore of PT devices doesn't work, so at this point the
* only devices present are GSI based emulated devices */
- gsi = gsi_from_irq(irq);
if (!gsi)
continue;
@@ -1296,8 +1392,7 @@ static void restore_cpu_pirqs(void)
if (rc) {
printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
gsi, irq, pirq, rc);
- irq_info[irq] = mk_unbound_info();
- pirq_to_irq[pirq] = -1;
+ xen_free_irq(irq);
continue;
}
@@ -1327,8 +1422,7 @@ static void restore_cpu_virqs(unsigned int cpu)
evtchn = bind_virq.port;
/* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_virq_info(evtchn, virq);
+ xen_irq_info_virq_init(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1352,8 +1446,7 @@ static void restore_cpu_ipis(unsigned int cpu)
evtchn = bind_ipi.port;
/* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_ipi_info(evtchn, ipi);
+ xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1413,7 +1506,8 @@ void xen_poll_irq(int irq)
void xen_irq_resume(void)
{
- unsigned int cpu, irq, evtchn;
+ unsigned int cpu, evtchn;
+ struct irq_info *info;
init_evtchn_cpu_bindings();
@@ -1422,8 +1516,8 @@ void xen_irq_resume(void)
mask_evtchn(evtchn);
/* No IRQ <-> event-channel mappings. */
- for (irq = 0; irq < nr_irqs; irq++)
- irq_info[irq].evtchn = 0; /* zap event-channel binding */
+ list_for_each_entry(info, &xen_irq_list_head, list)
+ info->evtchn = 0; /* zap event-channel binding */
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
evtchn_to_irq[evtchn] = -1;
@@ -1433,7 +1527,7 @@ void xen_irq_resume(void)
restore_cpu_ipis(cpu);
}
- restore_cpu_pirqs();
+ restore_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
@@ -1519,17 +1613,6 @@ void __init xen_init_IRQ(void)
{
int i;
- cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
- GFP_KERNEL);
- irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
-
- /* We are using nr_irqs as the maximum number of pirq available but
- * that number is actually chosen by Xen and we don't know exactly
- * what it is. Be careful choosing high pirq numbers. */
- pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
- for (i = 0; i < nr_irqs; i++)
- pirq_to_irq[i] = -1;
-
evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
GFP_KERNEL);
for (i = 0; i < NR_EVENT_CHANNELS; i++)
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
new file mode 100644
index 000000000000..a7ffdfe19fc9
--- /dev/null
+++ b/drivers/xen/gntalloc.c
@@ -0,0 +1,545 @@
+/******************************************************************************
+ * gntalloc.c
+ *
+ * Device for creating grant references (in user-space) that may be shared
+ * with other domains.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This driver exists to allow userspace programs in Linux to allocate kernel
+ * memory that will later be shared with another domain. Without this device,
+ * Linux userspace programs cannot create grant references.
+ *
+ * How this stuff works:
+ * X -> granting a page to Y
+ * Y -> mapping the grant from X
+ *
+ * 1. X uses the gntalloc device to allocate a page of kernel memory, P.
+ * 2. X creates an entry in the grant table that says domid(Y) can access P.
+ * This is done without a hypercall unless the grant table needs expansion.
+ * 3. X gives the grant reference identifier, GREF, to Y.
+ * 4. Y maps the page, either directly into kernel memory for use in a backend
+ * driver, or via the gntdev device to map it into the address space of an
+ * application running in Y. This is the first point at which Xen does any
+ * tracking of the page.
+ * 5. A program in X mmap()s a segment of the gntalloc device that corresponds
+ * to the shared page, and can now communicate with Y over the shared page.
+ *
+ *
+ * NOTE TO USERSPACE LIBRARIES:
+ * The grant allocation and mmap()ing are, naturally, two separate operations.
+ * You set up the sharing by calling the create ioctl() and then the mmap().
+ * Teardown requires munmap() and either close() or ioctl().
+ *
+ * WARNING: Since Xen does not allow a guest to forcibly end the use of a grant
+ * reference, this device can be used to consume kernel memory by leaving grant
+ * references mapped by another domain when an application exits. Therefore,
+ * there is a global limit on the number of pages that can be allocated. When
+ * all references to the page are unmapped, it will be freed during the next
+ * grant operation.
+ */
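+
+/*
+ * A minimal userspace sketch of the sequence above (illustrative only;
+ * assumes the device node appears as /dev/xen/gntalloc and that
+ * "remote_domid" is learned out of band; error handling omitted):
+ *
+ *	struct ioctl_gntalloc_alloc_gref op = {
+ *		.domid = remote_domid,
+ *		.flags = GNTALLOC_FLAG_WRITABLE,
+ *		.count = 1,
+ *	};
+ *	int fd = open("/dev/xen/gntalloc", O_RDWR);
+ *	ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &op);
+ *	void *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ *			    MAP_SHARED, fd, op.index);
+ *	(hand op.gref_ids[0] to the other domain, then communicate via shared)
+ */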
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+#include <xen/gntalloc.h>
+#include <xen/events.h>
+
+static int limit = 1024;
+module_param(limit, int, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
+ "the gntalloc device");
+
+static LIST_HEAD(gref_list);
+static DEFINE_SPINLOCK(gref_lock);
+static int gref_size;
+
+struct notify_info {
+ uint16_t pgoff:12; /* Bits 0-11: Offset of the byte to clear */
+ uint16_t flags:2; /* Bits 12-13: Unmap notification flags */
+ int event; /* Port (event channel) to notify */
+};
+
+/* Metadata on a grant reference. */
+struct gntalloc_gref {
+ struct list_head next_gref; /* list entry gref_list */
+ struct list_head next_file; /* list entry file->list, if open */
+ struct page *page; /* The shared page */
+ uint64_t file_index; /* File offset for mmap() */
+ unsigned int users; /* Use count - when zero, waiting on Xen */
+ grant_ref_t gref_id; /* The grant reference number */
+ struct notify_info notify; /* Unmap notification */
+};
+
+struct gntalloc_file_private_data {
+ struct list_head list;
+ uint64_t index;
+};
+
+static void __del_gref(struct gntalloc_gref *gref);
+
+static void do_cleanup(void)
+{
+ struct gntalloc_gref *gref, *n;
+ list_for_each_entry_safe(gref, n, &gref_list, next_gref) {
+ if (!gref->users)
+ __del_gref(gref);
+ }
+}
+
+static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
+ uint32_t *gref_ids, struct gntalloc_file_private_data *priv)
+{
+ int i, rc, readonly;
+ LIST_HEAD(queue_gref);
+ LIST_HEAD(queue_file);
+ struct gntalloc_gref *gref;
+
+ readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
+ rc = -ENOMEM;
+ for (i = 0; i < op->count; i++) {
+ gref = kzalloc(sizeof(*gref), GFP_KERNEL);
+ if (!gref)
+ goto undo;
+ list_add_tail(&gref->next_gref, &queue_gref);
+ list_add_tail(&gref->next_file, &queue_file);
+ gref->users = 1;
+ gref->file_index = op->index + i * PAGE_SIZE;
+ gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!gref->page)
+ goto undo;
+
+ /* Grant foreign access to the page. */
+ gref->gref_id = gnttab_grant_foreign_access(op->domid,
+ pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+ if (gref->gref_id < 0) {
+ rc = gref->gref_id;
+ goto undo;
+ }
+ gref_ids[i] = gref->gref_id;
+ }
+
+ /* Add to gref lists. */
+ spin_lock(&gref_lock);
+ list_splice_tail(&queue_gref, &gref_list);
+ list_splice_tail(&queue_file, &priv->list);
+ spin_unlock(&gref_lock);
+
+ return 0;
+
+undo:
+ spin_lock(&gref_lock);
+ gref_size -= (op->count - i);
+
+ list_for_each_entry(gref, &queue_file, next_file) {
+ /* __del_gref does not remove from queue_file */
+ __del_gref(gref);
+ }
+
+ /* It's possible for the target domain to map the just-allocated grant
+ * references by blindly guessing their IDs; if this is done, then
+ * __del_gref will leave them in the queue_gref list. They need to be
+ * added to the global list so that we can free them when they are no
+ * longer referenced.
+ */
+ if (unlikely(!list_empty(&queue_gref)))
+ list_splice_tail(&queue_gref, &gref_list);
+ spin_unlock(&gref_lock);
+ return rc;
+}
+
+static void __del_gref(struct gntalloc_gref *gref)
+{
+ if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+ uint8_t *tmp = kmap(gref->page);
+ tmp[gref->notify.pgoff] = 0;
+ kunmap(gref->page);
+ }
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ notify_remote_via_evtchn(gref->notify.event);
+
+ gref->notify.flags = 0;
+
+ if (gref->gref_id > 0) {
+ if (gnttab_query_foreign_access(gref->gref_id))
+ return;
+
+ if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
+ return;
+ }
+
+ gref_size--;
+ list_del(&gref->next_gref);
+
+ if (gref->page)
+ __free_page(gref->page);
+
+ kfree(gref);
+}
+
+/* finds contiguous grant references in a file, returns the first */
+static struct gntalloc_gref *find_grefs(struct gntalloc_file_private_data *priv,
+ uint64_t index, uint32_t count)
+{
+ struct gntalloc_gref *rv = NULL, *gref;
+ list_for_each_entry(gref, &priv->list, next_file) {
+ if (gref->file_index == index && !rv)
+ rv = gref;
+ if (rv) {
+ if (gref->file_index != index)
+ return NULL;
+ index += PAGE_SIZE;
+ count--;
+ if (count == 0)
+ return rv;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * -------------------------------------
+ * File operations.
+ * -------------------------------------
+ */
+static int gntalloc_open(struct inode *inode, struct file *filp)
+{
+ struct gntalloc_file_private_data *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto out_nomem;
+ INIT_LIST_HEAD(&priv->list);
+
+ filp->private_data = priv;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ return 0;
+
+out_nomem:
+ return -ENOMEM;
+}
+
+static int gntalloc_release(struct inode *inode, struct file *filp)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_gref *gref;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ spin_lock(&gref_lock);
+ while (!list_empty(&priv->list)) {
+ gref = list_entry(priv->list.next,
+ struct gntalloc_gref, next_file);
+ list_del(&gref->next_file);
+ gref->users--;
+ if (gref->users == 0)
+ __del_gref(gref);
+ }
+ kfree(priv);
+ spin_unlock(&gref_lock);
+
+ return 0;
+}
+
+static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
+ struct ioctl_gntalloc_alloc_gref __user *arg)
+{
+ int rc = 0;
+ struct ioctl_gntalloc_alloc_gref op;
+ uint32_t *gref_ids;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ if (copy_from_user(&op, arg, sizeof(op))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY);
+ if (!gref_ids) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock(&gref_lock);
+ /* Clean up pages that have dropped to zero (local) users but are still
+ * mapped by remote domains. Since those pages count towards the limit we
+ * are about to enforce, removing them here is a good idea.
+ */
+ do_cleanup();
+ if (gref_size + op.count > limit) {
+ spin_unlock(&gref_lock);
+ rc = -ENOSPC;
+ goto out_free;
+ }
+ gref_size += op.count;
+ op.index = priv->index;
+ priv->index += op.count * PAGE_SIZE;
+ spin_unlock(&gref_lock);
+
+ rc = add_grefs(&op, gref_ids, priv);
+ if (rc < 0)
+ goto out_free;
+
+ /* Once we finish add_grefs, it is unsafe to touch the new reference,
+ * since it is possible for a concurrent ioctl to remove it (by guessing
+ * its index). If the userspace application doesn't provide valid memory
+ * to write the IDs to, it can only release them by closing the file, which
+ * it will end up doing anyway when it segfaults trying to read the IDs back.
+ */
+ if (copy_to_user(arg, &op, sizeof(op))) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+ if (copy_to_user(arg->gref_ids, gref_ids,
+ sizeof(gref_ids[0]) * op.count)) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+
+out_free:
+ kfree(gref_ids);
+out:
+ return rc;
+}
+
+static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
+ void __user *arg)
+{
+ int i, rc = 0;
+ struct ioctl_gntalloc_dealloc_gref op;
+ struct gntalloc_gref *gref, *n;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ if (copy_from_user(&op, arg, sizeof(op))) {
+ rc = -EFAULT;
+ goto dealloc_grant_out;
+ }
+
+ spin_lock(&gref_lock);
+ gref = find_grefs(priv, op.index, op.count);
+ if (gref) {
+ /* Remove from the file list only, and decrease reference count.
+ * The later call to do_cleanup() will remove from gref_list and
+ * free the memory if the pages aren't mapped anywhere.
+ */
+ for (i = 0; i < op.count; i++) {
+ n = list_entry(gref->next_file.next,
+ struct gntalloc_gref, next_file);
+ list_del(&gref->next_file);
+ gref->users--;
+ gref = n;
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ do_cleanup();
+
+ spin_unlock(&gref_lock);
+dealloc_grant_out:
+ return rc;
+}
+
+static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
+ void __user *arg)
+{
+ struct ioctl_gntalloc_unmap_notify op;
+ struct gntalloc_gref *gref;
+ uint64_t index;
+ int pgoff;
+ int rc;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ index = op.index & ~(PAGE_SIZE - 1);
+ pgoff = op.index & (PAGE_SIZE - 1);
+
+ spin_lock(&gref_lock);
+
+ gref = find_grefs(priv, index, 1);
+ if (!gref) {
+ rc = -ENOENT;
+ goto unlock_out;
+ }
+
+ if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ gref->notify.flags = op.action;
+ gref->notify.pgoff = pgoff;
+ gref->notify.event = op.event_channel_port;
+ rc = 0;
+ unlock_out:
+ spin_unlock(&gref_lock);
+ return rc;
+}
+
+static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+
+ switch (cmd) {
+ case IOCTL_GNTALLOC_ALLOC_GREF:
+ return gntalloc_ioctl_alloc(priv, (void __user *)arg);
+
+ case IOCTL_GNTALLOC_DEALLOC_GREF:
+ return gntalloc_ioctl_dealloc(priv, (void __user *)arg);
+
+ case IOCTL_GNTALLOC_SET_UNMAP_NOTIFY:
+ return gntalloc_ioctl_unmap_notify(priv, (void __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+static void gntalloc_vma_close(struct vm_area_struct *vma)
+{
+ struct gntalloc_gref *gref = vma->vm_private_data;
+ if (!gref)
+ return;
+
+ spin_lock(&gref_lock);
+ gref->users--;
+ if (gref->users == 0)
+ __del_gref(gref);
+ spin_unlock(&gref_lock);
+}
+
+static struct vm_operations_struct gntalloc_vmops = {
+ .close = gntalloc_vma_close,
+};
+
+static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_gref *gref;
+ int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int rv, i;
+
+ pr_debug("%s: priv %p, page %lu+%d\n", __func__,
+ priv, vma->vm_pgoff, count);
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&gref_lock);
+ gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
+ if (gref == NULL) {
+ rv = -ENOENT;
+ pr_debug("%s: Could not find grant reference",
+ __func__);
+ goto out_unlock;
+ }
+
+ vma->vm_private_data = gref;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTCOPY;
+ vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP;
+
+ vma->vm_ops = &gntalloc_vmops;
+
+ for (i = 0; i < count; i++) {
+ gref->users++;
+ rv = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+ gref->page);
+ if (rv)
+ goto out_unlock;
+
+ gref = list_entry(gref->next_file.next,
+ struct gntalloc_gref, next_file);
+ }
+ rv = 0;
+
+out_unlock:
+ spin_unlock(&gref_lock);
+ return rv;
+}
+
+static const struct file_operations gntalloc_fops = {
+ .owner = THIS_MODULE,
+ .open = gntalloc_open,
+ .release = gntalloc_release,
+ .unlocked_ioctl = gntalloc_ioctl,
+ .mmap = gntalloc_mmap
+};
+
+/*
+ * -------------------------------------
+ * Module creation/destruction.
+ * -------------------------------------
+ */
+static struct miscdevice gntalloc_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/gntalloc",
+ .fops = &gntalloc_fops,
+};
+
+static int __init gntalloc_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&gntalloc_miscdev);
+ if (err != 0) {
+ printk(KERN_ERR "Could not register misc gntalloc device\n");
+ return err;
+ }
+
+ pr_debug("Created grant allocation device at %d,%d\n",
+ MISC_MAJOR, gntalloc_miscdev.minor);
+
+ return 0;
+}
+
+static void __exit gntalloc_exit(void)
+{
+ misc_deregister(&gntalloc_miscdev);
+}
+
+module_init(gntalloc_init);
+module_exit(gntalloc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Carter Weatherly <carter.weatherly@jhuapl.edu>, "
+ "Daniel De Graaf <dgdegra@tycho.nsa.gov>");
+MODULE_DESCRIPTION("User-space grant reference allocator driver");
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 1e31cdcdae1e..017ce600fbc6 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -32,10 +32,13 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
+#include <xen/balloon.h>
#include <xen/gntdev.h>
+#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
@@ -45,35 +48,46 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
-static int limit = 1024;
+static int limit = 1024*1024;
module_param(limit, int, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at "
- "once by a gntdev instance");
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
+ "the gntdev device");
+
+static atomic_t pages_mapped = ATOMIC_INIT(0);
+
+static int use_ptemod;
struct gntdev_priv {
struct list_head maps;
- uint32_t used;
- uint32_t limit;
/* lock protects maps from concurrent changes */
spinlock_t lock;
struct mm_struct *mm;
struct mmu_notifier mn;
};
+struct unmap_notify {
+ int flags;
+ /* Address relative to the start of the grant_map */
+ int addr;
+ int event;
+};
+
struct grant_map {
struct list_head next;
- struct gntdev_priv *priv;
struct vm_area_struct *vma;
int index;
int count;
int flags;
- int is_mapped;
+ atomic_t users;
+ struct unmap_notify notify;
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
struct page **pages;
};
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
+
/* ------------------------------------------------------------------ */
static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -82,9 +96,7 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
#ifdef DEBUG
struct grant_map *map;
- pr_debug("maps list (priv %p, usage %d/%d)\n",
- priv, priv->used, priv->limit);
-
+ pr_debug("%s: maps list (priv %p)\n", __func__, priv);
list_for_each_entry(map, &priv->maps, next)
pr_debug(" index %2d, count %2d %s\n",
map->index, map->count,
@@ -111,27 +123,21 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
NULL == add->pages)
goto err;
+ if (alloc_xenballooned_pages(count, add->pages))
+ goto err;
+
for (i = 0; i < count; i++) {
- add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (add->pages[i] == NULL)
- goto err;
+ add->map_ops[i].handle = -1;
+ add->unmap_ops[i].handle = -1;
}
add->index = 0;
add->count = count;
- add->priv = priv;
-
- if (add->count + priv->used > priv->limit)
- goto err;
+ atomic_set(&add->users, 1);
return add;
err:
- if (add->pages)
- for (i = 0; i < count; i++) {
- if (add->pages[i])
- __free_page(add->pages[i]);
- }
kfree(add->pages);
kfree(add->grants);
kfree(add->map_ops);
@@ -154,7 +160,6 @@ static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
list_add_tail(&add->next, &priv->maps);
done:
- priv->used += add->count;
gntdev_print_maps(priv, "[new]", add->index);
}
@@ -166,57 +171,33 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
list_for_each_entry(map, &priv->maps, next) {
if (map->index != index)
continue;
- if (map->count != count)
+ if (count && map->count != count)
continue;
return map;
}
return NULL;
}
-static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv,
- unsigned long vaddr)
+static void gntdev_put_map(struct grant_map *map)
{
- struct grant_map *map;
-
- list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- if (vaddr < map->vma->vm_start)
- continue;
- if (vaddr >= map->vma->vm_end)
- continue;
- return map;
- }
- return NULL;
-}
-
-static int gntdev_del_map(struct grant_map *map)
-{
- int i;
+ if (!map)
+ return;
- if (map->vma)
- return -EBUSY;
- for (i = 0; i < map->count; i++)
- if (map->unmap_ops[i].handle)
- return -EBUSY;
+ if (!atomic_dec_and_test(&map->users))
+ return;
- map->priv->used -= map->count;
- list_del(&map->next);
- return 0;
-}
+ atomic_sub(map->count, &pages_mapped);
-static void gntdev_free_map(struct grant_map *map)
-{
- int i;
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
+ notify_remote_via_evtchn(map->notify.event);
+ }
- if (!map)
- return;
+ if (map->pages) {
+ if (!use_ptemod)
+ unmap_grant_pages(map, 0, map->count);
- if (map->pages)
- for (i = 0; i < map->count; i++) {
- if (map->pages[i])
- __free_page(map->pages[i]);
- }
+ free_xenballooned_pages(map->count, map->pages);
+ }
kfree(map->pages);
kfree(map->grants);
kfree(map->map_ops);
@@ -231,18 +212,17 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
{
struct grant_map *map = data;
unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+ int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
u64 pte_maddr;
BUG_ON(pgnr >= map->count);
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
- gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
+ gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
map->grants[pgnr].ref,
map->grants[pgnr].domid);
- gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
- 0 /* handle */);
+ gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
+ -1 /* handle */);
return 0;
}
@@ -250,6 +230,21 @@ static int map_grant_pages(struct grant_map *map)
{
int i, err = 0;
+ if (!use_ptemod) {
+ /* Note: it could already be mapped */
+ if (map->map_ops[0].handle != -1)
+ return 0;
+ for (i = 0; i < map->count; i++) {
+ unsigned long addr = (unsigned long)
+ pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
+ map->grants[i].ref,
+ map->grants[i].domid);
+ gnttab_set_unmap_op(&map->unmap_ops[i], addr,
+ map->flags, -1 /* handle */);
+ }
+ }
+
pr_debug("map %d+%d\n", map->index, map->count);
err = gnttab_map_refs(map->map_ops, map->pages, map->count);
if (err)
@@ -258,28 +253,81 @@ static int map_grant_pages(struct grant_map *map)
for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status)
err = -EINVAL;
- map->unmap_ops[i].handle = map->map_ops[i].handle;
+ else {
+ BUG_ON(map->map_ops[i].handle == -1);
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
+ pr_debug("map handle=%d\n", map->map_ops[i].handle);
+ }
}
return err;
}
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
int i, err = 0;
- pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
- err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages);
+ if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+ int pgno = (map->notify.addr >> PAGE_SHIFT);
+ if (pgno >= offset && pgno < offset + pages && use_ptemod) {
+ void __user *tmp = (void __user *)
+ map->vma->vm_start + map->notify.addr;
+ err = copy_to_user(tmp, &err, 1);
+ if (err)
+ return err;
+ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+ } else if (pgno >= offset && pgno < offset + pages) {
+ uint8_t *tmp = kmap(map->pages[pgno]);
+ tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
+ kunmap(map->pages[pgno]);
+ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+ }
+ }
+
+ err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
if (err)
return err;
for (i = 0; i < pages; i++) {
if (map->unmap_ops[offset+i].status)
err = -EINVAL;
- map->unmap_ops[offset+i].handle = 0;
+ pr_debug("unmap handle=%d st=%d\n",
+ map->unmap_ops[offset+i].handle,
+ map->unmap_ops[offset+i].status);
+ map->unmap_ops[offset+i].handle = -1;
}
return err;
}
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+{
+ int range, err = 0;
+
+ pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
+
+ /* It is possible the requested range will have a "hole" where we
+ * already unmapped some of the grants. Only unmap valid ranges.
+ */
+ while (pages && !err) {
+ while (pages && map->unmap_ops[offset].handle == -1) {
+ offset++;
+ pages--;
+ }
+ range = 0;
+ while (range < pages) {
+ if (map->unmap_ops[offset+range].handle == -1) {
+ range--;
+ break;
+ }
+ range++;
+ }
+ err = __unmap_grant_pages(map, offset, range);
+ offset += range;
+ pages -= range;
+ }
+
+ return err;
+}
+
/* ------------------------------------------------------------------ */
static void gntdev_vma_close(struct vm_area_struct *vma)
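
The reworked unmap_grant_pages() above walks the requested range, skips entries whose handle is already -1 (a "hole" left by an earlier unmap), and hands each remaining contiguous run to __unmap_grant_pages(). A minimal, standalone user-space sketch of that range-splitting idea — not part of the patch; unmap_run() is a hypothetical stand-in for __unmap_grant_pages():

#include <stdio.h>

static void unmap_run(int offset, int count)
{
	printf("unmap [%d..%d)\n", offset, offset + count);
}

static void unmap_with_holes(const int *handles, int offset, int pages)
{
	while (pages > 0) {
		/* skip entries that were already unmapped (handle == -1) */
		while (pages > 0 && handles[offset] == -1) {
			offset++;
			pages--;
		}
		/* measure the contiguous run of still-mapped entries */
		int run = 0;
		while (run < pages && handles[offset + run] != -1)
			run++;
		if (run)
			unmap_run(offset, run);
		offset += run;
		pages -= run;
	}
}

int main(void)
{
	/* -1 marks an entry that was already unmapped */
	int handles[] = { 7, 8, -1, -1, 9, 10, 11, -1 };
	unmap_with_holes(handles, 0, 8);	/* prints [0..2) and [4..7) */
	return 0;
}
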
@@ -287,22 +335,13 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
struct grant_map *map = vma->vm_private_data;
pr_debug("close %p\n", vma);
- map->is_mapped = 0;
map->vma = NULL;
vma->vm_private_data = NULL;
-}
-
-static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n",
- vmf->virtual_address, vmf->pgoff);
- vmf->flags = VM_FAULT_ERROR;
- return 0;
+ gntdev_put_map(map);
}
static struct vm_operations_struct gntdev_vmops = {
.close = gntdev_vma_close,
- .fault = gntdev_vma_fault,
};
/* ------------------------------------------------------------------ */
@@ -320,8 +359,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
list_for_each_entry(map, &priv->maps, next) {
if (!map->vma)
continue;
- if (!map->is_mapped)
- continue;
if (map->vma->vm_start >= end)
continue;
if (map->vma->vm_end <= start)
@@ -386,16 +423,17 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->maps);
spin_lock_init(&priv->lock);
- priv->limit = limit;
- priv->mm = get_task_mm(current);
- if (!priv->mm) {
- kfree(priv);
- return -ENOMEM;
+ if (use_ptemod) {
+ priv->mm = get_task_mm(current);
+ if (!priv->mm) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+ priv->mn.ops = &gntdev_mmu_ops;
+ ret = mmu_notifier_register(&priv->mn, priv->mm);
+ mmput(priv->mm);
}
- priv->mn.ops = &gntdev_mmu_ops;
- ret = mmu_notifier_register(&priv->mn, priv->mm);
- mmput(priv->mm);
if (ret) {
kfree(priv);
@@ -412,21 +450,19 @@ static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
struct grant_map *map;
- int err;
pr_debug("priv %p\n", priv);
spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
- err = gntdev_del_map(map);
- if (WARN_ON(err))
- gntdev_free_map(map);
-
+ list_del(&map->next);
+ gntdev_put_map(map);
}
spin_unlock(&priv->lock);
- mmu_notifier_unregister(&priv->mn, priv->mm);
+ if (use_ptemod)
+ mmu_notifier_unregister(&priv->mn, priv->mm);
kfree(priv);
return 0;
}
@@ -443,16 +479,21 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
pr_debug("priv %p, add %d\n", priv, op.count);
if (unlikely(op.count <= 0))
return -EINVAL;
- if (unlikely(op.count > priv->limit))
- return -EINVAL;
err = -ENOMEM;
map = gntdev_alloc_map(priv, op.count);
if (!map)
return err;
+
+ if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
+ pr_debug("can't map: over limit\n");
+ gntdev_put_map(map);
+ return err;
+ }
+
if (copy_from_user(map->grants, &u->refs,
sizeof(map->grants[0]) * op.count) != 0) {
- gntdev_free_map(map);
+ gntdev_put_map(map);
return err;
}
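
The new accounting in this hunk reserves op.count pages against the module-wide limit optimistically: atomic_add_return() bumps pages_mapped first, and if the result exceeds the limit the reservation is rolled back through gntdev_put_map(), whose atomic_sub() undoes the addition. A user-space analogue of that reserve-then-roll-back pattern, sketched with C11 atomics in place of the kernel's atomic_t (names here are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int pages_mapped;		/* analogue of the module-wide counter */
static const int limit = 1024 * 1024;	/* analogue of the "limit" module param */

/* Try to reserve "count" pages; undo the addition if it busts the limit. */
static bool reserve_pages(int count)
{
	if (atomic_fetch_add(&pages_mapped, count) + count > limit) {
		atomic_fetch_sub(&pages_mapped, count);
		return false;
	}
	return true;
}

static void release_pages(int count)
{
	atomic_fetch_sub(&pages_mapped, count);
}

int main(void)
{
	if (reserve_pages(4096))
		release_pages(4096);
	return 0;
}
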
@@ -461,13 +502,9 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
op.index = map->index << PAGE_SHIFT;
spin_unlock(&priv->lock);
- if (copy_to_user(u, &op, sizeof(op)) != 0) {
- spin_lock(&priv->lock);
- gntdev_del_map(map);
- spin_unlock(&priv->lock);
- gntdev_free_map(map);
- return err;
- }
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ return -EFAULT;
+
return 0;
}
@@ -484,11 +521,12 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
spin_lock(&priv->lock);
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
- if (map)
- err = gntdev_del_map(map);
+ if (map) {
+ list_del(&map->next);
+ gntdev_put_map(map);
+ err = 0;
+ }
spin_unlock(&priv->lock);
- if (!err)
- gntdev_free_map(map);
return err;
}
@@ -496,43 +534,66 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
struct ioctl_gntdev_get_offset_for_vaddr op;
+ struct vm_area_struct *vma;
struct grant_map *map;
if (copy_from_user(&op, u, sizeof(op)) != 0)
return -EFAULT;
pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
- spin_lock(&priv->lock);
- map = gntdev_find_map_vaddr(priv, op.vaddr);
- if (map == NULL ||
- map->vma->vm_start != op.vaddr) {
- spin_unlock(&priv->lock);
+ vma = find_vma(current->mm, op.vaddr);
+ if (!vma || vma->vm_ops != &gntdev_vmops)
return -EINVAL;
- }
+
+ map = vma->vm_private_data;
+ if (!map)
+ return -EINVAL;
+
op.offset = map->index << PAGE_SHIFT;
op.count = map->count;
- spin_unlock(&priv->lock);
if (copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
return 0;
}
-static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
- struct ioctl_gntdev_set_max_grants __user *u)
+static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
- struct ioctl_gntdev_set_max_grants op;
+ struct ioctl_gntdev_unmap_notify op;
+ struct grant_map *map;
+ int rc;
- if (copy_from_user(&op, u, sizeof(op)) != 0)
+ if (copy_from_user(&op, u, sizeof(op)))
return -EFAULT;
- pr_debug("priv %p, limit %d\n", priv, op.count);
- if (op.count > limit)
- return -E2BIG;
+
+ if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
+ return -EINVAL;
spin_lock(&priv->lock);
- priv->limit = op.count;
+
+ list_for_each_entry(map, &priv->maps, next) {
+ uint64_t begin = map->index << PAGE_SHIFT;
+ uint64_t end = (map->index + map->count) << PAGE_SHIFT;
+ if (op.index >= begin && op.index < end)
+ goto found;
+ }
+ rc = -ENOENT;
+ goto unlock_out;
+
+ found:
+ if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
+ (map->flags & GNTMAP_readonly)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ map->notify.flags = op.action;
+ map->notify.addr = op.index - (map->index << PAGE_SHIFT);
+ map->notify.event = op.event_channel_port;
+ rc = 0;
+ unlock_out:
spin_unlock(&priv->lock);
- return 0;
+ return rc;
}
static long gntdev_ioctl(struct file *flip,
@@ -551,8 +612,8 @@ static long gntdev_ioctl(struct file *flip,
case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
- case IOCTL_GNTDEV_SET_MAX_GRANTS:
- return gntdev_ioctl_set_max_grants(priv, ptr);
+ case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
+ return gntdev_ioctl_notify(priv, ptr);
default:
pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
@@ -568,7 +629,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
int index = vma->vm_pgoff;
int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
struct grant_map *map;
- int err = -EINVAL;
+ int i, err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -580,47 +641,70 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map = gntdev_find_map_index(priv, index, count);
if (!map)
goto unlock_out;
- if (map->vma)
+ if (use_ptemod && map->vma)
goto unlock_out;
- if (priv->mm != vma->vm_mm) {
+ if (use_ptemod && priv->mm != vma->vm_mm) {
printk(KERN_WARNING "Huh? Other mm?\n");
goto unlock_out;
}
+ atomic_inc(&map->users);
+
vma->vm_ops = &gntdev_vmops;
vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
vma->vm_private_data = map;
- map->vma = vma;
- map->flags = GNTMAP_host_map | GNTMAP_application_map;
- if (!(vma->vm_flags & VM_WRITE))
- map->flags |= GNTMAP_readonly;
+ if (use_ptemod)
+ map->vma = vma;
+
+ if (map->flags) {
+ if ((vma->vm_flags & VM_WRITE) &&
+ (map->flags & GNTMAP_readonly))
+ return -EINVAL;
+ } else {
+ map->flags = GNTMAP_host_map;
+ if (!(vma->vm_flags & VM_WRITE))
+ map->flags |= GNTMAP_readonly;
+ }
spin_unlock(&priv->lock);
- err = apply_to_page_range(vma->vm_mm, vma->vm_start,
- vma->vm_end - vma->vm_start,
- find_grant_ptes, map);
- if (err) {
- printk(KERN_WARNING "find_grant_ptes() failure.\n");
- return err;
+ if (use_ptemod) {
+ err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+ vma->vm_end - vma->vm_start,
+ find_grant_ptes, map);
+ if (err) {
+ printk(KERN_WARNING "find_grant_ptes() failure.\n");
+ goto out_put_map;
+ }
}
err = map_grant_pages(map);
- if (err) {
- printk(KERN_WARNING "map_grant_pages() failure.\n");
- return err;
- }
+ if (err)
+ goto out_put_map;
- map->is_mapped = 1;
+ if (!use_ptemod) {
+ for (i = 0; i < count; i++) {
+ err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
+ map->pages[i]);
+ if (err)
+ goto out_put_map;
+ }
+ }
return 0;
unlock_out:
spin_unlock(&priv->lock);
return err;
+
+out_put_map:
+ if (use_ptemod)
+ map->vma = NULL;
+ gntdev_put_map(map);
+ return err;
}
static const struct file_operations gntdev_fops = {
@@ -646,6 +730,8 @@ static int __init gntdev_init(void)
if (!xen_domain())
return -ENODEV;
+ use_ptemod = xen_pv_domain();
+
err = misc_register(&gntdev_miscdev);
if (err != 0) {
printk(KERN_ERR "Could not register gntdev device\n");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 9ef54ebc1194..3745a318defc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -458,7 +458,14 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (ret)
return ret;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
+
for (i = 0; i < count; i++) {
+ /* Do not add to override if the map failed. */
+ if (map_ops[i].status)
+ continue;
+
/* m2p override only supported for GNTMAP_contains_pte mappings */
if (!(map_ops[i].flags & GNTMAP_contains_pte))
continue;
@@ -483,6 +490,9 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (ret)
return ret;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
+
for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i]);
if (ret)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index ebb292859b59..95143dd6904d 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled)
xen_mm_unpin_all();
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_HIBERNATION
static int xen_suspend(void *data)
{
struct suspend_info *si = data;
@@ -69,7 +69,7 @@ static int xen_suspend(void *data)
BUG_ON(!irqs_disabled());
- err = sysdev_suspend(PMSG_SUSPEND);
+ err = sysdev_suspend(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
err);
@@ -118,7 +118,7 @@ static void do_suspend(void)
}
#endif
- err = dpm_suspend_start(PMSG_SUSPEND);
+ err = dpm_suspend_start(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
goto out_thaw;
@@ -127,7 +127,7 @@ static void do_suspend(void)
printk(KERN_DEBUG "suspending xenstore...\n");
xs_suspend();
- err = dpm_suspend_noirq(PMSG_SUSPEND);
+ err = dpm_suspend_noirq(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
goto out_resume;
@@ -147,7 +147,7 @@ static void do_suspend(void)
err = stop_machine(xen_suspend, &si, cpumask_of(0));
- dpm_resume_noirq(PMSG_RESUME);
+ dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
if (err) {
printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
@@ -161,7 +161,7 @@ out_resume:
} else
xs_suspend_cancel();
- dpm_resume_end(PMSG_RESUME);
+ dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
/* Make sure timer events get retriggered on all CPUs */
clock_was_set();
@@ -173,7 +173,7 @@ out:
#endif
shutting_down = SHUTDOWN_INVALID;
}
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_HIBERNATION */
struct shutdown_handler {
const char *command;
@@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
{ "poweroff", do_poweroff },
{ "halt", do_poweroff },
{ "reboot", do_reboot },
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_HIBERNATION
{ "suspend", do_suspend },
#endif
{NULL, NULL},
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
new file mode 100644
index 000000000000..a4ff225ee868
--- /dev/null
+++ b/drivers/xen/xen-balloon.c
@@ -0,0 +1,256 @@
+/******************************************************************************
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/capability.h>
+
+#include <xen/xen.h>
+#include <xen/interface/xen.h>
+#include <xen/balloon.h>
+#include <xen/xenbus.h>
+#include <xen/features.h>
+#include <xen/page.h>
+
+#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
+
+#define BALLOON_CLASS_NAME "xen_memory"
+
+static struct sys_device balloon_sysdev;
+
+static int register_balloon(struct sys_device *sysdev);
+
+static struct xenbus_watch target_watch =
+{
+ .node = "memory/target"
+};
+
+/* React to a change in the target key */
+static void watch_target(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ unsigned long long new_target;
+ int err;
+
+ err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
+ if (err != 1) {
+ /* This is ok (for domain0 at least) - so just return */
+ return;
+ }
+
+ /* The given memory/target value is in KiB, so it needs converting to
+ * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
+ */
+ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+}
+
+static int balloon_init_watcher(struct notifier_block *notifier,
+ unsigned long event,
+ void *data)
+{
+ int err;
+
+ err = register_xenbus_watch(&target_watch);
+ if (err)
+ printk(KERN_ERR "Failed to set balloon watcher\n");
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block xenstore_notifier;
+
+static int __init balloon_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ pr_info("xen-balloon: Initialising balloon driver.\n");
+
+ register_balloon(&balloon_sysdev);
+
+ target_watch.callback = watch_target;
+ xenstore_notifier.notifier_call = balloon_init_watcher;
+
+ register_xenstore_notifier(&xenstore_notifier);
+
+ return 0;
+}
+subsys_initcall(balloon_init);
+
+static void balloon_exit(void)
+{
+ /* XXX - release balloon here */
+ return;
+}
+
+module_exit(balloon_exit);
+
+#define BALLOON_SHOW(name, format, args...) \
+ static ssize_t show_##name(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
+ { \
+ return sprintf(buf, format, ##args); \
+ } \
+ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
+
+BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
+BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
+BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
+
+static SYSDEV_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
+static SYSDEV_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
+static SYSDEV_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
+static SYSDEV_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+
+static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
+}
+
+static ssize_t store_target_kb(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ char *endchar;
+ unsigned long long target_bytes;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;
+
+ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
+
+ return count;
+}
+
+static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
+ show_target_kb, store_target_kb);
+
+
+static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)balloon_stats.target_pages
+ << PAGE_SHIFT);
+}
+
+static ssize_t store_target(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ char *endchar;
+ unsigned long long target_bytes;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ target_bytes = memparse(buf, &endchar);
+
+ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
+
+ return count;
+}
+
+static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
+ show_target, store_target);
+
+
+static struct sysdev_attribute *balloon_attrs[] = {
+ &attr_target_kb,
+ &attr_target,
+ &attr_schedule_delay.attr,
+ &attr_max_schedule_delay.attr,
+ &attr_retry_count.attr,
+ &attr_max_retry_count.attr
+};
+
+static struct attribute *balloon_info_attrs[] = {
+ &attr_current_kb.attr,
+ &attr_low_kb.attr,
+ &attr_high_kb.attr,
+ NULL
+};
+
+static struct attribute_group balloon_info_group = {
+ .name = "info",
+ .attrs = balloon_info_attrs
+};
+
+static struct sysdev_class balloon_sysdev_class = {
+ .name = BALLOON_CLASS_NAME
+};
+
+static int register_balloon(struct sys_device *sysdev)
+{
+ int i, error;
+
+ error = sysdev_class_register(&balloon_sysdev_class);
+ if (error)
+ return error;
+
+ sysdev->id = 0;
+ sysdev->cls = &balloon_sysdev_class;
+
+ error = sysdev_register(sysdev);
+ if (error) {
+ sysdev_class_unregister(&balloon_sysdev_class);
+ return error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
+ error = sysdev_create_file(sysdev, balloon_attrs[i]);
+ if (error)
+ goto fail;
+ }
+
+ error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
+ if (error)
+ goto fail;
+
+ return 0;
+
+ fail:
+ while (--i >= 0)
+ sysdev_remove_file(sysdev, balloon_attrs[i]);
+ sysdev_unregister(sysdev);
+ sysdev_class_unregister(&balloon_sysdev_class);
+ return error;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index baa65e7fbbc7..739769551e33 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -577,7 +577,7 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);
-int xenbus_dev_suspend(struct device *dev, pm_message_t state)
+int xenbus_dev_suspend(struct device *dev)
{
int err = 0;
struct xenbus_driver *drv;
@@ -590,7 +590,7 @@ int xenbus_dev_suspend(struct device *dev, pm_message_t state)
return 0;
drv = to_xenbus_driver(dev->driver);
if (drv->suspend)
- err = drv->suspend(xdev, state);
+ err = drv->suspend(xdev);
if (err)
printk(KERN_WARNING
"xenbus: suspend %s failed: %i\n", dev_name(dev), err);
@@ -642,6 +642,14 @@ int xenbus_dev_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(xenbus_dev_resume);
+int xenbus_dev_cancel(struct device *dev)
+{
+ /* Do nothing */
+ DPRINTK("cancel");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
+
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready = 0;
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 24665812316a..888b9900ca08 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -64,8 +64,9 @@ extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
extern void xenbus_dev_shutdown(struct device *_dev);
-extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+extern int xenbus_dev_suspend(struct device *dev);
extern int xenbus_dev_resume(struct device *dev);
+extern int xenbus_dev_cancel(struct device *dev);
extern void xenbus_otherend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len,
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 5bcc2d6cf129..b6a2690c9d49 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -85,6 +85,14 @@ static struct device_attribute xenbus_frontend_dev_attrs[] = {
__ATTR_NULL
};
+static const struct dev_pm_ops xenbus_pm_ops = {
+ .suspend = xenbus_dev_suspend,
+ .resume = xenbus_dev_resume,
+ .freeze = xenbus_dev_suspend,
+ .thaw = xenbus_dev_cancel,
+ .restore = xenbus_dev_resume,
+};
+
static struct xen_bus_type xenbus_frontend = {
.root = "device",
.levels = 2, /* device/type/<id> */
@@ -100,8 +108,7 @@ static struct xen_bus_type xenbus_frontend = {
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_frontend_dev_attrs,
- .suspend = xenbus_dev_suspend,
- .resume = xenbus_dev_resume,
+ .pm = &xenbus_pm_ops,
},
};
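
With xenbus_dev_suspend() losing its pm_message_t argument and the frontend bus switching to dev_pm_ops, a frontend driver's callback no longer sees (or needs) the message: xenbus_pm_ops routes both .suspend and .freeze to the same suspend path, .thaw to xenbus_dev_cancel(), and .resume/.restore to the resume path. A hedged kernel-side sketch of what a hypothetical frontend's callbacks look like after this change (the examplefront_* names are illustrative, not from the patch):

#include <xen/xenbus.h>

/* Hypothetical frontend callbacks; xenbus now calls ->suspend(dev) with no
 * pm_message_t, for both ordinary suspend and hibernation freeze. */
static int examplefront_suspend(struct xenbus_device *dev)
{
	/* quiesce the shared ring and cancel outstanding requests */
	return 0;
}

static int examplefront_resume(struct xenbus_device *dev)
{
	/* reconnect to the backend after resume or restore */
	return 0;
}

static struct xenbus_driver examplefront_driver = {
	.name = "examplefront",		/* illustrative device type */
	.suspend = examplefront_suspend,
	.resume = examplefront_resume,
	/* .ids, .probe, .remove, .otherend_changed omitted for brevity */
};
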