-rw-r--r--  Documentation/Changes | 43
-rw-r--r--  Documentation/CodingStyle | 4
-rw-r--r--  Documentation/cgroups/blkio-controller.txt | 12
-rw-r--r--  Documentation/feature-removal-schedule.txt | 22
-rw-r--r--  Documentation/filesystems/caching/netfs-api.txt | 16
-rw-r--r--  Documentation/hwmon/f71882fg | 4
-rw-r--r--  Documentation/hwmon/k10temp | 8
-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  Documentation/laptops/thinkpad-acpi.txt | 5
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 2
-rw-r--r--  Documentation/power/opp.txt | 2
-rw-r--r--  Documentation/spinlocks.txt | 45
-rw-r--r--  MAINTAINERS | 15
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/common/dmabounce.c | 2
-rw-r--r--  arch/arm/kernel/perf_event.c | 6
-rw-r--r--  arch/arm/kernel/setup.c | 2
-rw-r--r--  arch/arm/kernel/smp_twd.c | 2
-rw-r--r--  arch/arm/mach-at91/at91cap9.c | 8
-rw-r--r--  arch/arm/mach-at91/at91cap9_devices.c | 2
-rw-r--r--  arch/arm/mach-at91/at91rm9200.c | 6
-rw-r--r--  arch/arm/mach-at91/at91rm9200_devices.c | 2
-rw-r--r--  arch/arm/mach-at91/at91sam9260_devices.c | 2
-rw-r--r--  arch/arm/mach-at91/at91sam9261_devices.c | 2
-rw-r--r--  arch/arm/mach-at91/at91sam9263_devices.c | 2
-rw-r--r--  arch/arm/mach-at91/at91sam9g45.c | 10
-rw-r--r--  arch/arm/mach-at91/at91sam9g45_devices.c | 2
-rw-r--r--  arch/arm/mach-at91/at91sam9rl.c | 4
-rw-r--r--  arch/arm/mach-at91/at91sam9rl_devices.c | 2
-rw-r--r--  arch/arm/mach-at91/board-cap9adk.c | 2
-rw-r--r--  arch/arm/mach-at91/board-sam9260ek.c | 2
-rw-r--r--  arch/arm/mach-at91/board-sam9261ek.c | 2
-rw-r--r--  arch/arm/mach-at91/board-sam9263ek.c | 2
-rw-r--r--  arch/arm/mach-at91/board-sam9g20ek.c | 2
-rw-r--r--  arch/arm/mach-at91/board-sam9m10g45ek.c | 2
-rw-r--r--  arch/arm/mach-at91/include/mach/system_rev.h | 10
-rw-r--r--  arch/arm/mach-davinci/irq.c | 2
-rw-r--r--  arch/arm/mach-ep93xx/core.c | 4
-rw-r--r--  arch/arm/mach-exynos4/cpu.c | 6
-rw-r--r--  arch/arm/mach-exynos4/dev-audio.c | 2
-rw-r--r--  arch/arm/mach-exynos4/headsmp.S | 2
-rw-r--r--  arch/arm/mach-exynos4/mach-smdkv310.c | 8
-rw-r--r--  arch/arm/mach-mmp/pxa168.c | 2
-rw-r--r--  arch/arm/mach-mmp/pxa910.c | 2
-rw-r--r--  arch/arm/mach-omap1/board-ams-delta.c | 8
-rw-r--r--  arch/arm/mach-omap1/gpio15xx.c | 4
-rw-r--r--  arch/arm/mach-omap1/gpio16xx.c | 10
-rw-r--r--  arch/arm/mach-omap1/gpio7xx.c | 14
-rw-r--r--  arch/arm/mach-omap2/board-rx51-peripherals.c | 2
-rw-r--r--  arch/arm/mach-pxa/mfp-pxa2xx.c | 4
-rw-r--r--  arch/arm/mach-pxa/raumfeld.c | 36
-rw-r--r--  arch/arm/mach-s3c2440/mach-mini2440.c | 2
-rw-r--r--  arch/arm/mach-s3c64xx/dev-spi.c | 2
-rw-r--r--  arch/arm/mach-s5p64x0/dev-spi.c | 4
-rw-r--r--  arch/arm/mach-s5pc100/dev-spi.c | 4
-rw-r--r--  arch/arm/mach-s5pv210/dev-spi.c | 2
-rw-r--r--  arch/arm/mach-shmobile/board-ap4evb.c | 2
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c | 2
-rw-r--r--  arch/arm/mach-shmobile/clock-sh7372.c | 1
-rw-r--r--  arch/arm/mach-shmobile/include/mach/sh7372.h | 2
-rw-r--r--  arch/arm/mach-shmobile/pm-sh7372.c | 52
-rw-r--r--  arch/arm/mach-shmobile/pm_runtime.c | 8
-rw-r--r--  arch/arm/mach-shmobile/setup-sh7372.c | 4
-rw-r--r--  arch/arm/mach-vt8500/irq.c | 21
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 19
-rw-r--r--  arch/arm/mm/mmu.c | 5
-rw-r--r--  arch/arm/mm/nommu.c | 4
-rw-r--r--  arch/arm/plat-orion/gpio.c | 2
-rw-r--r--  arch/arm/plat-pxa/gpio.c | 10
-rw-r--r--  arch/arm/plat-s3c24xx/dma.c | 12
-rw-r--r--  arch/arm/plat-s5p/irq-gpioint.c | 2
-rw-r--r--  arch/arm/plat-s5p/s5p-time.c | 4
-rw-r--r--  arch/arm/plat-samsung/include/plat/devs.h | 6
-rw-r--r--  arch/arm/plat-samsung/include/plat/s3c64xx-spi.h | 2
-rw-r--r--  arch/arm/plat-samsung/irq-uart.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 30
-rw-r--r--  arch/x86/include/asm/mmzone_32.h | 2
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.S | 14
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.h | 6
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 6
-rw-r--r--  arch/x86/kernel/reboot.c | 8
-rw-r--r--  arch/x86/mm/init_64.c | 3
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 14
-rw-r--r--  arch/x86/pci/xen.c | 56
-rw-r--r--  arch/x86/platform/efi/efi.c | 3
-rw-r--r--  block/cfq-iosched.c | 8
-rw-r--r--  drivers/base/memory.c | 1
-rw-r--r--  drivers/base/power/domain.c | 402
-rw-r--r--  drivers/base/power/opp.c | 17
-rw-r--r--  drivers/base/power/trace.c | 2
-rw-r--r--  drivers/base/syscore.c | 8
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 37
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 6
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 7
-rw-r--r--  drivers/char/agp/intel-agp.h | 7
-rw-r--r--  drivers/char/apm-emulation.c | 2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 2
-rw-r--r--  drivers/firewire/ohci.c | 6
-rw-r--r--  drivers/gpio/langwell_gpio.c | 2
-rw-r--r--  drivers/gpio/tps65910-gpio.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 167
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 1
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/adm1275.c | 26
-rw-r--r--  drivers/hwmon/emc6w201.c | 58
-rw-r--r--  drivers/hwmon/f71882fg.c | 19
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 2
-rw-r--r--  drivers/hwmon/lm95241.c | 22
-rw-r--r--  drivers/hwmon/pmbus.c | 19
-rw-r--r--  drivers/hwmon/pmbus_core.c | 31
-rw-r--r--  drivers/hwmon/sch5627.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-bfin-twi.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 8
-rw-r--r--  drivers/infiniband/core/cm.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 3
-rw-r--r--  drivers/input/keyboard/pmic8xxx-keypad.c | 3
-rw-r--r--  drivers/input/misc/pmic8xxx-pwrkey.c | 3
-rw-r--r--  drivers/leds/leds-pca9532.c | 2
-rw-r--r--  drivers/media/rc/fintek-cir.c | 5
-rw-r--r--  drivers/media/rc/imon.c | 19
-rw-r--r--  drivers/media/rc/ir-raw.c | 4
-rw-r--r--  drivers/media/rc/ite-cir.c | 12
-rw-r--r--  drivers/media/rc/ite-cir.h | 3
-rw-r--r--  drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c | 58
-rw-r--r--  drivers/media/rc/lirc_dev.c | 37
-rw-r--r--  drivers/media/rc/mceusb.c | 80
-rw-r--r--  drivers/media/rc/nuvoton-cir.c | 2
-rw-r--r--  drivers/media/rc/nuvoton-cir.h | 1
-rw-r--r--  drivers/media/rc/rc-main.c | 48
-rw-r--r--  drivers/media/video/bt8xx/bttv-driver.c | 2
-rw-r--r--  drivers/media/video/cx18/cx18-ioctl.c | 8
-rw-r--r--  drivers/media/video/ivtv/ivtv-ioctl.c | 8
-rw-r--r--  drivers/media/video/m5mols/m5mols.h | 57
-rw-r--r--  drivers/media/video/m5mols/m5mols_capture.c | 22
-rw-r--r--  drivers/media/video/m5mols/m5mols_controls.c | 6
-rw-r--r--  drivers/media/video/m5mols/m5mols_core.c | 144
-rw-r--r--  drivers/media/video/m5mols/m5mols_reg.h | 21
-rw-r--r--  drivers/media/video/msp3400-driver.c | 12
-rw-r--r--  drivers/media/video/mx1_camera.c | 10
-rw-r--r--  drivers/media/video/omap/omap_vout.c | 18
-rw-r--r--  drivers/media/video/omap/omap_voutlib.c | 6
-rw-r--r--  drivers/media/video/omap3isp/isp.c | 2
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-hdw.c | 4
-rw-r--r--  drivers/media/video/pwc/pwc-ctrl.c | 2
-rw-r--r--  drivers/media/video/pwc/pwc-if.c | 152
-rw-r--r--  drivers/media/video/pwc/pwc.h | 4
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-capture.c | 21
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-core.c | 28
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-core.h | 29
-rw-r--r--  drivers/media/video/saa7134/saa7134-input.c | 2
-rw-r--r--  drivers/media/video/tuner-core.c | 229
-rw-r--r--  drivers/media/video/uvc/uvc_entity.c | 34
-rw-r--r--  drivers/media/video/uvc/uvc_queue.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_video.c | 4
-rw-r--r--  drivers/media/video/v4l2-dev.c | 39
-rw-r--r--  drivers/media/video/v4l2-ioctl.c | 18
-rw-r--r--  drivers/media/video/videobuf2-core.c | 14
-rw-r--r--  drivers/media/video/videobuf2-dma-sg.c | 2
-rw-r--r--  drivers/mfd/Kconfig | 3
-rw-r--r--  drivers/mfd/Makefile | 1
-rw-r--r--  drivers/mfd/asic3.c | 1
-rw-r--r--  drivers/mfd/htc-pasic3.c | 1
-rw-r--r--  drivers/mfd/omap-usb-host.c | 131
-rw-r--r--  drivers/mfd/tps65911-comparator.c | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 77
-rw-r--r--  drivers/mmc/host/mmci.c | 2
-rw-r--r--  drivers/mmc/host/mmci.h | 5
-rw-r--r--  drivers/net/8139too.c | 1
-rw-r--r--  drivers/net/bna/bnad.c | 7
-rw-r--r--  drivers/net/greth.c | 7
-rw-r--r--  drivers/net/hamradio/6pack.c | 4
-rw-r--r--  drivers/net/hamradio/mkiss.c | 4
-rw-r--r--  drivers/net/natsemi.c | 9
-rw-r--r--  drivers/net/qlge/qlge.h | 3
-rw-r--r--  drivers/net/qlge/qlge_main.c | 40
-rw-r--r--  drivers/net/sh_eth.c | 6
-rw-r--r--  drivers/net/slip.c | 2
-rw-r--r--  drivers/net/usb/hso.c | 7
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 138
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath5k/desc.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath5k/eeprom.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath5k/pci.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath5k/sysfs.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 25
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 1
-rw-r--r--  drivers/pci/pci.c | 2
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/setup-bus.c | 15
-rw-r--r--  drivers/pcmcia/pxa2xx_vpac270.c | 4
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 47
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 1
-rw-r--r--  drivers/platform/x86/compal-laptop.c | 4
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 30
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 11
-rw-r--r--  drivers/platform/x86/intel_oaktrail.c | 1
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 72
-rw-r--r--  drivers/regulator/db8500-prcmu.c | 14
-rw-r--r--  drivers/regulator/max8952.c | 2
-rw-r--r--  drivers/regulator/max8997.c | 55
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 4
-rw-r--r--  drivers/s390/cio/css.c | 8
-rw-r--r--  drivers/scsi/Kconfig | 13
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/hpsa.c | 16
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 4
-rw-r--r--  drivers/scsi/isci/Makefile | 8
-rw-r--r--  drivers/scsi/isci/firmware/Makefile | 19
-rw-r--r--  drivers/scsi/isci/firmware/README | 36
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.c | 99
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.h | 77
-rw-r--r--  drivers/scsi/isci/host.c | 2751
-rw-r--r--  drivers/scsi/isci/host.h | 542
-rw-r--r--  drivers/scsi/isci/init.c | 565
-rw-r--r--  drivers/scsi/isci/isci.h | 538
-rw-r--r--  drivers/scsi/isci/phy.c | 1312
-rw-r--r--  drivers/scsi/isci/phy.h | 504
-rw-r--r--  drivers/scsi/isci/port.c | 1757
-rw-r--r--  drivers/scsi/isci/port.h | 306
-rw-r--r--  drivers/scsi/isci/port_config.c | 754
-rw-r--r--  drivers/scsi/isci/probe_roms.c | 243
-rw-r--r--  drivers/scsi/isci/probe_roms.h | 249
-rw-r--r--  drivers/scsi/isci/registers.h | 1934
-rw-r--r--  drivers/scsi/isci/remote_device.c | 1501
-rw-r--r--  drivers/scsi/isci/remote_device.h | 352
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 627
-rw-r--r--  drivers/scsi/isci/remote_node_context.h | 224
-rw-r--r--  drivers/scsi/isci/remote_node_table.c | 598
-rw-r--r--  drivers/scsi/isci/remote_node_table.h | 188
-rw-r--r--  drivers/scsi/isci/request.c | 3391
-rw-r--r--  drivers/scsi/isci/request.h | 448
-rw-r--r--  drivers/scsi/isci/sas.h | 219
-rw-r--r--  drivers/scsi/isci/scu_completion_codes.h | 283
-rw-r--r--  drivers/scsi/isci/scu_event_codes.h | 336
-rw-r--r--  drivers/scsi/isci/scu_remote_node_context.h | 229
-rw-r--r--  drivers/scsi/isci/scu_task_context.h | 942
-rw-r--r--  drivers/scsi/isci/task.c | 1676
-rw-r--r--  drivers/scsi/isci/task.h | 367
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.c | 225
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.h | 278
-rw-r--r--  drivers/spi/spi_s3c64xx.c | 4
-rw-r--r--  drivers/ssb/driver_pcicore.c | 18
-rw-r--r--  drivers/staging/lirc/lirc_imon.c | 10
-rw-r--r--  drivers/staging/lirc/lirc_serial.c | 44
-rw-r--r--  drivers/staging/lirc/lirc_sir.c | 11
-rw-r--r--  drivers/staging/lirc/lirc_zilog.c | 4
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 3
-rw-r--r--  drivers/usb/core/message.c | 11
-rw-r--r--  drivers/usb/gadget/fsl_udc_core.c | 36
-rw-r--r--  drivers/video/amba-clcd.c | 2
-rw-r--r--  drivers/video/fsl-diu-fb.c | 16
-rw-r--r--  drivers/video/geode/gx1fb_core.c | 14
-rw-r--r--  drivers/video/hecubafb.c | 3
-rw-r--r--  drivers/video/sh_mobile_meram.c | 2
-rw-r--r--  drivers/video/sm501fb.c | 2
-rw-r--r--  drivers/video/udlfb.c | 8
-rw-r--r--  drivers/video/vesafb.c | 1
-rw-r--r--  drivers/w1/masters/ds1wm.c | 5
-rw-r--r--  firmware/Makefile | 1
-rw-r--r--  firmware/isci/isci_firmware.bin.ihex | 16
-rw-r--r--  fs/binfmt_elf_fdpic.c | 1
-rw-r--r--  fs/btrfs/ctree.h | 5
-rw-r--r--  fs/btrfs/inode.c | 12
-rw-r--r--  fs/btrfs/super.c | 6
-rw-r--r--  fs/btrfs/volumes.c | 3
-rw-r--r--  fs/ceph/file.c | 14
-rw-r--r--  fs/cifs/cifsfs.c | 8
-rw-r--r--  fs/cifs/cifsproto.h | 6
-rw-r--r--  fs/cifs/connect.c | 132
-rw-r--r--  fs/cifs/fscache.c | 1
-rw-r--r--  fs/dcache.c | 51
-rw-r--r--  fs/fscache/page.c | 44
-rw-r--r--  fs/gfs2/aops.c | 3
-rw-r--r--  fs/gfs2/glops.c | 8
-rw-r--r--  fs/gfs2/incore.h | 2
-rw-r--r--  fs/gfs2/log.c | 1
-rw-r--r--  fs/gfs2/ops_fstype.c | 3
-rw-r--r--  fs/gfs2/super.c | 36
-rw-r--r--  fs/gfs2/sys.c | 7
-rw-r--r--  fs/hfsplus/super.c | 2
-rw-r--r--  fs/hfsplus/wrapper.c | 7
-rw-r--r--  fs/locks.c | 30
-rw-r--r--  fs/namei.c | 2
-rw-r--r--  fs/nfs/fscache.c | 8
-rw-r--r--  fs/nfs/nfs4filelayout.c | 1
-rw-r--r--  fs/nfs/nfs4xdr.c | 2
-rw-r--r--  fs/nfs/write.c | 2
-rw-r--r--  fs/xfs/xfs_inode_item.c | 14
-rw-r--r--  fs/xfs/xfs_trans.c | 4
-rw-r--r--  include/drm/drm_pciids.h | 2
-rw-r--r--  include/linux/drbd_limits.h | 4
-rw-r--r--  include/linux/fscache.h | 21
-rw-r--r--  include/linux/fsl-diu-fb.h | 6
-rw-r--r--  include/linux/irq.h | 3
-rw-r--r--  include/linux/memory.h | 2
-rw-r--r--  include/linux/mfd/ds1wm.h | 7
-rw-r--r--  include/linux/mmc/card.h | 13
-rw-r--r--  include/linux/opp.h | 8
-rw-r--r--  include/linux/pm_domain.h | 23
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/linux/suspend.h | 8
-rw-r--r--  include/media/lirc_dev.h | 2
-rw-r--r--  include/media/m5mols.h | 4
-rw-r--r--  include/media/v4l2-subdev.h | 10
-rw-r--r--  include/net/cfg80211.h | 2
-rw-r--r--  include/net/dst.h | 1
-rw-r--r--  include/net/sctp/command.h | 1
-rw-r--r--  include/net/sctp/ulpevent.h | 2
-rw-r--r--  include/sound/sb16_csp.h | 9
-rw-r--r--  kernel/irq/generic-chip.c | 18
-rw-r--r--  kernel/jump_label.c | 14
-rw-r--r--  kernel/power/main.c | 5
-rw-r--r--  kernel/power/snapshot.c | 6
-rw-r--r--  kernel/power/suspend.c | 20
-rw-r--r--  kernel/rcutree.c | 26
-rw-r--r--  kernel/rcutree_plugin.h | 15
-rw-r--r--  kernel/resource.c | 116
-rw-r--r--  kernel/sched.c | 12
-rw-r--r--  lib/debugobjects.c | 2
-rw-r--r--  mm/memcontrol.c | 140
-rw-r--r--  mm/memory.c | 1
-rw-r--r--  mm/nommu.c | 9
-rw-r--r--  mm/vmscan.c | 59
-rw-r--r--  net/8021q/vlan_dev.c | 5
-rw-r--r--  net/bluetooth/hci_conn.c | 3
-rw-r--r--  net/bluetooth/hidp/core.c | 18
-rw-r--r--  net/bluetooth/hidp/hidp.h | 1
-rw-r--r--  net/bluetooth/l2cap_core.c | 5
-rw-r--r--  net/bridge/br_device.c | 4
-rw-r--r--  net/bridge/br_input.c | 6
-rw-r--r--  net/ceph/osd_client.c | 10
-rw-r--r--  net/core/dst.c | 6
-rw-r--r--  net/ipv4/af_inet.c | 4
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/tcp.c | 10
-rw-r--r--  net/ipv4/udp.c | 10
-rw-r--r--  net/ipv4/xfrm4_output.c | 7
-rw-r--r--  net/ipv6/af_inet6.c | 2
-rw-r--r--  net/ipv6/route.c | 25
-rw-r--r--  net/mac80211/scan.c | 3
-rw-r--r--  net/mac80211/wpa.c | 24
-rw-r--r--  net/sctp/outqueue.c | 20
-rw-r--r--  net/sctp/protocol.c | 11
-rw-r--r--  net/sctp/sm_sideeffect.c | 20
-rw-r--r--  net/sctp/sm_statefuns.c | 32
-rw-r--r--  net/sctp/sm_statetable.c | 2
-rw-r--r--  net/sctp/socket.c | 36
-rw-r--r--  net/sctp/ulpevent.c | 16
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 2
-rw-r--r--  net/sunrpc/sched.c | 27
-rw-r--r--  net/wireless/core.c | 12
-rw-r--r--  net/wireless/core.h | 2
-rw-r--r--  net/wireless/nl80211.c | 27
-rw-r--r--  net/wireless/scan.c | 10
-rw-r--r--  net/xfrm/xfrm_policy.c | 6
-rw-r--r--  net/xfrm/xfrm_state.c | 2
-rwxr-xr-x  scripts/depmod.sh | 10
-rw-r--r--  sound/atmel/abdac.c | 2
-rw-r--r--  sound/atmel/ac97c.c | 2
-rw-r--r--  sound/pci/cs5535audio/cs5535audio_pcm.c | 4
-rw-r--r--  sound/pci/hda/hda_eld.c | 2
-rw-r--r--  sound/pci/hda/patch_conexant.c | 4
-rw-r--r--  sound/pci/hda/patch_realtek.c | 33
-rw-r--r--  sound/soc/blackfin/bf5xx-i2s-pcm.c | 13
-rw-r--r--  sound/soc/codecs/ak4642.c | 2
-rw-r--r--  sound/soc/codecs/tlv320aic26.c | 14
-rw-r--r--  sound/soc/codecs/tlv320aic3x.c | 9
-rw-r--r--  sound/soc/codecs/wm8731.c | 29
-rw-r--r--  sound/soc/codecs/wm8994.c | 2
-rw-r--r--  sound/soc/soc-core.c | 5
-rw-r--r--  sound/soc/tegra/tegra_i2s.c | 6
-rw-r--r--  sound/spi/at73c213.c | 2
399 files changed, 27137 insertions, 1954 deletions
diff --git a/Documentation/Changes b/Documentation/Changes
index 5f4828a034e3..b17580885273 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -2,13 +2,7 @@ Intro
=====
This document is designed to provide a list of the minimum levels of
-software necessary to run the 2.6 kernels, as well as provide brief
-instructions regarding any other "Gotchas" users may encounter when
-trying life on the Bleeding Edge. If upgrading from a pre-2.4.x
-kernel, please consult the Changes file included with 2.4.x kernels for
-additional information; most of that information will not be repeated
-here. Basically, this document assumes that your system is already
-functional and running at least 2.4.x kernels.
+software necessary to run the 3.0 kernels.
This document is originally based on my "Changes" file for 2.0.x kernels
and therefore owes credit to the same people as that file (Jared Mauch,
@@ -22,11 +16,10 @@ Upgrade to at *least* these software revisions before thinking you've
encountered a bug! If you're unsure what version you're currently
running, the suggested command should tell you.
-Again, keep in mind that this list assumes you are already
-functionally running a Linux 2.4 kernel. Also, not all tools are
-necessary on all systems; obviously, if you don't have any ISDN
-hardware, for example, you probably needn't concern yourself with
-isdn4k-utils.
+Again, keep in mind that this list assumes you are already functionally
+running a Linux kernel. Also, not all tools are necessary on all
+systems; obviously, if you don't have any ISDN hardware, for example,
+you probably needn't concern yourself with isdn4k-utils.
o Gnu C 3.2 # gcc --version
o Gnu make 3.80 # make --version
@@ -114,12 +107,12 @@ Ksymoops
If the unthinkable happens and your kernel oopses, you may need the
ksymoops tool to decode it, but in most cases you don't.
-In the 2.6 kernel it is generally preferred to build the kernel with
-CONFIG_KALLSYMS so that it produces readable dumps that can be used as-is
-(this also produces better output than ksymoops).
-If for some reason your kernel is not build with CONFIG_KALLSYMS and
-you have no way to rebuild and reproduce the Oops with that option, then
-you can still decode that Oops with ksymoops.
+It is generally preferred to build the kernel with CONFIG_KALLSYMS so
+that it produces readable dumps that can be used as-is (this also
+produces better output than ksymoops). If for some reason your kernel
+is not build with CONFIG_KALLSYMS and you have no way to rebuild and
+reproduce the Oops with that option, then you can still decode that Oops
+with ksymoops.
Module-Init-Tools
-----------------
@@ -261,8 +254,8 @@ needs to be recompiled or (preferably) upgraded.
NFS-utils
---------
-In 2.4 and earlier kernels, the nfs server needed to know about any
-client that expected to be able to access files via NFS. This
+In ancient (2.4 and earlier) kernels, the nfs server needed to know
+about any client that expected to be able to access files via NFS. This
information would be given to the kernel by "mountd" when the client
mounted the filesystem, or by "exportfs" at system startup. exportfs
would take information about active clients from /var/lib/nfs/rmtab.
@@ -272,11 +265,11 @@ which is not always easy, particularly when trying to implement
fail-over. Even when the system is working well, rmtab suffers from
getting lots of old entries that never get removed.
-With 2.6 we have the option of having the kernel tell mountd when it
-gets a request from an unknown host, and mountd can give appropriate
-export information to the kernel. This removes the dependency on
-rmtab and means that the kernel only needs to know about currently
-active clients.
+With modern kernels we have the option of having the kernel tell mountd
+when it gets a request from an unknown host, and mountd can give
+appropriate export information to the kernel. This removes the
+dependency on rmtab and means that the kernel only needs to know about
+currently active clients.
To enable this new functionality, you need to:
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 58b0bf917834..fa6e25b94a54 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -680,8 +680,8 @@ ones already enabled by DEBUG.
Chapter 14: Allocating memory
The kernel provides the following general purpose memory allocators:
-kmalloc(), kzalloc(), kcalloc(), and vmalloc(). Please refer to the API
-documentation for further information about them.
+kmalloc(), kzalloc(), kcalloc(), vmalloc(), and vzalloc(). Please refer to
+the API documentation for further information about them.
The preferred form for passing a size of a struct is the following:
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index cd45c8ea7463..84f0a15fc210 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -77,7 +77,7 @@ Throttling/Upper Limit policy
- Specify a bandwidth rate on particular device for root group. The format
for policy is "<major>:<minor> <byes_per_second>".
- echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device
+ echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
Above will put a limit of 1MB/second on reads happening for root group
on device having major/minor number 8:16.
@@ -90,7 +90,7 @@ Throttling/Upper Limit policy
1024+0 records out
4194304 bytes (4.2 MB) copied, 4.0001 s, 1.0 MB/s
- Limits for writes can be put using blkio.write_bps_device file.
+ Limits for writes can be put using blkio.throttle.write_bps_device file.
Hierarchical Cgroups
====================
@@ -286,28 +286,28 @@ Throttling/Upper limit policy files
specified in bytes per second. Rules are per deivce. Following is
the format.
- echo "<major>:<minor> <rate_bytes_per_second>" > /cgrp/blkio.read_bps_device
+ echo "<major>:<minor> <rate_bytes_per_second>" > /cgrp/blkio.throttle.read_bps_device
- blkio.throttle.write_bps_device
- Specifies upper limit on WRITE rate to the device. IO rate is
specified in bytes per second. Rules are per deivce. Following is
the format.
- echo "<major>:<minor> <rate_bytes_per_second>" > /cgrp/blkio.write_bps_device
+ echo "<major>:<minor> <rate_bytes_per_second>" > /cgrp/blkio.throttle.write_bps_device
- blkio.throttle.read_iops_device
- Specifies upper limit on READ rate from the device. IO rate is
specified in IO per second. Rules are per deivce. Following is
the format.
- echo "<major>:<minor> <rate_io_per_second>" > /cgrp/blkio.read_iops_device
+ echo "<major>:<minor> <rate_io_per_second>" > /cgrp/blkio.throttle.read_iops_device
- blkio.throttle.write_iops_device
- Specifies upper limit on WRITE rate to the device. IO rate is
specified in io per second. Rules are per deivce. Following is
the format.
- echo "<major>:<minor> <rate_io_per_second>" > /cgrp/blkio.write_iops_device
+ echo "<major>:<minor> <rate_io_per_second>" > /cgrp/blkio.throttle.write_iops_device
Note: If both BW and IOPS rules are specified for a device, then IO is
subjectd to both the constraints.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 72e238465b0b..b1c921c27519 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -583,3 +583,25 @@ Why: Superseded by the UVCIOC_CTRL_QUERY ioctl.
Who: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
----------------------------
+
+What: For VIDIOC_S_FREQUENCY the type field must match the device node's type.
+ If not, return -EINVAL.
+When: 3.2
+Why: It makes no sense to switch the tuner to radio mode by calling
+ VIDIOC_S_FREQUENCY on a video node, or to switch the tuner to tv mode by
+ calling VIDIOC_S_FREQUENCY on a radio node. This is the first step of a
+ move to more consistent handling of tv and radio tuners.
+Who: Hans Verkuil <hans.verkuil@cisco.com>
+
+----------------------------
+
+What: Opening a radio device node will no longer automatically switch the
+ tuner mode from tv to radio.
+When: 3.3
+Why: Just opening a V4L device should not change the state of the hardware
+ like that. It's very unexpected and against the V4L spec. Instead, you
+ switch to radio mode by calling VIDIOC_S_FREQUENCY. This is the second
+ and last step of the move to consistent handling of tv and radio tuners.
+Who: Hans Verkuil <hans.verkuil@cisco.com>
+
+----------------------------
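
[Editorial illustration, not part of the patch above: the schedule entries hinge on VIDIOC_S_FREQUENCY being the explicit way to put a tuner into radio mode. A minimal user-space sketch of such a call follows; the device path and station frequency are made-up example values.]

	/* Sketch: tune an FM radio node with VIDIOC_S_FREQUENCY.
	 * /dev/radio0 and 99.5 MHz are illustrative values only. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_frequency f;
		int fd = open("/dev/radio0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&f, 0, sizeof(f));
		f.tuner = 0;
		f.type = V4L2_TUNER_RADIO;	/* type must match the node per the new rule */
		f.frequency = 99.5 * 16000;	/* units of 62.5 Hz when V4L2_TUNER_CAP_LOW is set */

		if (ioctl(fd, VIDIOC_S_FREQUENCY, &f) < 0)
			perror("VIDIOC_S_FREQUENCY");
		return 0;
	}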
diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt
index a167ab876c35..7cc6bf2871eb 100644
--- a/Documentation/filesystems/caching/netfs-api.txt
+++ b/Documentation/filesystems/caching/netfs-api.txt
@@ -673,6 +673,22 @@ storage request to complete, or it may attempt to cancel the storage request -
in which case the page will not be stored in the cache this time.
+BULK INODE PAGE UNCACHE
+-----------------------
+
+A convenience routine is provided to perform an uncache on all the pages
+attached to an inode. This assumes that the pages on the inode correspond on a
+1:1 basis with the pages in the cache.
+
+ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+ struct inode *inode);
+
+This takes the netfs cookie that the pages were cached with and the inode that
+the pages are attached to. This function will wait for pages to finish being
+written to the cache and for the cache to finish with the page generally. No
+error is returned.
+
+
==========================
INDEX AND DATA FILE UPDATE
==========================
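
[Editorial illustration, not part of the patch above: a minimal sketch of how a network filesystem might call the new bulk-uncache helper when evicting an inode. The examplefs names, the per-inode structure, and the cookie field are assumptions for the sketch; only fscache_uncache_all_inode_pages() itself comes from the API text above.]

	#include <linux/fs.h>
	#include <linux/fscache.h>

	/* Hypothetical per-inode structure for this sketch. */
	struct examplefs_inode {
		struct fscache_cookie *fscache;
		struct inode vfs_inode;
	};

	static inline struct examplefs_inode *EXAMPLEFS_I(struct inode *inode)
	{
		return container_of(inode, struct examplefs_inode, vfs_inode);
	}

	static void examplefs_evict_inode(struct inode *inode)
	{
		struct examplefs_inode *ei = EXAMPLEFS_I(inode);

		truncate_inode_pages(&inode->i_data, 0);

		/* Waits for pending cache writes, then uncaches every page
		 * attached to this inode, per the API description above. */
		fscache_uncache_all_inode_pages(ei->fscache, inode);
		fscache_relinquish_cookie(ei->fscache, 0);
	}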
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg
index 84d2623810f3..de91c0db5846 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg
@@ -22,6 +22,10 @@ Supported chips:
Prefix: 'f71869'
Addresses scanned: none, address read from Super I/O config space
Datasheet: Available from the Fintek website
+ * Fintek F71869A
+ Prefix: 'f71869a'
+ Addresses scanned: none, address read from Super I/O config space
+ Datasheet: Not public
* Fintek F71882FG and F71883FG
Prefix: 'f71882fg'
Addresses scanned: none, address read from Super I/O config space
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index 0393c89277c0..a10f73624ad3 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -9,8 +9,8 @@ Supported chips:
Socket S1G3: Athlon II, Sempron, Turion II
* AMD Family 11h processors:
Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
-* AMD Family 12h processors: "Llano"
-* AMD Family 14h processors: "Brazos" (C/E/G-Series)
+* AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
+* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
* AMD Family 15h processors: "Bulldozer"
Prefix: 'k10temp'
@@ -20,12 +20,16 @@ Supported chips:
http://support.amd.com/us/Processor_TechDocs/31116.pdf
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
http://support.amd.com/us/Processor_TechDocs/41256.pdf
+ BIOS and Kernel Developer's Guide (BKDG) for AMD Family 12h Processors:
+ http://support.amd.com/us/Processor_TechDocs/41131.pdf
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
http://support.amd.com/us/Processor_TechDocs/43170.pdf
Revision Guide for AMD Family 10h Processors:
http://support.amd.com/us/Processor_TechDocs/41322.pdf
Revision Guide for AMD Family 11h Processors:
http://support.amd.com/us/Processor_TechDocs/41788.pdf
+ Revision Guide for AMD Family 12h Processors:
+ http://support.amd.com/us/Processor_TechDocs/44739.pdf
Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
http://support.amd.com/us/Processor_TechDocs/47534.pdf
AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fd248a318211..aa47be71df4c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2015,6 +2015,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the default.
off: Turn ECRC off
on: Turn ECRC on.
+ realloc reallocate PCI resources if allocations done by BIOS
+ are erroneous.
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 1565eefd6fd5..61815483efa3 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -534,6 +534,8 @@ Events that are never propagated by the driver:
0x2404 System is waking up from hibernation to undock
0x2405 System is waking up from hibernation to eject bay
0x5010 Brightness level changed/control event
+0x6000 KEYBOARD: Numlock key pressed
+0x6005 KEYBOARD: Fn key pressed (TO BE VERIFIED)
Events that are propagated by the driver to userspace:
@@ -545,6 +547,8 @@ Events that are propagated by the driver to userspace:
0x3006 Bay hotplug request (hint to power up SATA link when
the optical drive tray is ejected)
0x4003 Undocked (see 0x2x04), can sleep again
+0x4010 Docked into hotplug port replicator (non-ACPI dock)
+0x4011 Undocked from hotplug port replicator (non-ACPI dock)
0x500B Tablet pen inserted into its storage bay
0x500C Tablet pen removed from its storage bay
0x6011 ALARM: battery is too hot
@@ -552,6 +556,7 @@ Events that are propagated by the driver to userspace:
0x6021 ALARM: a sensor is too hot
0x6022 ALARM: a sensor is extremely hot
0x6030 System thermal table changed
+0x6040 Nvidia Optimus/AC adapter related (TO BE VERIFIED)
Battery nearly empty alarms are a last resort attempt to get the
operating system to hibernate or shutdown cleanly (0x2313), or shutdown
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d3d653a5f9b9..bfe924217f24 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -346,7 +346,7 @@ tcp_orphan_retries - INTEGER
when RTO retransmissions remain unacknowledged.
See tcp_retries2 for more details.
- The default value is 7.
+ The default value is 8.
If your machine is a loaded WEB server,
you should think about lowering this value, such sockets
may consume significant resources. Cf. tcp_max_orphans.
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index 5ae70a12c1e2..3035d00757ad 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -321,6 +321,8 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
addition to CONFIG_PM as power management feature is required to
dynamically scale voltage and frequency in a system.
+opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table
+
7. Data Structures
==================
Typically an SoC contains multiple voltage domains which are variable. Each
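
[Editorial illustration, not part of the patch above: a minimal sketch of how a cpufreq driver might pair the newly documented opp_free_cpufreq_table() with opp_init_cpufreq_table(). The example_* names and error handling are assumptions; "dev" is assumed to be the CPU device the OPP table was registered against.]

	#include <linux/cpufreq.h>
	#include <linux/device.h>
	#include <linux/opp.h>

	static struct cpufreq_frequency_table *freq_table;

	static int example_cpufreq_init_table(struct device *dev)
	{
		int ret;

		ret = opp_init_cpufreq_table(dev, &freq_table);
		if (ret)
			dev_err(dev, "failed to init cpufreq table: %d\n", ret);
		return ret;
	}

	static void example_cpufreq_exit_table(struct device *dev)
	{
		/* Releases the table allocated by opp_init_cpufreq_table(). */
		opp_free_cpufreq_table(dev, &freq_table);
	}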
diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt
index 2e3c64b1a6a5..9dbe885ecd8d 100644
--- a/Documentation/spinlocks.txt
+++ b/Documentation/spinlocks.txt
@@ -13,18 +13,8 @@ static DEFINE_SPINLOCK(xxx_lock);
The above is always safe. It will disable interrupts _locally_, but the
spinlock itself will guarantee the global lock, so it will guarantee that
there is only one thread-of-control within the region(s) protected by that
-lock. This works well even under UP. The above sequence under UP
-essentially is just the same as doing
-
- unsigned long flags;
-
- save_flags(flags); cli();
- ... critical section ...
- restore_flags(flags);
-
-so the code does _not_ need to worry about UP vs SMP issues: the spinlocks
-work correctly under both (and spinlocks are actually more efficient on
-architectures that allow doing the "save_flags + cli" in one operation).
+lock. This works well even under UP also, so the code does _not_ need to
+worry about UP vs SMP issues: the spinlocks work correctly under both.
NOTE! Implications of spin_locks for memory are further described in:
@@ -36,27 +26,7 @@ The above is usually pretty simple (you usually need and want only one
spinlock for most things - using more than one spinlock can make things a
lot more complex and even slower and is usually worth it only for
sequences that you _know_ need to be split up: avoid it at all cost if you
-aren't sure). HOWEVER, it _does_ mean that if you have some code that does
-
- cli();
- .. critical section ..
- sti();
-
-and another sequence that does
-
- spin_lock_irqsave(flags);
- .. critical section ..
- spin_unlock_irqrestore(flags);
-
-then they are NOT mutually exclusive, and the critical regions can happen
-at the same time on two different CPU's. That's fine per se, but the
-critical regions had better be critical for different things (ie they
-can't stomp on each other).
-
-The above is a problem mainly if you end up mixing code - for example the
-routines in ll_rw_block() tend to use cli/sti to protect the atomicity of
-their actions, and if a driver uses spinlocks instead then you should
-think about issues like the above.
+aren't sure).
This is really the only really hard part about spinlocks: once you start
using spinlocks they tend to expand to areas you might not have noticed
@@ -120,11 +90,10 @@ Lesson 3: spinlocks revisited.
The single spin-lock primitives above are by no means the only ones. They
are the most safe ones, and the ones that work under all circumstances,
-but partly _because_ they are safe they are also fairly slow. They are
-much faster than a generic global cli/sti pair, but slower than they'd
-need to be, because they do have to disable interrupts (which is just a
-single instruction on a x86, but it's an expensive one - and on other
-architectures it can be worse).
+but partly _because_ they are safe they are also fairly slow. They are slower
+than they'd need to be, because they do have to disable interrupts
+(which is just a single instruction on a x86, but it's an expensive one -
+and on other architectures it can be worse).
If you have a case where you have to protect a data structure across
several CPU's and you want to use spinlocks you can potentially use
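
[Editorial illustration, not part of the patch above: a minimal sketch of the irq-safe locking pattern the rewritten text describes, reusing the document's own xxx_lock name; the protected counter is illustrative.]

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(xxx_lock);
	static int xxx_shared_counter;	/* illustrative data protected by xxx_lock */

	static void xxx_bump(void)
	{
		unsigned long flags;

		/* Disables interrupts locally and takes the lock, so the update
		 * is safe against both other CPUs and local interrupt handlers. */
		spin_lock_irqsave(&xxx_lock, flags);
		xxx_shared_counter++;
		spin_unlock_irqrestore(&xxx_lock, flags);
	}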
diff --git a/MAINTAINERS b/MAINTAINERS
index ae563fad2271..187282da9213 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -594,6 +594,16 @@ S: Maintained
F: arch/arm/lib/floppydma.S
F: arch/arm/include/asm/floppy.h
+ARM PMU PROFILING AND DEBUGGING
+M: Will Deacon <will.deacon@arm.com>
+S: Maintained
+F: arch/arm/kernel/perf_event*
+F: arch/arm/oprofile/common.c
+F: arch/arm/kernel/pmu.c
+F: arch/arm/include/asm/pmu.h
+F: arch/arm/kernel/hw_breakpoint.c
+F: arch/arm/include/asm/hw_breakpoint.h
+
ARM PORT
M: Russell King <linux@arm.linux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2197,7 +2207,7 @@ F: drivers/acpi/dock.c
DOCUMENTATION
M: Randy Dunlap <rdunlap@xenotime.net>
L: linux-doc@vger.kernel.org
-T: quilt oss.oracle.com/~rdunlap/kernel-doc-patches/current/
+T: quilt http://userweb.kernel.org/~rdunlap/kernel-doc-patches/current/
S: Maintained
F: Documentation/
@@ -4982,7 +4992,7 @@ F: drivers/power/power_supply*
PNP SUPPORT
M: Adam Belay <abelay@mit.edu>
-M: Bjorn Helgaas <bjorn.helgaas@hp.com>
+M: Bjorn Helgaas <bhelgaas@google.com>
S: Maintained
F: drivers/pnp/
@@ -6733,6 +6743,7 @@ F: fs/fat/
VIDEOBUF2 FRAMEWORK
M: Pawel Osciak <pawel@osciak.com>
M: Marek Szyprowski <m.szyprowski@samsung.com>
+M: Kyungmin Park <kyungmin.park@samsung.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/video/videobuf2-*
diff --git a/Makefile b/Makefile
index dc670467fb8c..60d91f76c2fd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Sneaky Weasel
# *DOCUMENTATION*
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index e5681636626f..841df7d21c2f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -255,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
if (buf == 0) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
- return 0;
+ return ~0;
}
dev_dbg(dev,
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d53c0abc4dd3..2b5b1421596c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -583,7 +583,7 @@ static int armpmu_event_init(struct perf_event *event)
static void armpmu_enable(struct pmu *pmu)
{
/* Enable all of the perf events on hardware. */
- int idx;
+ int idx, enabled = 0;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!armpmu)
@@ -596,9 +596,11 @@ static void armpmu_enable(struct pmu *pmu)
continue;
armpmu->enable(&event->hw, idx);
+ enabled = 1;
}
- armpmu->start();
+ if (enabled)
+ armpmu->start();
}
static void armpmu_disable(struct pmu *pmu)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ed11fb08b05a..acbb447ac6b5 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -73,6 +73,7 @@ __setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct machine_desc *desc);
+extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
unsigned int processor_id;
@@ -900,6 +901,7 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
+ sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
paging_init(mdesc);
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 60636f499cb3..2c277d40cee6 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -115,7 +115,7 @@ static void __cpuinit twd_calibrate_rate(void)
twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
- (twd_timer_rate / 1000000) % 100);
+ (twd_timer_rate / 10000) % 100);
}
}
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index 17fae4a42ab5..f1013d08bb57 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -223,15 +223,15 @@ static struct clk *periph_clocks[] __initdata = {
};
static struct clk_lookup periph_clocks_lookups[] = {
- CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
- CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+ CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+ CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
- CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
- CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
index cd850ed6f335..dba0d8d8a4bd 100644
--- a/arch/arm/mach-at91/at91cap9_devices.c
+++ b/arch/arm/mach-at91/at91cap9_devices.c
@@ -1220,7 +1220,7 @@ void __init at91_set_serial_console(unsigned portnr)
{
if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
- at91cap9_set_console_clock(portnr);
+ at91cap9_set_console_clock(at91_uarts[portnr]->id);
}
}
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index b228ce9e21a1..83a1a3fee554 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -199,9 +199,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
- CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
- CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
- CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index a0ba475be04c..7227755ffec6 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -1135,7 +1135,7 @@ void __init at91_set_serial_console(unsigned portnr)
{
if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
- at91rm9200_set_console_clock(portnr);
+ at91rm9200_set_console_clock(at91_uarts[portnr]->id);
}
}
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 1fdeb9058a76..39f81f47b4ba 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -1173,7 +1173,7 @@ void __init at91_set_serial_console(unsigned portnr)
{
if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
- at91sam9260_set_console_clock(portnr);
+ at91sam9260_set_console_clock(at91_uarts[portnr]->id);
}
}
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 3eb4538fceeb..5004bf0a05f2 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -1013,7 +1013,7 @@ void __init at91_set_serial_console(unsigned portnr)
{
if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
- at91sam9261_set_console_clock(portnr);
+ at91sam9261_set_console_clock(at91_uarts[portnr]->id);
}
}
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index ffe081b77ed0..a050f41fc860 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -1395,7 +1395,7 @@ void __init at91_set_serial_console(unsigned portnr)
{
if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
- at91sam9263_set_console_clock(portnr);
+ at91sam9263_set_console_clock(at91_uarts[portnr]->id);
}
}
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 2bb6ff9af1c7..11e214121b23 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -217,11 +217,11 @@ static struct clk *periph_clocks[] __initdata = {
static struct clk_lookup periph_clocks_lookups[] = {
/* One additional fake clock for ohci */
CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
- CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk),
- CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
- CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
- CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
- CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+ CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk),
+ CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+ CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.0", &mmc0_clk),
+ CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.1", &mmc1_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk),
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 05674865bc21..600bffb01edb 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -1550,7 +1550,7 @@ void __init at91_set_serial_console(unsigned portnr)
{
if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
- at91sam9g45_set_console_clock(portnr);
+ at91sam9g45_set_console_clock(at91_uarts[portnr]->id);
}
}
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 1a40f16b66c8..29dff18ed130 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -191,8 +191,8 @@ static struct clk *periph_clocks[] __initdata = {
};
static struct clk_lookup periph_clocks_lookups[] = {
- CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
- CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+ CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+ CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index c296045f2b6a..aacb19dc9225 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -1168,7 +1168,7 @@ void __init at91_set_serial_console(unsigned portnr)
{
if (portnr < ATMEL_MAX_UART) {
atmel_default_console_device = at91_uarts[portnr];
- at91sam9rl_set_console_clock(portnr);
+ at91sam9rl_set_console_clock(at91_uarts[portnr]->id);
}
}
diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c
index 1904fdf87613..cdb65d483250 100644
--- a/arch/arm/mach-at91/board-cap9adk.c
+++ b/arch/arm/mach-at91/board-cap9adk.c
@@ -215,7 +215,7 @@ static void __init cap9adk_add_device_nand(void)
csa = at91_sys_read(AT91_MATRIX_EBICSA);
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V);
- cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit();
+ cap9adk_nand_data.bus_width_16 = board_have_nand_16bit();
/* setup bus-width (8 or 16) */
if (cap9adk_nand_data.bus_width_16)
cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16;
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index d600dc123227..5c240743c5b7 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -214,7 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
- ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+ ek_nand_data.bus_width_16 = board_have_nand_16bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index f897f84d43dc..b60c22b6e241 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -220,7 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
- ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+ ek_nand_data.bus_width_16 = board_have_nand_16bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index 605b26f40a4c..9bbdc92ea194 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -221,7 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
- ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+ ek_nand_data.bus_width_16 = board_have_nand_16bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index 7624cf0d006b..1325a50101a8 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -198,7 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
- ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+ ek_nand_data.bus_width_16 = board_have_nand_16bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 063c95d0e8f0..33eaa135f248 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -178,7 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
- ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+ ek_nand_data.bus_width_16 = board_have_nand_16bit();
/* setup bus-width (8 or 16) */
if (ek_nand_data.bus_width_16)
ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
index b855ee75f72c..8f4866045b41 100644
--- a/arch/arm/mach-at91/include/mach/system_rev.h
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -13,13 +13,13 @@
* the 16-31 bit are reserved for at91 generic information
*
* bit 31:
- * 0 => nand 16 bit
- * 1 => nand 8 bit
+ * 0 => nand 8 bit
+ * 1 => nand 16 bit
*/
-#define BOARD_HAVE_NAND_8BIT (1 << 31)
-static int inline board_have_nand_8bit(void)
+#define BOARD_HAVE_NAND_16BIT (1 << 31)
+static inline int board_have_nand_16bit(void)
{
- return system_rev & BOARD_HAVE_NAND_8BIT;
+ return system_rev & BOARD_HAVE_NAND_16BIT;
}
#endif /* __ARCH_SYSTEM_REV_H__ */
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index bfe68ec4e1a6..d8c1af025931 100644
--- a/arch/arm/mach-davinci/irq.c
+++ b/arch/arm/mach-davinci/irq.c
@@ -53,7 +53,7 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
ct = gc->chip_types;
- ct->chip.irq_ack = irq_gc_ack;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 1d4b65fd673e..6659a0d137a3 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -251,9 +251,9 @@ static void ep93xx_uart_set_mctrl(struct amba_device *dev,
unsigned int mcr;
mcr = 0;
- if (!(mctrl & TIOCM_RTS))
+ if (mctrl & TIOCM_RTS)
mcr |= 2;
- if (!(mctrl & TIOCM_DTR))
+ if (mctrl & TIOCM_DTR)
mcr |= 1;
__raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET);
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 9babe4473e88..bfd621460abf 100644
--- a/arch/arm/mach-exynos4/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -23,6 +23,7 @@
#include <plat/sdhci.h>
#include <plat/devs.h>
#include <plat/fimc-core.h>
+#include <plat/iic-core.h>
#include <mach/regs-irq.h>
@@ -132,6 +133,11 @@ void __init exynos4_map_io(void)
s3c_fimc_setname(1, "exynos4-fimc");
s3c_fimc_setname(2, "exynos4-fimc");
s3c_fimc_setname(3, "exynos4-fimc");
+
+ /* The I2C bus controllers are directly compatible with s3c2440 */
+ s3c_i2c0_setname("s3c2440-i2c");
+ s3c_i2c1_setname("s3c2440-i2c");
+ s3c_i2c2_setname("s3c2440-i2c");
}
void __init exynos4_init_clocks(int xtal)
diff --git a/arch/arm/mach-exynos4/dev-audio.c b/arch/arm/mach-exynos4/dev-audio.c
index 1eed5f9f7bd3..983069a53239 100644
--- a/arch/arm/mach-exynos4/dev-audio.c
+++ b/arch/arm/mach-exynos4/dev-audio.c
@@ -330,7 +330,7 @@ struct platform_device exynos4_device_ac97 = {
static int exynos4_spdif_cfg_gpio(struct platform_device *pdev)
{
- s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4));
return 0;
}
diff --git a/arch/arm/mach-exynos4/headsmp.S b/arch/arm/mach-exynos4/headsmp.S
index 6c6cfc50c46b..3cdeb3647542 100644
--- a/arch/arm/mach-exynos4/headsmp.S
+++ b/arch/arm/mach-exynos4/headsmp.S
@@ -13,7 +13,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __INIT
+ __CPUINIT
/*
* exynos4 specific entry point for secondary CPUs. This provides
diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c
index 152676471b67..edd814110da8 100644
--- a/arch/arm/mach-exynos4/mach-smdkv310.c
+++ b/arch/arm/mach-exynos4/mach-smdkv310.c
@@ -78,9 +78,7 @@ static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = {
};
static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = {
- .cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = EXYNOS4_GPK0(2),
- .ext_cd_gpio_invert = 1,
+ .cd_type = S3C_SDHCI_CD_INTERNAL,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT
.max_width = 8,
@@ -96,9 +94,7 @@ static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = {
};
static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = {
- .cd_type = S3C_SDHCI_CD_GPIO,
- .ext_cd_gpio = EXYNOS4_GPK2(2),
- .ext_cd_gpio_invert = 1,
+ .cd_type = S3C_SDHCI_CD_INTERNAL,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT
.max_width = 8,
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 72b4e7631583..ab9f999106c7 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
-static APMU_CLK(nand, NAND, 0x01db, 208000000);
+static APMU_CLK(nand, NAND, 0x19b, 156000000);
static APMU_CLK(lcd, LCD, 0x7f, 312000000);
/* device and clock bindings */
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
index 8f92ccd26edf..1464607aa60d 100644
--- a/arch/arm/mach-mmp/pxa910.c
+++ b/arch/arm/mach-mmp/pxa910.c
@@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
-static APMU_CLK(nand, NAND, 0x01db, 208000000);
+static APMU_CLK(nand, NAND, 0x19b, 156000000);
static APMU_CLK(u2o, USB, 0x1b, 480000000);
/* device and clock bindings */
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index de88c9297b68..f49ce85d2448 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -215,7 +215,7 @@ static struct omap_kp_platform_data ams_delta_kp_data __initdata = {
.delay = 9,
};
-static struct platform_device ams_delta_kp_device __initdata = {
+static struct platform_device ams_delta_kp_device = {
.name = "omap-keypad",
.id = -1,
.dev = {
@@ -225,12 +225,12 @@ static struct platform_device ams_delta_kp_device __initdata = {
.resource = ams_delta_kp_resources,
};
-static struct platform_device ams_delta_lcd_device __initdata = {
+static struct platform_device ams_delta_lcd_device = {
.name = "lcd_ams_delta",
.id = -1,
};
-static struct platform_device ams_delta_led_device __initdata = {
+static struct platform_device ams_delta_led_device = {
.name = "ams-delta-led",
.id = -1
};
@@ -267,7 +267,7 @@ static struct soc_camera_link ams_delta_iclink = {
.power = ams_delta_camera_power,
};
-static struct platform_device ams_delta_camera_device __initdata = {
+static struct platform_device ams_delta_camera_device = {
.name = "soc-camera-pdrv",
.id = 0,
.dev = {
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
index 04c4b04cf54e..364137c2042c 100644
--- a/arch/arm/mach-omap1/gpio15xx.c
+++ b/arch/arm/mach-omap1/gpio15xx.c
@@ -41,7 +41,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
.bank_stride = 1,
};
-static struct __initdata platform_device omap15xx_mpu_gpio = {
+static struct platform_device omap15xx_mpu_gpio = {
.name = "omap_gpio",
.id = 0,
.dev = {
@@ -70,7 +70,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
.bank_width = 16,
};
-static struct __initdata platform_device omap15xx_gpio = {
+static struct platform_device omap15xx_gpio = {
.name = "omap_gpio",
.id = 1,
.dev = {
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c
index 5dd0d4c82b24..293a246e2824 100644
--- a/arch/arm/mach-omap1/gpio16xx.c
+++ b/arch/arm/mach-omap1/gpio16xx.c
@@ -44,7 +44,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
.bank_stride = 1,
};
-static struct __initdata platform_device omap16xx_mpu_gpio = {
+static struct platform_device omap16xx_mpu_gpio = {
.name = "omap_gpio",
.id = 0,
.dev = {
@@ -73,7 +73,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
.bank_width = 16,
};
-static struct __initdata platform_device omap16xx_gpio1 = {
+static struct platform_device omap16xx_gpio1 = {
.name = "omap_gpio",
.id = 1,
.dev = {
@@ -102,7 +102,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
.bank_width = 16,
};
-static struct __initdata platform_device omap16xx_gpio2 = {
+static struct platform_device omap16xx_gpio2 = {
.name = "omap_gpio",
.id = 2,
.dev = {
@@ -131,7 +131,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
.bank_width = 16,
};
-static struct __initdata platform_device omap16xx_gpio3 = {
+static struct platform_device omap16xx_gpio3 = {
.name = "omap_gpio",
.id = 3,
.dev = {
@@ -160,7 +160,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
.bank_width = 16,
};
-static struct __initdata platform_device omap16xx_gpio4 = {
+static struct platform_device omap16xx_gpio4 = {
.name = "omap_gpio",
.id = 4,
.dev = {
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c
index 1204c8b871af..c6ad248d63a6 100644
--- a/arch/arm/mach-omap1/gpio7xx.c
+++ b/arch/arm/mach-omap1/gpio7xx.c
@@ -46,7 +46,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
.bank_stride = 2,
};
-static struct __initdata platform_device omap7xx_mpu_gpio = {
+static struct platform_device omap7xx_mpu_gpio = {
.name = "omap_gpio",
.id = 0,
.dev = {
@@ -75,7 +75,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
.bank_width = 32,
};
-static struct __initdata platform_device omap7xx_gpio1 = {
+static struct platform_device omap7xx_gpio1 = {
.name = "omap_gpio",
.id = 1,
.dev = {
@@ -104,7 +104,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
.bank_width = 32,
};
-static struct __initdata platform_device omap7xx_gpio2 = {
+static struct platform_device omap7xx_gpio2 = {
.name = "omap_gpio",
.id = 2,
.dev = {
@@ -133,7 +133,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
.bank_width = 32,
};
-static struct __initdata platform_device omap7xx_gpio3 = {
+static struct platform_device omap7xx_gpio3 = {
.name = "omap_gpio",
.id = 3,
.dev = {
@@ -162,7 +162,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
.bank_width = 32,
};
-static struct __initdata platform_device omap7xx_gpio4 = {
+static struct platform_device omap7xx_gpio4 = {
.name = "omap_gpio",
.id = 4,
.dev = {
@@ -191,7 +191,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
.bank_width = 32,
};
-static struct __initdata platform_device omap7xx_gpio5 = {
+static struct platform_device omap7xx_gpio5 = {
.name = "omap_gpio",
.id = 5,
.dev = {
@@ -220,7 +220,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
.bank_width = 32,
};
-static struct __initdata platform_device omap7xx_gpio6 = {
+static struct platform_device omap7xx_gpio6 = {
.name = "omap_gpio",
.id = 6,
.dev = {
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 990366726c58..88bd6f7705f0 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -558,7 +558,7 @@ static struct radio_si4713_platform_data rx51_si4713_data __initdata_or_module =
.subdev_board_info = &rx51_si4713_board_info,
};
-static struct platform_device rx51_si4713_dev __initdata_or_module = {
+static struct platform_device rx51_si4713_dev = {
.name = "radio-si4713",
.id = -1,
.dev = {
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 87ae3129f4f7..b27544bcafcb 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void)
if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
(GPDR(i) & GPIO_bit(i))) {
if (GPLR(i) & GPIO_bit(i))
- PGSR(i) |= GPIO_bit(i);
+ PGSR(gpio_to_bank(i)) |= GPIO_bit(i);
else
- PGSR(i) &= ~GPIO_bit(i);
+ PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i);
}
}
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index d130f77b6d11..2f37d43f51b6 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = {
.xres = 480,
.yres = 272,
.bpp = 16,
- .hsync_len = 4,
+ .hsync_len = 41,
.left_margin = 2,
.right_margin = 1,
- .vsync_len = 1,
+ .vsync_len = 10,
.upper_margin = 3,
.lower_margin = 1,
.sync = 0,
@@ -596,29 +596,31 @@ static void __init raumfeld_lcd_init(void)
{
int ret;
- pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
-
- /* Earlier devices had the backlight regulator controlled
- * via PWM, later versions use another controller for that */
- if ((system_rev & 0xff) < 2) {
- mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
- pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
- platform_device_register(&raumfeld_pwm_backlight_device);
- } else
- platform_device_register(&raumfeld_lt3593_device);
-
ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
if (ret < 0)
pr_warning("Unable to request GPIO_TFT_VA_EN\n");
else
gpio_direction_output(GPIO_TFT_VA_EN, 1);
+ msleep(100);
+
ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
if (ret < 0)
pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n");
else
gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
+ /* Hardware revision 2 has the backlight regulator controlled
+ * by an LT3593, earlier and later devices use PWM for that. */
+ if ((system_rev & 0xff) == 2) {
+ platform_device_register(&raumfeld_lt3593_device);
+ } else {
+ mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
+ pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
+ platform_device_register(&raumfeld_pwm_backlight_device);
+ }
+
+ pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
platform_device_register(&pxa3xx_device_gcu);
}
@@ -657,10 +659,10 @@ static struct lis3lv02d_platform_data lis3_pdata = {
#define SPI_AK4104 \
{ \
- .modalias = "ak4104", \
- .max_speed_hz = 10000, \
- .bus_num = 0, \
- .chip_select = 0, \
+ .modalias = "ak4104-codec", \
+ .max_speed_hz = 10000, \
+ .bus_num = 0, \
+ .chip_select = 0, \
.controller_data = (void *) GPIO_SPDIF_CS, \
}
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index dd3120df09fe..fc2dc0b3d4fe 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -552,7 +552,7 @@ struct mini2440_features_t {
struct platform_device *optional[8];
};
-static void mini2440_parse_features(
+static void __init mini2440_parse_features(
struct mini2440_features_t * features,
const char * features_str )
{
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index 82db072cb836..5e6b42089eb4 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -88,6 +88,7 @@ static struct s3c64xx_spi_info s3c64xx_spi0_pdata = {
.cfg_gpio = s3c64xx_spi_cfg_gpio,
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 13,
+ .tx_st_done = 21,
};
static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -132,6 +133,7 @@ static struct s3c64xx_spi_info s3c64xx_spi1_pdata = {
.cfg_gpio = s3c64xx_spi_cfg_gpio,
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 13,
+ .tx_st_done = 21,
};
struct platform_device s3c64xx_device_spi1 = {
diff --git a/arch/arm/mach-s5p64x0/dev-spi.c b/arch/arm/mach-s5p64x0/dev-spi.c
index e78ee18c76e3..ac825e826326 100644
--- a/arch/arm/mach-s5p64x0/dev-spi.c
+++ b/arch/arm/mach-s5p64x0/dev-spi.c
@@ -112,12 +112,14 @@ static struct s3c64xx_spi_info s5p6440_spi0_pdata = {
.cfg_gpio = s5p6440_spi_cfg_gpio,
.fifo_lvl_mask = 0x1ff,
.rx_lvl_offset = 15,
+ .tx_st_done = 25,
};
static struct s3c64xx_spi_info s5p6450_spi0_pdata = {
.cfg_gpio = s5p6450_spi_cfg_gpio,
.fifo_lvl_mask = 0x1ff,
.rx_lvl_offset = 15,
+ .tx_st_done = 25,
};
static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -160,12 +162,14 @@ static struct s3c64xx_spi_info s5p6440_spi1_pdata = {
.cfg_gpio = s5p6440_spi_cfg_gpio,
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 15,
+ .tx_st_done = 25,
};
static struct s3c64xx_spi_info s5p6450_spi1_pdata = {
.cfg_gpio = s5p6450_spi_cfg_gpio,
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 15,
+ .tx_st_done = 25,
};
struct platform_device s5p64x0_device_spi1 = {
diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c
index 57b19794d9bb..e5d6c4dceb56 100644
--- a/arch/arm/mach-s5pc100/dev-spi.c
+++ b/arch/arm/mach-s5pc100/dev-spi.c
@@ -15,6 +15,7 @@
#include <mach/dma.h>
#include <mach/map.h>
#include <mach/spi-clocks.h>
+#include <mach/irqs.h>
#include <plat/s3c64xx-spi.h>
#include <plat/gpio-cfg.h>
@@ -90,6 +91,7 @@ static struct s3c64xx_spi_info s5pc100_spi0_pdata = {
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 13,
.high_speed = 1,
+ .tx_st_done = 21,
};
static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -134,6 +136,7 @@ static struct s3c64xx_spi_info s5pc100_spi1_pdata = {
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 13,
.high_speed = 1,
+ .tx_st_done = 21,
};
struct platform_device s5pc100_device_spi1 = {
@@ -176,6 +179,7 @@ static struct s3c64xx_spi_info s5pc100_spi2_pdata = {
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 13,
.high_speed = 1,
+ .tx_st_done = 21,
};
struct platform_device s5pc100_device_spi2 = {
diff --git a/arch/arm/mach-s5pv210/dev-spi.c b/arch/arm/mach-s5pv210/dev-spi.c
index e3249a47e3b1..eaf9a7bff7a0 100644
--- a/arch/arm/mach-s5pv210/dev-spi.c
+++ b/arch/arm/mach-s5pv210/dev-spi.c
@@ -85,6 +85,7 @@ static struct s3c64xx_spi_info s5pv210_spi0_pdata = {
.fifo_lvl_mask = 0x1ff,
.rx_lvl_offset = 15,
.high_speed = 1,
+ .tx_st_done = 25,
};
static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -129,6 +130,7 @@ static struct s3c64xx_spi_info s5pv210_spi1_pdata = {
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 15,
.high_speed = 1,
+ .tx_st_done = 25,
};
struct platform_device s5pv210_device_spi1 = {
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index a26f895cdc89..b473b8efac68 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1410,10 +1410,12 @@ static void __init ap4evb_init(void)
sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device);
sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
hdmi_init_pm_clock();
fsi_init_pm_clock();
sh7372_pm_init();
+ pm_clk_add(&fsi_device.dev, "spu2");
}
static void __init ap4evb_timer_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index cc1ccd899cde..5b36b6c5b448 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1584,9 +1584,11 @@ static void __init mackerel_init(void)
sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
hdmi_init_pm_clock();
sh7372_pm_init();
+ pm_clk_add(&fsi_device.dev, "spu2");
}
static void __init mackerel_timer_init(void)
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index c0800d83971e..91f5779abdd3 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -662,6 +662,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
+ CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]),
};
void __init sh7372_clock_init(void)
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 1aed9da92918..ce595cee86cd 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -485,6 +485,8 @@ static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
#ifdef CONFIG_PM
extern struct sh7372_pm_domain sh7372_a4lc;
+extern struct sh7372_pm_domain sh7372_a4mp;
+extern struct sh7372_pm_domain sh7372_d4;
extern struct sh7372_pm_domain sh7372_a3rv;
extern struct sh7372_pm_domain sh7372_a3ri;
extern struct sh7372_pm_domain sh7372_a3sg;
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 474a15a29dec..933fb411be0f 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -91,6 +91,35 @@ static int pd_power_up(struct generic_pm_domain *genpd)
return ret;
}
+static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
+{
+ int ret = pd_power_up(genpd);
+
+ /* force A4LC on after A3RV has been requested on */
+ pm_genpd_poweron(&sh7372_a4lc.genpd);
+
+ return ret;
+}
+
+static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
+{
+ int ret = pd_power_down(genpd);
+
+ /* try to power down A4LC after A3RV is requested off */
+ genpd_queue_power_off_work(&sh7372_a4lc.genpd);
+
+ return ret;
+}
+
+static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
+{
+ /* only power down A4LC if A3RV is off */
+ if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
+ return pd_power_down(genpd);
+
+ return -EBUSY;
+}
+
static bool pd_active_wakeup(struct device *dev)
{
return true;
@@ -104,9 +133,18 @@ void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
genpd->stop_device = pm_clk_suspend;
genpd->start_device = pm_clk_resume;
genpd->active_wakeup = pd_active_wakeup;
- genpd->power_off = pd_power_down;
- genpd->power_on = pd_power_up;
- pd_power_up(&sh7372_pd->genpd);
+
+ if (sh7372_pd == &sh7372_a4lc) {
+ genpd->power_off = pd_power_down_a4lc;
+ genpd->power_on = pd_power_up;
+ } else if (sh7372_pd == &sh7372_a3rv) {
+ genpd->power_off = pd_power_down_a3rv;
+ genpd->power_on = pd_power_up_a3rv;
+ } else {
+ genpd->power_off = pd_power_down;
+ genpd->power_on = pd_power_up;
+ }
+ genpd->power_on(&sh7372_pd->genpd);
}
void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
@@ -125,6 +163,14 @@ struct sh7372_pm_domain sh7372_a4lc = {
.bit_shift = 1,
};
+struct sh7372_pm_domain sh7372_a4mp = {
+ .bit_shift = 2,
+};
+
+struct sh7372_pm_domain sh7372_d4 = {
+ .bit_shift = 3,
+};
+
struct sh7372_pm_domain sh7372_a3rv = {
.bit_shift = 6,
};
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 2bcde1c46a6b..6ec454e1e063 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
@@ -56,3 +57,10 @@ static int __init sh_pm_runtime_init(void)
return 0;
}
core_initcall(sh_pm_runtime_init);
+
+static int __init sh_pm_runtime_late_init(void)
+{
+ pm_genpd_poweroff_unused();
+ return 0;
+}
+late_initcall(sh_pm_runtime_late_init);
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index b219ceaf85bf..79f0413d8725 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -842,6 +842,8 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
void __init sh7372_add_standard_devices(void)
{
sh7372_init_pm_domain(&sh7372_a4lc);
+ sh7372_init_pm_domain(&sh7372_a4mp);
+ sh7372_init_pm_domain(&sh7372_d4);
sh7372_init_pm_domain(&sh7372_a3rv);
sh7372_init_pm_domain(&sh7372_a3ri);
sh7372_init_pm_domain(&sh7372_a3sg);
@@ -853,6 +855,8 @@ void __init sh7372_add_standard_devices(void)
ARRAY_SIZE(sh7372_late_devices));
sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device);
}
void __init sh7372_add_early_devices(void)
diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c
index 245140c0df10..642de0408f25 100644
--- a/arch/arm/mach-vt8500/irq.c
+++ b/arch/arm/mach-vt8500/irq.c
@@ -39,9 +39,10 @@
static void __iomem *ic_regbase;
static void __iomem *sic_regbase;
-static void vt8500_irq_mask(unsigned int irq)
+static void vt8500_irq_mask(struct irq_data *d)
{
void __iomem *base = ic_regbase;
+ unsigned irq = d->irq;
u8 edge;
if (irq >= 64) {
@@ -64,9 +65,10 @@ static void vt8500_irq_mask(unsigned int irq)
}
}
-static void vt8500_irq_unmask(unsigned int irq)
+static void vt8500_irq_unmask(struct irq_data *d)
{
void __iomem *base = ic_regbase;
+ unsigned irq = d->irq;
u8 dctr;
if (irq >= 64) {
@@ -78,10 +80,11 @@ static void vt8500_irq_unmask(unsigned int irq)
writeb(dctr, base + VT8500_IC_DCTR + irq);
}
-static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
void __iomem *base = ic_regbase;
- unsigned int orig_irq = irq;
+ unsigned irq = d->irq;
+ unsigned orig_irq = irq;
u8 dctr;
if (irq >= 64) {
@@ -114,11 +117,11 @@ static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type)
}
static struct irq_chip vt8500_irq_chip = {
- .name = "vt8500",
- .ack = vt8500_irq_mask,
- .mask = vt8500_irq_mask,
- .unmask = vt8500_irq_unmask,
- .set_type = vt8500_irq_set_type,
+ .name = "vt8500",
+ .irq_ack = vt8500_irq_mask,
+ .irq_mask = vt8500_irq_mask,
+ .irq_unmask = vt8500_irq_unmask,
+ .irq_set_type = vt8500_irq_set_type,
};
void __init vt8500_init_irq(void)
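
A minimal sketch (not part of the patch above) of the API the vt8500 hunks convert to: the genirq layer now passes struct irq_data to the chip callbacks instead of a bare IRQ number, and the Linux IRQ is read from d->irq. The "demo" controller below is hypothetical.

#include <linux/irq.h>
#include <linux/kernel.h>

/* Hypothetical controller: the Linux IRQ number now comes from d->irq. */
static void demo_irq_mask(struct irq_data *d)
{
	pr_debug("demo: mask irq %u\n", d->irq);
	/* a real driver would clear the enable bit for d->irq here */
}

static void demo_irq_unmask(struct irq_data *d)
{
	pr_debug("demo: unmask irq %u\n", d->irq);
	/* a real driver would set the enable bit for d->irq here */
}

static struct irq_chip demo_irq_chip = {
	.name		= "demo",
	.irq_ack	= demo_irq_mask,
	.irq_mask	= demo_irq_mask,
	.irq_unmask	= demo_irq_unmask,
};
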
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index ef59099a5463..44c086710d2b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -120,17 +120,22 @@ static void l2x0_cache_sync(void)
spin_unlock_irqrestore(&l2x0_lock, flags);
}
-static void l2x0_flush_all(void)
+static void __l2x0_flush_all(void)
{
- unsigned long flags;
-
- /* clean all ways */
- spin_lock_irqsave(&l2x0_lock, flags);
debug_writel(0x03);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
cache_sync();
debug_writel(0x00);
+}
+
+static void l2x0_flush_all(void)
+{
+ unsigned long flags;
+
+ /* clean all ways */
+ spin_lock_irqsave(&l2x0_lock, flags);
+ __l2x0_flush_all();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -266,7 +271,9 @@ static void l2x0_disable(void)
unsigned long flags;
spin_lock_irqsave(&l2x0_lock, flags);
- writel(0, l2x0_base + L2X0_CTRL);
+ __l2x0_flush_all();
+ writel_relaxed(0, l2x0_base + L2X0_CTRL);
+ dsb();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d9e736c2b4f..594d677b92c8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -759,7 +759,7 @@ early_param("vmalloc", early_vmalloc);
static phys_addr_t lowmem_limit __initdata = 0;
-static void __init sanity_check_meminfo(void)
+void __init sanity_check_meminfo(void)
{
int i, j, highmem = 0;
@@ -1032,8 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
+ memblock_set_current_limit(lowmem_limit);
+
build_mem_type_table();
- sanity_check_meminfo();
prepare_page_table();
map_lowmem();
devicemaps_init(mdesc);
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 687d02319a41..941a98c9e8aa 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -27,6 +27,10 @@ void __init arm_mm_memblock_reserve(void)
memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
}
+void __init sanity_check_meminfo(void)
+{
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 5b4fffab1eb4..41ab97ebe4cf 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -432,7 +432,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
- ct->chip.irq_ack = irq_gc_ack;
+ ct->chip.irq_ack = irq_gc_ack_clr_bit;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = gpio_irq_set_type;
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c
index 48ebb9479b61..a11dc3670505 100644
--- a/arch/arm/plat-pxa/gpio.c
+++ b/arch/arm/plat-pxa/gpio.c
@@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
return container_of(c, struct pxa_gpio_chip, chip)->regbase;
}
-static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio)
+static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
{
return &pxa_gpio_chips[gpio_to_bank(gpio)];
}
@@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
int gpio = irq_to_gpio(d->irq);
unsigned long gpdr, mask = GPIO_bit(gpio);
- c = gpio_to_chip(gpio);
+ c = gpio_to_pxachip(gpio);
if (type == IRQ_TYPE_PROBE) {
/* Don't mess with enabled GPIOs using preconfigured edges or
@@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
static void pxa_ack_muxed_gpio(struct irq_data *d)
{
int gpio = irq_to_gpio(d->irq);
- struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+ struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
__raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}
@@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d)
static void pxa_mask_muxed_gpio(struct irq_data *d)
{
int gpio = irq_to_gpio(d->irq);
- struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+ struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
uint32_t grer, gfer;
c->irq_mask &= ~GPIO_bit(gpio);
@@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
int gpio = irq_to_gpio(d->irq);
- struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+ struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
c->irq_mask |= GPIO_bit(gpio);
update_edge_detect(c);
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 2abf9660bc6c..a79a8ccd25f6 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1027,17 +1027,13 @@ int s3c2410_dma_config(unsigned int channel,
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
unsigned int dcon;
- pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n",
- __func__, channel, xferunit, dcon);
+ pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit);
if (chan == NULL)
return -EINVAL;
- pr_debug("%s: Initial dcon is %08x\n", __func__, dcon);
-
dcon = chan->dcon & dma_sel.dcon_mask;
-
- pr_debug("%s: New dcon is %08x\n", __func__, dcon);
+ pr_debug("%s: dcon is %08x\n", __func__, dcon);
switch (chan->req_ch) {
case DMACH_I2S_IN:
@@ -1235,7 +1231,7 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
/* restore channel's hardware configuration */
if (!cp->in_use)
- return 0;
+ return;
printk(KERN_INFO "dma%d: restoring configuration\n", cp->number);
@@ -1246,8 +1242,6 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
if (cp->map != NULL)
dma_sel.select(cp, cp->map);
-
- return 0;
}
static void s3c2410_dma_resume(void)
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 135abda31c9a..327ab9f662e8 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
if (!gc)
return -ENOMEM;
ct = gc->chip_types;
- ct->chip.irq_ack = irq_gc_ack;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->chip.irq_set_type = s5p_gpioint_set_type,
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c
index 899a8cc011ff..612934c48b0d 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-s5p/s5p-time.c
@@ -370,11 +370,11 @@ static void __init s5p_clocksource_init(void)
clock_rate = clk_get_rate(tin_source);
- init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
-
s5p_time_setup(timer_source.source_id, TCNT_MAX);
s5p_time_start(timer_source.source_id, PERIODIC);
+ init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
+
if (clocksource_register_hz(&time_clocksource, clock_rate))
panic("%s: can't register clocksource\n", time_clocksource.name);
}
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 4af108ff4112..e3b31c26ac3e 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -12,6 +12,10 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#ifndef __PLAT_DEVS_H
+#define __PLAT_DEVS_H __FILE__
+
#include <linux/platform_device.h>
struct s3c24xx_uart_resources {
@@ -159,3 +163,5 @@ extern struct platform_device s3c_device_ac97;
*/
extern void *s3c_set_platdata(void *pd, size_t pdsize,
struct platform_device *pdev);
+
+#endif /* __PLAT_DEVS_H */
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index 0ffe34a21554..4c16fa3621bb 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -39,6 +39,7 @@ struct s3c64xx_spi_csinfo {
* @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6
* @rx_lvl_offset: Depends on tx fifo_lvl field and bus number
* @high_speed: If the controller supports HIGH_SPEED_EN bit
+ * @tx_st_done: Depends on tx fifo_lvl field
*/
struct s3c64xx_spi_info {
int src_clk_nr;
@@ -53,6 +54,7 @@ struct s3c64xx_spi_info {
int fifo_lvl_mask;
int rx_lvl_offset;
int high_speed;
+ int tx_st_done;
};
/**
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
index 32582c0958e3..0e46588d847b 100644
--- a/arch/arm/plat-samsung/irq-uart.c
+++ b/arch/arm/plat-samsung/irq-uart.c
@@ -55,7 +55,7 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
handle_level_irq);
ct = gc->chip_types;
- ct->chip.irq_ack = irq_gc_ack;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->regs.ack = S3C64XX_UINTP;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 33867ec4a234..9d6a8effeda2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
+#include <linux/memory.h>
+
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
@@ -20,24 +22,25 @@
static unsigned long get_memblock_size(void)
{
struct device_node *np;
- unsigned int memblock_size = 0;
+ unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+ struct resource r;
np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (np) {
- const unsigned long *size;
+ const __be64 *size;
size = of_get_property(np, "ibm,lmb-size", NULL);
- memblock_size = size ? *size : 0;
-
+ if (size)
+ memblock_size = be64_to_cpup(size);
of_node_put(np);
- } else {
+ } else if (machine_is(pseries)) {
+ /* This fallback really only applies to pseries */
unsigned int memzero_size = 0;
- const unsigned int *regs;
np = of_find_node_by_path("/memory@0");
if (np) {
- regs = of_get_property(np, "reg", NULL);
- memzero_size = regs ? regs[3] : 0;
+ if (!of_address_to_resource(np, 0, &r))
+ memzero_size = resource_size(&r);
of_node_put(np);
}
@@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void)
sprintf(buf, "/memory@%x", memzero_size);
np = of_find_node_by_path(buf);
if (np) {
- regs = of_get_property(np, "reg", NULL);
- memblock_size = regs ? regs[3] : 0;
+ if (!of_address_to_resource(np, 0, &r))
+ memblock_size = resource_size(&r);
of_node_put(np);
}
}
}
-
return memblock_size;
}
+/* WARNING: This is going to override the generic definition whenever
+ * pseries is built-in regardless of what platform is active at boot
+ * time. This is fine for now as this is the only "option" and it
+ * should work everywhere. If not, we'll have to turn this into a
+ * ppc_md. callback
+ */
unsigned long memory_block_size_bytes(void)
{
return get_memblock_size();
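
A minimal sketch (not part of the patch, with an invented function name) of the helper pattern the hunk above switches to: of_address_to_resource() plus resource_size() instead of indexing the raw "reg" cells by hand.

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>

/* Illustrative only: size of the first address range of a device-tree node. */
static u64 node_first_range_size(const char *path)
{
	struct device_node *np;
	struct resource r;
	u64 size = 0;

	np = of_find_node_by_path(path);
	if (!np)
		return 0;

	if (!of_address_to_resource(np, 0, &r))
		size = resource_size(&r);

	of_node_put(np);
	return size;
}
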
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 224e8c5eb307..ffa037f28d39 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -57,6 +57,8 @@ static inline int pfn_valid(int pfn)
return 0;
}
+#define early_pfn_valid(pfn) pfn_valid((pfn))
+
#endif /* CONFIG_DISCONTIGMEM */
#ifdef CONFIG_NEED_MULTIPLE_NODES
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index ead21b663117..b4fd836e4053 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -28,6 +28,8 @@ pmode_cr3: .long 0 /* Saved %cr3 */
pmode_cr4: .long 0 /* Saved %cr4 */
pmode_efer: .quad 0 /* Saved EFER */
pmode_gdt: .quad 0
+pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */
+pmode_behavior: .long 0 /* Wakeup behavior flags */
realmode_flags: .long 0
real_magic: .long 0
trampoline_segment: .word 0
@@ -91,6 +93,18 @@ wakeup_code:
/* Call the C code */
calll main
+ /* Restore MISC_ENABLE before entering protected mode, in case
+ BIOS decided to clear XD_DISABLE during S3. */
+ movl pmode_behavior, %eax
+ btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
+ jnc 1f
+
+ movl pmode_misc_en, %eax
+ movl pmode_misc_en + 4, %edx
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ wrmsr
+1:
+
/* Do any other stuff... */
#ifndef CONFIG_64BIT
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h
index e1828c07e79c..97a29e1430e3 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/kernel/acpi/realmode/wakeup.h
@@ -21,6 +21,9 @@ struct wakeup_header {
u32 pmode_efer_low; /* Protected mode EFER */
u32 pmode_efer_high;
u64 pmode_gdt;
+ u32 pmode_misc_en_low; /* Protected mode MISC_ENABLE */
+ u32 pmode_misc_en_high;
+ u32 pmode_behavior; /* Wakeup routine behavior flags */
u32 realmode_flags;
u32 real_magic;
u16 trampoline_segment; /* segment with trampoline code, 64-bit only */
@@ -39,4 +42,7 @@ extern struct wakeup_header wakeup_header;
#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
#define WAKEUP_END_SIGNATURE 0x65a22c82
+/* Wakeup behavior bits */
+#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0
+
#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 18a857ba7a25..103b6ab368d3 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -77,6 +77,12 @@ int acpi_suspend_lowlevel(void)
header->pmode_cr0 = read_cr0();
header->pmode_cr4 = read_cr4_safe();
+ header->pmode_behavior = 0;
+ if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
+ &header->pmode_misc_en_low,
+ &header->pmode_misc_en_high))
+ header->pmode_behavior |=
+ (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
header->realmode_flags = acpi_realmode_flags;
header->real_magic = 0x12345678;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 0c016f727695..4f0d46fefa7f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -294,6 +294,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
},
},
+ { /* Handle reboot issue on Acer Aspire one */
+ .callback = set_bios_reboot,
+ .ident = "Acer Aspire One A110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+ },
+ },
{ }
};
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d865c4aeec55..bbaaa005bf0e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -28,6 +28,7 @@
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
@@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
}
#ifdef CONFIG_X86_UV
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
-
unsigned long memory_block_size_bytes(void)
{
if (is_uv_system()) {
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index cf9750004a08..68894fdc034b 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy)
static int nmi_start(void)
{
get_online_cpus();
- on_each_cpu(nmi_cpu_start, NULL, 1);
ctr_running = 1;
+ /* make ctr_running visible to the nmi handler: */
+ smp_mb();
+ on_each_cpu(nmi_cpu_start, NULL, 1);
put_online_cpus();
return 0;
}
@@ -504,15 +506,18 @@ static int nmi_setup(void)
nmi_enabled = 0;
ctr_running = 0;
- barrier();
+ /* make variables visible to the nmi handler: */
+ smp_mb();
err = register_die_notifier(&profile_exceptions_nb);
if (err)
goto fail;
get_online_cpus();
register_cpu_notifier(&oprofile_cpu_nb);
- on_each_cpu(nmi_cpu_setup, NULL, 1);
nmi_enabled = 1;
+ /* make nmi_enabled visible to the nmi handler: */
+ smp_mb();
+ on_each_cpu(nmi_cpu_setup, NULL, 1);
put_online_cpus();
return 0;
@@ -531,7 +536,8 @@ static void nmi_shutdown(void)
nmi_enabled = 0;
ctr_running = 0;
put_online_cpus();
- barrier();
+ /* make variables visible to the nmi handler: */
+ smp_mb();
unregister_die_notifier(&profile_exceptions_nb);
msrs = &get_cpu_var(cpu_msrs);
model->shutdown(msrs);
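
The oprofile hunks above rely on a publish-then-notify ordering: the flag is written first, a full memory barrier makes it visible to the NMI handler on every CPU, and only then is the per-CPU callback kicked off. A minimal sketch of that pattern, with invented names:

#include <linux/smp.h>

static int ctr_running;	/* consulted by an NMI handler on any CPU */

static void demo_cpu_start(void *dummy)
{
	/* per-CPU work that may race with the handler reading ctr_running */
}

static void demo_start_all(void)
{
	ctr_running = 1;
	/* make ctr_running visible before any CPU runs demo_cpu_start() */
	smp_mb();
	on_each_cpu(demo_cpu_start, NULL, 1);
}
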
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index fe008309ffec..f567965c0620 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -327,13 +327,12 @@ int __init pci_xen_hvm_init(void)
}
#ifdef CONFIG_XEN_DOM0
-static int xen_register_pirq(u32 gsi, int triggering)
+static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
{
int rc, pirq, irq = -1;
struct physdev_map_pirq map_irq;
int shareable = 0;
char *name;
- bool gsi_override = false;
if (!xen_pv_domain())
return -1;
@@ -345,31 +344,12 @@ static int xen_register_pirq(u32 gsi, int triggering)
shareable = 1;
name = "ioapic-level";
}
-
pirq = xen_allocate_pirq_gsi(gsi);
if (pirq < 0)
goto out;
- /* Before we bind the GSI to a Linux IRQ, check whether
- * we need to override it with bus_irq (IRQ) value. Usually for
- * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
- * but there are oddballs where the IRQ != GSI:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
- * which ends up being: gsi_to_irq[9] == 20
- * (which is what acpi_gsi_to_irq ends up calling when starting the
- * the ACPI interpreter and keels over since IRQ 9 has not been
- * setup as we had setup IRQ 20 for it).
- */
- if (gsi == acpi_sci_override_gsi) {
- /* Check whether the GSI != IRQ */
- acpi_gsi_to_irq(gsi, &irq);
- if (irq != gsi)
- /* Bugger, we MUST have that IRQ. */
- gsi_override = true;
- }
- if (gsi_override)
- irq = xen_bind_pirq_gsi_to_irq(irq, pirq, shareable, name);
+ if (gsi_override >= 0)
+ irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
else
irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
if (irq < 0)
@@ -392,7 +372,7 @@ out:
return irq;
}
-static int xen_register_gsi(u32 gsi, int triggering, int polarity)
+static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
{
int rc, irq;
struct physdev_setup_gsi setup_gsi;
@@ -403,7 +383,7 @@ static int xen_register_gsi(u32 gsi, int triggering, int polarity)
printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
gsi, triggering, polarity);
- irq = xen_register_pirq(gsi, triggering);
+ irq = xen_register_pirq(gsi, gsi_override, triggering);
setup_gsi.gsi = gsi;
setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
@@ -425,6 +405,8 @@ static __init void xen_setup_acpi_sci(void)
int rc;
int trigger, polarity;
int gsi = acpi_sci_override_gsi;
+ int irq = -1;
+ int gsi_override = -1;
if (!gsi)
return;
@@ -441,7 +423,25 @@ static __init void xen_setup_acpi_sci(void)
printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
"polarity=%d\n", gsi, trigger, polarity);
- gsi = xen_register_gsi(gsi, trigger, polarity);
+ /* Before we bind the GSI to a Linux IRQ, check whether
+ * we need to override it with bus_irq (IRQ) value. Usually for
+ * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
+ * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
+ * but there are oddballs where the IRQ != GSI:
+ * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
+ * which ends up being: gsi_to_irq[9] == 20
+ * (which is what acpi_gsi_to_irq ends up calling when starting
+ * the ACPI interpreter and keels over since IRQ 9 has not been
+ * setup as we had setup IRQ 20 for it).
+ */
+ /* Check whether the GSI != IRQ */
+ if (acpi_gsi_to_irq(gsi, &irq) == 0) {
+ if (irq >= 0 && irq != gsi)
+ /* Bugger, we MUST have that IRQ. */
+ gsi_override = irq;
+ }
+
+ gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
printk(KERN_INFO "xen: acpi sci %d\n", gsi);
return;
@@ -450,7 +450,7 @@ static __init void xen_setup_acpi_sci(void)
static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
int trigger, int polarity)
{
- return xen_register_gsi(gsi, trigger, polarity);
+ return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
}
static int __init pci_xen_initial_domain(void)
@@ -489,7 +489,7 @@ void __init xen_setup_pirqs(void)
if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
continue;
- xen_register_pirq(irq,
+ xen_register_pirq(irq, -1 /* no GSI override */,
trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
}
}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 474356b98ede..899e393d8e73 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -504,9 +504,6 @@ void __init efi_init(void)
x86_platform.set_wallclock = efi_set_rtc_mmss;
#endif
- /* Setup for EFI runtime service */
- reboot_type = BOOT_EFI;
-
#if EFI_DEBUG
print_efi_memmap();
#endif
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f3799432676d..ae21919f15e1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2773,11 +2773,14 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
smp_wmb();
cic->key = cfqd_dead_key(cfqd);
+ rcu_read_lock();
if (rcu_dereference(ioc->ioc_data) == cic) {
+ rcu_read_unlock();
spin_lock(&ioc->lock);
rcu_assign_pointer(ioc->ioc_data, NULL);
spin_unlock(&ioc->lock);
- }
+ } else
+ rcu_read_unlock();
if (cic->cfqq[BLK_RW_ASYNC]) {
cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
@@ -3084,7 +3087,8 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
spin_lock_irqsave(&ioc->lock, flags);
- BUG_ON(ioc->ioc_data == cic);
+ BUG_ON(rcu_dereference_check(ioc->ioc_data,
+ lockdep_is_held(&ioc->lock)) == cic);
radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
hlist_del_rcu(&cic->cic_list);
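
The cfq hunks above tighten two RCU rules: rcu_dereference() is only legal inside an rcu_read_lock() section, and a dereference done while holding the update-side lock should be annotated with rcu_dereference_check(). A hedged, generic sketch of both, with invented struct and field names:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_ctx {
	void __rcu *cached;	/* RCU-protected pointer, updated under 'lock' */
	spinlock_t lock;
};

static bool demo_cache_hit(struct demo_ctx *ctx, void *key)
{
	bool hit;

	rcu_read_lock();
	hit = rcu_dereference(ctx->cached) == key;
	rcu_read_unlock();

	return hit;
}

static void demo_cache_clear(struct demo_ctx *ctx, void *key)
{
	spin_lock(&ctx->lock);
	/* holding ctx->lock is what makes this dereference safe */
	if (rcu_dereference_check(ctx->cached, lockdep_is_held(&ctx->lock)) == key)
		rcu_assign_pointer(ctx->cached, NULL);
	spin_unlock(&ctx->lock);
}
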
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9f9b2359f718..45d7c8fc73bd 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -30,7 +30,6 @@
static DEFINE_MUTEX(mem_sysfs_mutex);
#define MEMORY_CLASS_NAME "memory"
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
static int sections_per_block;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1aed94c73cfc..be8714aa9dd6 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -13,6 +13,11 @@
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+
+static LIST_HEAD(gpd_list);
+static DEFINE_MUTEX(gpd_list_lock);
#ifdef CONFIG_PM
@@ -30,6 +35,41 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
genpd->sd_count--;
}
+static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+{
+ DEFINE_WAIT(wait);
+
+ mutex_lock(&genpd->lock);
+ /*
+ * Wait for the domain to transition into either the active,
+ * or the power off state.
+ */
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (genpd->status == GPD_STATE_ACTIVE
+ || genpd->status == GPD_STATE_POWER_OFF)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
+}
+
+static void genpd_release_lock(struct generic_pm_domain *genpd)
+{
+ mutex_unlock(&genpd->lock);
+}
+
+static void genpd_set_active(struct generic_pm_domain *genpd)
+{
+ if (genpd->resume_count == 0)
+ genpd->status = GPD_STATE_ACTIVE;
+}
+
/**
* pm_genpd_poweron - Restore power to a given PM domain and its parents.
* @genpd: PM domain to power up.
@@ -37,24 +77,34 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
* Restore power to @genpd and all of its parents so that it is possible to
* resume a device belonging to it.
*/
-static int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
+ struct generic_pm_domain *parent = genpd->parent;
+ DEFINE_WAIT(wait);
int ret = 0;
start:
- if (genpd->parent)
- mutex_lock(&genpd->parent->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ if (parent) {
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ mutex_lock(&genpd->lock);
+ }
- if (!genpd->power_is_off
+ if (genpd->status == GPD_STATE_ACTIVE
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
goto out;
- if (genpd->parent && genpd->parent->power_is_off) {
+ if (genpd->status != GPD_STATE_POWER_OFF) {
+ genpd_set_active(genpd);
+ goto out;
+ }
+
+ if (parent && parent->status != GPD_STATE_ACTIVE) {
mutex_unlock(&genpd->lock);
- mutex_unlock(&genpd->parent->lock);
+ genpd_release_lock(parent);
- ret = pm_genpd_poweron(genpd->parent);
+ ret = pm_genpd_poweron(parent);
if (ret)
return ret;
@@ -67,14 +117,14 @@ static int pm_genpd_poweron(struct generic_pm_domain *genpd)
goto out;
}
- genpd->power_is_off = false;
- if (genpd->parent)
- genpd->parent->sd_count++;
+ genpd_set_active(genpd);
+ if (parent)
+ parent->sd_count++;
out:
mutex_unlock(&genpd->lock);
- if (genpd->parent)
- mutex_unlock(&genpd->parent->lock);
+ if (parent)
+ genpd_release_lock(parent);
return ret;
}
@@ -90,6 +140,7 @@ static int pm_genpd_poweron(struct generic_pm_domain *genpd)
*/
static int __pm_genpd_save_device(struct dev_list_entry *dle,
struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
{
struct device *dev = dle->dev;
struct device_driver *drv = dev->driver;
@@ -98,6 +149,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
if (dle->need_restore)
return 0;
+ mutex_unlock(&genpd->lock);
+
if (drv && drv->pm && drv->pm->runtime_suspend) {
if (genpd->start_device)
genpd->start_device(dev);
@@ -108,6 +161,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
genpd->stop_device(dev);
}
+ mutex_lock(&genpd->lock);
+
if (!ret)
dle->need_restore = true;
@@ -121,6 +176,7 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
*/
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
{
struct device *dev = dle->dev;
struct device_driver *drv = dev->driver;
@@ -128,6 +184,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
if (!dle->need_restore)
return;
+ mutex_unlock(&genpd->lock);
+
if (drv && drv->pm && drv->pm->runtime_resume) {
if (genpd->start_device)
genpd->start_device(dev);
@@ -138,10 +196,39 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
genpd->stop_device(dev);
}
+ mutex_lock(&genpd->lock);
+
dle->need_restore = false;
}
/**
+ * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
+ * @genpd: PM domain to check.
+ *
+ * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
+ * a "power off" operation, which means that a "power on" has occured in the
+ * meantime, or if its resume_count field is different from zero, which means
+ * that one of its devices has been resumed in the meantime.
+ */
+static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+{
+ return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+}
+
+/**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ if (!work_pending(&genpd->power_off_work))
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
* pm_genpd_poweroff - Remove power from a given PM domain.
* @genpd: PM domain to power down.
*
@@ -150,13 +237,22 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
* the @genpd's devices' drivers and remove power from @genpd.
*/
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
{
struct generic_pm_domain *parent;
struct dev_list_entry *dle;
unsigned int not_suspended;
- int ret;
+ int ret = 0;
- if (genpd->power_is_off || genpd->prepared_count > 0)
+ start:
+ /*
+ * Do not try to power off the domain in the following situations:
+ * (1) The domain is already in the "power off" state.
+ * (2) System suspend is in progress.
+ * (3) One of the domain's devices is being resumed right now.
+ */
+ if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
+ || genpd->resume_count > 0)
return 0;
if (genpd->sd_count > 0)
@@ -170,35 +266,76 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
if (not_suspended > genpd->in_progress)
return -EBUSY;
+ if (genpd->poweroff_task) {
+ /*
+ * Another instance of pm_genpd_poweroff() is executing
+ * callbacks, so tell it to start over and return.
+ */
+ genpd->status = GPD_STATE_REPEAT;
+ return 0;
+ }
+
if (genpd->gov && genpd->gov->power_down_ok) {
if (!genpd->gov->power_down_ok(&genpd->domain))
return -EAGAIN;
}
+ genpd->status = GPD_STATE_BUSY;
+ genpd->poweroff_task = current;
+
list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
ret = __pm_genpd_save_device(dle, genpd);
- if (ret)
- goto err_dev;
- }
+ if (ret) {
+ genpd_set_active(genpd);
+ goto out;
+ }
- if (genpd->power_off)
- genpd->power_off(genpd);
+ if (genpd_abort_poweroff(genpd))
+ goto out;
- genpd->power_is_off = true;
+ if (genpd->status == GPD_STATE_REPEAT) {
+ genpd->poweroff_task = NULL;
+ goto start;
+ }
+ }
parent = genpd->parent;
if (parent) {
- genpd_sd_counter_dec(parent);
- if (parent->sd_count == 0)
- queue_work(pm_wq, &parent->power_off_work);
+ mutex_unlock(&genpd->lock);
+
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+
+ if (genpd_abort_poweroff(genpd)) {
+ genpd_release_lock(parent);
+ goto out;
+ }
}
- return 0;
+ if (genpd->power_off) {
+ ret = genpd->power_off(genpd);
+ if (ret == -EBUSY) {
+ genpd_set_active(genpd);
+ if (parent)
+ genpd_release_lock(parent);
+
+ goto out;
+ }
+ }
+
+ genpd->status = GPD_STATE_POWER_OFF;
+
+ if (parent) {
+ genpd_sd_counter_dec(parent);
+ if (parent->sd_count == 0)
+ genpd_queue_power_off_work(parent);
- err_dev:
- list_for_each_entry_continue(dle, &genpd->dev_list, node)
- __pm_genpd_restore_device(dle, genpd);
+ genpd_release_lock(parent);
+ }
+ out:
+ genpd->poweroff_task = NULL;
+ wake_up_all(&genpd->status_wait_queue);
return ret;
}
@@ -212,13 +349,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
- if (genpd->parent)
- mutex_lock(&genpd->parent->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ genpd_acquire_lock(genpd);
pm_genpd_poweroff(genpd);
- mutex_unlock(&genpd->lock);
- if (genpd->parent)
- mutex_unlock(&genpd->parent->lock);
+ genpd_release_lock(genpd);
}
/**
@@ -239,23 +372,17 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->parent)
- mutex_lock(&genpd->parent->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
if (genpd->stop_device) {
int ret = genpd->stop_device(dev);
if (ret)
- goto out;
+ return ret;
}
+
+ mutex_lock(&genpd->lock);
genpd->in_progress++;
pm_genpd_poweroff(genpd);
genpd->in_progress--;
-
- out:
mutex_unlock(&genpd->lock);
- if (genpd->parent)
- mutex_unlock(&genpd->parent->lock);
return 0;
}
@@ -276,9 +403,6 @@ static void __pm_genpd_runtime_resume(struct device *dev,
break;
}
}
-
- if (genpd->start_device)
- genpd->start_device(dev);
}
/**
@@ -292,6 +416,7 @@ static void __pm_genpd_runtime_resume(struct device *dev,
static int pm_genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
+ DEFINE_WAIT(wait);
int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -305,9 +430,34 @@ static int pm_genpd_runtime_resume(struct device *dev)
return ret;
mutex_lock(&genpd->lock);
+ genpd->status = GPD_STATE_BUSY;
+ genpd->resume_count++;
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ /*
+ * If current is the powering off task, we have been called
+ * reentrantly from one of the device callbacks, so we should
+ * not wait.
+ */
+ if (!genpd->poweroff_task || genpd->poweroff_task == current)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
__pm_genpd_runtime_resume(dev, genpd);
+ genpd->resume_count--;
+ genpd_set_active(genpd);
+ wake_up_all(&genpd->status_wait_queue);
mutex_unlock(&genpd->lock);
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
return 0;
}
@@ -339,7 +489,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
struct generic_pm_domain *parent = genpd->parent;
- if (genpd->power_is_off)
+ if (genpd->status == GPD_STATE_POWER_OFF)
return;
if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
@@ -348,7 +498,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
if (genpd->power_off)
genpd->power_off(genpd);
- genpd->power_is_off = true;
+ genpd->status = GPD_STATE_POWER_OFF;
if (parent) {
genpd_sd_counter_dec(parent);
pm_genpd_sync_poweroff(parent);
@@ -356,6 +506,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
}
/**
+ * resume_needed - Check whether to resume a device before system suspend.
+ * @dev: Device to check.
+ * @genpd: PM domain the device belongs to.
+ *
+ * There are two cases in which a device that can wake up the system from sleep
+ * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * to wake up the system and it has to remain active for this purpose while the
+ * system is in the sleep state and (2) if the device is not enabled to wake up
+ * the system from sleep states and it generally doesn't generate wakeup signals
+ * by itself (those signals are generated on its behalf by other parts of the
+ * system). In the latter case it may be necessary to reconfigure the device's
+ * wakeup settings during system suspend, because it may have been set up to
+ * signal remote wakeup from the system's working state as needed by runtime PM.
+ * Return 'true' in either of the above cases.
+ */
+static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
+{
+ bool active_wakeup;
+
+ if (!device_can_wakeup(dev))
+ return false;
+
+ active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+ return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
+}
+
+/**
* pm_genpd_prepare - Start power transition of a device in a PM domain.
* @dev: Device to start the transition of.
*
@@ -367,6 +544,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
static int pm_genpd_prepare(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -374,33 +552,57 @@ static int pm_genpd_prepare(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ /*
+ * If a wakeup request is pending for the device, it should be woken up
+ * at this point and a system wakeup event should be reported if it's
+ * set up to wake up the system from sleep states.
+ */
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
+ return -EBUSY;
+ }
+
+ if (resume_needed(dev, genpd))
+ pm_runtime_resume(dev);
+
+ genpd_acquire_lock(genpd);
if (genpd->prepared_count++ == 0)
- genpd->suspend_power_off = genpd->power_is_off;
+ genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+
+ genpd_release_lock(genpd);
if (genpd->suspend_power_off) {
- mutex_unlock(&genpd->lock);
+ pm_runtime_put_noidle(dev);
return 0;
}
/*
- * If the device is in the (runtime) "suspended" state, call
- * .start_device() for it, if defined.
- */
- if (pm_runtime_suspended(dev))
- __pm_genpd_runtime_resume(dev, genpd);
-
- /*
- * Do not check if runtime resume is pending at this point, because it
- * has been taken care of already and if pm_genpd_poweron() ran at this
- * point as a result of the check, it would deadlock.
+ * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
+ * so pm_genpd_poweron() will return immediately, but if the device
+ * is suspended (e.g. it's been stopped by .stop_device()), we need
+ * to make it operational.
*/
+ pm_runtime_resume(dev);
__pm_runtime_disable(dev, false);
- mutex_unlock(&genpd->lock);
+ ret = pm_generic_prepare(dev);
+ if (ret) {
+ mutex_lock(&genpd->lock);
+
+ if (--genpd->prepared_count == 0)
+ genpd->suspend_power_off = false;
- return pm_generic_prepare(dev);
+ mutex_unlock(&genpd->lock);
+ pm_runtime_enable(dev);
+ }
+
+ pm_runtime_put_sync(dev);
+ return ret;
}
/**
@@ -716,7 +918,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
*/
- genpd->power_is_off = true;
+ genpd->status = GPD_STATE_POWER_OFF;
if (genpd->suspend_power_off) {
/*
* The boot kernel might put the domain into the power on state,
@@ -786,7 +988,9 @@ static void pm_genpd_complete(struct device *dev)
if (run_complete) {
pm_generic_complete(dev);
+ pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
}
}
@@ -824,9 +1028,9 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ genpd_acquire_lock(genpd);
- if (genpd->power_is_off) {
+ if (genpd->status == GPD_STATE_POWER_OFF) {
ret = -EINVAL;
goto out;
}
@@ -858,7 +1062,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
spin_unlock_irq(&dev->power.lock);
out:
- mutex_unlock(&genpd->lock);
+ genpd_release_lock(genpd);
return ret;
}
@@ -879,7 +1083,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ genpd_acquire_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -903,7 +1107,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
}
out:
- mutex_unlock(&genpd->lock);
+ genpd_release_lock(genpd);
return ret;
}
@@ -922,9 +1126,19 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ start:
+ genpd_acquire_lock(genpd);
+ mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ if (new_subdomain->status != GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
- if (genpd->power_is_off && !new_subdomain->power_is_off) {
+ if (genpd->status == GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_POWER_OFF) {
ret = -EINVAL;
goto out;
}
@@ -936,17 +1150,14 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
}
}
- mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
-
list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
new_subdomain->parent = genpd;
- if (!subdomain->power_is_off)
+ if (subdomain->status != GPD_STATE_POWER_OFF)
genpd->sd_count++;
- mutex_unlock(&new_subdomain->lock);
-
out:
- mutex_unlock(&genpd->lock);
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
return ret;
}
@@ -965,7 +1176,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ start:
+ genpd_acquire_lock(genpd);
list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
if (subdomain != target)
@@ -973,9 +1185,16 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+ if (subdomain->status != GPD_STATE_POWER_OFF
+ && subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
+
list_del(&subdomain->sd_node);
subdomain->parent = NULL;
- if (!subdomain->power_is_off)
+ if (subdomain->status != GPD_STATE_POWER_OFF)
genpd_sd_counter_dec(genpd);
mutex_unlock(&subdomain->lock);
@@ -984,7 +1203,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
break;
}
- mutex_unlock(&genpd->lock);
+ genpd_release_lock(genpd);
return ret;
}
@@ -1010,7 +1229,10 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
genpd->in_progress = 0;
genpd->sd_count = 0;
- genpd->power_is_off = is_off;
+ genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+ init_waitqueue_head(&genpd->status_wait_queue);
+ genpd->poweroff_task = NULL;
+ genpd->resume_count = 0;
genpd->device_count = 0;
genpd->suspended_count = 0;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
@@ -1030,4 +1252,22 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
genpd->domain.ops.restore = pm_genpd_restore;
genpd->domain.ops.complete = pm_genpd_complete;
+ mutex_lock(&gpd_list_lock);
+ list_add(&genpd->gpd_list_node, &gpd_list);
+ mutex_unlock(&gpd_list_lock);
+}
+
+/**
+ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ */
+void pm_genpd_poweroff_unused(void)
+{
+ struct generic_pm_domain *genpd;
+
+ mutex_lock(&gpd_list_lock);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_queue_power_off_work(genpd);
+
+ mutex_unlock(&gpd_list_lock);
}
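
[Editorial sketch -- not part of the patch.] pm_genpd_init() now also puts every domain on the global gpd_list, which is what lets the new pm_genpd_poweroff_unused() walk all registered domains and queue the existing power-off work for each of them. A minimal sketch of how platform code might use the pair follows; the callbacks, the init ordering and the NULL governor argument are assumptions for illustration only, not taken from the patch.

	#include <linux/init.h>
	#include <linux/pm_domain.h>

	/* Hypothetical platform callbacks -- illustration only. */
	static int my_pd_power_off(struct generic_pm_domain *domain) { return 0; }
	static int my_pd_power_on(struct generic_pm_domain *domain)  { return 0; }

	static struct generic_pm_domain my_domain = {
		.power_off = my_pd_power_off,
		.power_on  = my_pd_power_on,
	};

	static int __init my_platform_pm_init(void)
	{
		/* false: the domain starts in GPD_STATE_ACTIVE; governor assumed NULL */
		pm_genpd_init(&my_domain, NULL, false);

		/* ... pm_genpd_add_device() calls happen as drivers probe ... */

		/* Afterwards, ask the core to power off whatever stayed unused. */
		pm_genpd_poweroff_unused();
		return 0;
	}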
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899f5e9e..5cc12322ef32 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
+
+/**
+ * opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by opp_init_cpufreq_table
+ */
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
#endif /* CONFIG_CPU_FREQ */
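
[Editorial sketch -- not part of the patch.] The new opp_free_cpufreq_table() is the teardown counterpart of opp_init_cpufreq_table(): it kfree()s the table the init helper allocated and resets the caller's pointer to NULL. A hedged sketch of the usual pairing in a cpufreq driver; cpu_dev, freq_table and the init/exit hooks are hypothetical names.

	#include <linux/cpufreq.h>
	#include <linux/opp.h>

	static struct device *cpu_dev;			/* hypothetical */
	static struct cpufreq_frequency_table *freq_table;

	static int my_cpufreq_init(struct cpufreq_policy *policy)
	{
		int ret = opp_init_cpufreq_table(cpu_dev, &freq_table);

		if (ret)
			return ret;
		return cpufreq_frequency_table_cpuinfo(policy, freq_table);
	}

	static int my_cpufreq_exit(struct cpufreq_policy *policy)
	{
		/* Frees the table and leaves freq_table == NULL. */
		opp_free_cpufreq_table(cpu_dev, &freq_table);
		return 0;
	}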
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index c80e138b62fe..af10abecb99b 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
unsigned int val;
get_rtc_time(&time);
- pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n",
+ pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
val = time.tm_year; /* 100 years */
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index c126db3cb7d1..e8d11b6630ee 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,6 +9,7 @@
#include <linux/syscore_ops.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
static LIST_HEAD(syscore_ops_list);
static DEFINE_MUTEX(syscore_ops_lock);
@@ -48,6 +49,13 @@ int syscore_suspend(void)
struct syscore_ops *ops;
int ret = 0;
+ pr_debug("Checking wakeup interrupts\n");
+
+ /* Return error code if there are any wakeup interrupts pending. */
+ ret = check_wakeup_irqs();
+ if (ret)
+ return ret;
+
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 09ef9a878ef0..cf0e63dd97da 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -79,7 +79,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
md_io.error = 0;
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
- rw |= REQ_FUA;
+ rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
bio = bio_alloc(GFP_NOIO, 1);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index f440a02dfdb1..7b976296b564 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -112,9 +112,6 @@ struct drbd_bitmap {
struct task_struct *bm_task;
};
-static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
- unsigned long e, int val, const enum km_type km);
-
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
@@ -994,6 +991,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bio_endio(bio, -EIO);
} else {
submit_bio(rw, bio);
+ /* this should not count as user activity and cause the
+ * resync to throttle -- see drbd_rs_should_slow_down(). */
+ atomic_add(len >> 9, &mdev->rs_sect_ev);
}
}
@@ -1256,7 +1256,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
- unsigned long e, int val, const enum km_type km)
+ unsigned long e, int val)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long *p_addr = NULL;
@@ -1274,14 +1274,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
if (page_nr != last_page_nr) {
if (p_addr)
- __bm_unmap(p_addr, km);
+ __bm_unmap(p_addr, KM_IRQ1);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
changed_total += c;
c = 0;
- p_addr = __bm_map_pidx(b, page_nr, km);
+ p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
last_page_nr = page_nr;
}
if (val)
@@ -1290,7 +1290,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
}
if (p_addr)
- __bm_unmap(p_addr, km);
+ __bm_unmap(p_addr, KM_IRQ1);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
@@ -1318,7 +1318,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
bm_print_lock_info(mdev);
- c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
+ c = __bm_change_bits_to(mdev, s, e, val);
spin_unlock_irqrestore(&b->bm_lock, flags);
return c;
@@ -1343,16 +1343,17 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
{
int i;
int bits;
- unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
+ unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
for (i = first_word; i < last_word; i++) {
bits = hweight_long(paddr[i]);
paddr[i] = ~0UL;
b->bm_set += BITS_PER_LONG - bits;
}
- kunmap_atomic(paddr, KM_USER0);
+ kunmap_atomic(paddr, KM_IRQ1);
}
-/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
+/* Same thing as drbd_bm_set_bits,
+ * but more efficient for a large bit range.
* You must first drbd_bm_lock().
* Can be called to set the whole bitmap in one go.
* Sets bits from s to e _inclusive_. */
@@ -1366,6 +1367,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* Do not use memset, because we must account for changes,
* so we need to loop over the words with hweight() anyways.
*/
+ struct drbd_bitmap *b = mdev->bitmap;
unsigned long sl = ALIGN(s,BITS_PER_LONG);
unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
int first_page;
@@ -1376,15 +1378,19 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
if (e - s <= 3*BITS_PER_LONG) {
/* don't bother; el and sl may even be wrong. */
- __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
+ spin_lock_irq(&b->bm_lock);
+ __bm_change_bits_to(mdev, s, e, 1);
+ spin_unlock_irq(&b->bm_lock);
return;
}
/* difference is large enough that we can trust sl and el */
+ spin_lock_irq(&b->bm_lock);
+
/* bits filling the current long */
if (sl)
- __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
+ __bm_change_bits_to(mdev, s, sl-1, 1);
first_page = sl >> (3 + PAGE_SHIFT);
last_page = el >> (3 + PAGE_SHIFT);
@@ -1397,8 +1403,10 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
/* first and full pages, unless first page == last page */
for (page_nr = first_page; page_nr < last_page; page_nr++) {
bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+ spin_unlock_irq(&b->bm_lock);
cond_resched();
first_word = 0;
+ spin_lock_irq(&b->bm_lock);
}
/* last page (respectively only page, for first page == last page) */
@@ -1411,7 +1419,8 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* it would trigger an assert in __bm_change_bits_to()
*/
if (el <= e)
- __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
+ __bm_change_bits_to(mdev, el, e, 1);
+ spin_unlock_irq(&b->bm_lock);
}
/* returns bit state
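
[Editorial note -- not part of the patch.] With the km_type argument gone, __bm_change_bits_to() always maps with KM_IRQ1 and relies on its callers to hold bm_lock; inside _drbd_bm_set_bits() the lock is then deliberately dropped and re-taken around cond_resched(), so a large bitmap update never holds a spinlock across a scheduling point. A generic sketch of that drop/retake pattern, with placeholder names:

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(lock);		/* stands in for b->bm_lock */

	static void set_many_chunks(unsigned long first, unsigned long last)
	{
		unsigned long i;

		spin_lock_irq(&lock);
		for (i = first; i < last; i++) {
			/* ... one chunk of work done under the lock ... */

			spin_unlock_irq(&lock);
			cond_resched();		/* safe: lock not held here */
			spin_lock_irq(&lock);
		}
		spin_unlock_irq(&lock);
	}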
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 25d32c5aa50a..43beaca53179 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -4602,6 +4602,11 @@ int drbd_asender(struct drbd_thread *thi)
dev_err(DEV, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
+ /* If the data socket received something meanwhile,
+ * that is good enough: peer is still alive. */
+ if (time_after(mdev->last_received,
+ jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+ continue;
if (ping_timeout_active) {
dev_err(DEV, "PingAck did not arrive in time.\n");
goto reconnect;
@@ -4637,6 +4642,7 @@ int drbd_asender(struct drbd_thread *thi)
goto reconnect;
}
if (received == expect) {
+ mdev->last_received = jiffies;
D_ASSERT(cmd != NULL);
if (!cmd->process(mdev, h))
goto reconnect;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d76b06b6b20..4d3e6f6213ba 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -536,12 +536,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
return 1;
}
- /* starting with drbd 8.3.8, we can handle multi-bio EEs,
- * if it should be necessary */
- max_bio_size =
- mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
- mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
-
+ max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
number = drbd_rs_number_requests(mdev);
if (number == 0)
goto requeue;
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 999803ce10dc..5da67f165afa 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -90,9 +90,10 @@
#define G4x_GMCH_SIZE_MASK (0xf << 8)
#define G4x_GMCH_SIZE_1M (0x1 << 8)
#define G4x_GMCH_SIZE_2M (0x3 << 8)
-#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
-#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
-#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
+#define G4x_GMCH_SIZE_VT_EN (0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 548708c4b2b8..a7346ab97a3c 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -606,7 +606,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
return NOTIFY_OK;
/* interrupted by signal */
- return NOTIFY_BAD;
+ return notifier_from_errno(err);
case PM_POST_SUSPEND:
/*
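
[Editorial note -- not part of the patch.] notifier_from_errno() folds a negative errno into the notifier return value (setting NOTIFY_STOP_MASK), so the caller of the PM notifier chain can recover the original error with notifier_to_errno() instead of seeing a bare NOTIFY_BAD. A tiny illustration:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>

	static void notifier_errno_demo(void)
	{
		int nret = notifier_from_errno(-EBUSY);	/* NOTIFY_STOP_MASK gets set */

		WARN_ON(notifier_to_errno(nret) != -EBUSY);	/* the errno round-trips */
	}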
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4e04e1274388..596d5dd32f41 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -759,7 +759,7 @@ static void __exit acpi_cpufreq_exit(void)
cpufreq_unregister_driver(&acpi_cpufreq_driver);
- free_percpu(acpi_perf_data);
+ free_acpi_perf_data();
}
module_param(acpi_pstate_strict, uint, 0644);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 438e6c831170..ebb897329c1e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -264,6 +264,7 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
+#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
@@ -3190,6 +3191,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
int i, err;
size_t size;
+ if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
+ dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
+ return -ENOSYS;
+ }
+
ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
if (ohci == NULL) {
err = -ENOMEM;
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index bd6571e0097a..644ba1255d3c 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -223,7 +223,7 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
gedr = gpio_reg(&lnw->chip, base, GEDR);
pending = readl(gedr);
while (pending) {
- gpio = __ffs(pending) - 1;
+ gpio = __ffs(pending);
mask = BIT(gpio);
pending &= ~mask;
/* Clear before handling so we can't lose an edge */
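
[Editorial note -- not part of the patch.] __ffs() already returns the zero-based index of the least significant set bit, so the removed "- 1" reported an offset one GPIO too low (and wrapped around entirely when bit 0 was the one pending). A quick illustration:

	#include <linux/bitops.h>

	static void ffs_demo(void)
	{
		unsigned long pending = 0x08;		/* bit 3 pending            */
		unsigned int gpio = __ffs(pending);	/* == 3, the correct offset */
		unsigned long mask = BIT(gpio);		/* == 0x08                  */

		pending &= ~mask;			/* now 0, the loop would stop */
	}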
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/tps65910-gpio.c
index 8d1ddfdd63eb..15097ca616d6 100644
--- a/drivers/gpio/tps65910-gpio.c
+++ b/drivers/gpio/tps65910-gpio.c
@@ -81,8 +81,10 @@ void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
switch(tps65910_chip_id(tps65910)) {
case TPS65910:
tps65910->gpio.ngpio = 6;
+ break;
case TPS65911:
tps65910->gpio.ngpio = 9;
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 21058e6ad2b8..82db18506662 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -886,9 +886,6 @@ int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
- if (total_objects == 0)
- return -EINVAL;
-
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e1787022d6c8..296fbd66f0e1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1943,7 +1943,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
- goto out_iomapfree;
+ goto out_rmmap;
}
agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
@@ -1987,7 +1987,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
- goto out_iomapfree;
+ goto out_mtrrfree;
}
/* enable GEM by default */
@@ -2074,13 +2074,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_gem_unload:
+ if (dev_priv->mm.inactive_shrinker.shrink)
+ unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
-out_iomapfree:
+out_mtrrfree:
+ if (dev_priv->mm.gtt_mtrr >= 0) {
+ mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
+ dev->agp->agp_info.aper_size * 1024 * 1024);
+ dev_priv->mm.gtt_mtrr = -1;
+ }
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 013d304455b9..eb91e2dd7914 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -52,7 +52,7 @@ module_param_named(powersave, i915_powersave, int, 0600);
unsigned int i915_semaphores = 0;
module_param_named(semaphores, i915_semaphores, int, 0600);
-unsigned int i915_enable_rc6 = 1;
+unsigned int i915_enable_rc6 = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
unsigned int i915_enable_fbc = 0;
@@ -577,6 +577,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
+ case 7:
case 6:
ret = gen6_do_reset(dev, flags);
/* If reset with a user forcewake, try to restore */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 391b55f1cc74..e2aced6eec4c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -50,7 +50,6 @@ struct intel_dp {
bool has_audio;
int force_audio;
uint32_t color_range;
- int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
@@ -138,8 +137,8 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
int max_lane_count = 4;
- if (intel_dp->dpcd[0] >= 0x11) {
- max_lane_count = intel_dp->dpcd[2] & 0x1f;
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
@@ -153,7 +152,7 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
- int max_link_bw = intel_dp->dpcd[1];
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
switch (max_link_bw) {
case DP_LINK_BW_1_62:
@@ -774,7 +773,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
- if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp->DP |= DP_ENHANCED_FRAMING;
}
@@ -942,11 +942,44 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
udelay(200);
}
+/* If the sink supports it, try to set the power state appropriately */
+static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
+ } else {
+ /*
+ * When turning on, we need to retry for 1ms to give the sink
+ * time to wake up.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_write_1(intel_dp,
+ DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ msleep(1);
+ }
+ }
+}
+
static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
+ /* Wake up the sink first */
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_off(dev);
@@ -990,6 +1023,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (mode != DRM_MODE_DPMS_ON) {
if (is_edp(intel_dp))
ironlake_edp_backlight_off(dev);
+ intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
if (is_edp(intel_dp))
ironlake_edp_panel_off(dev);
@@ -998,6 +1032,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
} else {
if (is_edp(intel_dp))
ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
if (is_edp(intel_dp)) {
@@ -1009,7 +1044,31 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
}
- intel_dp->dpms_mode = mode;
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+ uint8_t *recv, int recv_bytes)
+{
+ int ret, i;
+
+ /*
+ * Sinks are *supposed* to come up within 1ms from an off state,
+ * but we're also supposed to retry 3 times per the spec.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_read(intel_dp, address, recv,
+ recv_bytes);
+ if (ret == recv_bytes)
+ return true;
+ msleep(1);
+ }
+
+ return false;
}
/*
@@ -1019,14 +1078,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp)
{
- int ret;
-
- ret = intel_dp_aux_native_read(intel_dp,
- DP_LANE0_1_STATUS,
- intel_dp->link_status, DP_LINK_STATUS_SIZE);
- if (ret != DP_LINK_STATUS_SIZE)
- return false;
- return true;
+ return intel_dp_aux_native_read_retry(intel_dp,
+ DP_LANE0_1_STATUS,
+ intel_dp->link_status,
+ DP_LINK_STATUS_SIZE);
}
static uint8_t
@@ -1515,6 +1570,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ int ret;
+
if (!intel_dp->base.base.crtc)
return;
@@ -1523,6 +1580,15 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
}
+ /* Try to read receiver status if the link appears to be up */
+ ret = intel_dp_aux_native_read(intel_dp,
+ 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd));
+ if (ret != sizeof(intel_dp->dpcd)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
if (!intel_channel_eq_ok(intel_dp)) {
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
@@ -1533,6 +1599,7 @@ static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
+ bool ret;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
@@ -1543,13 +1610,11 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
}
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd))
- == sizeof(intel_dp->dpcd)) {
- if (intel_dp->dpcd[0] != 0)
- status = connector_status_connected;
- }
+ ret = intel_dp_aux_native_read_retry(intel_dp,
+ 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd));
+ if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
+ status = connector_status_connected;
DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
return status;
@@ -1586,7 +1651,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
- if (intel_dp->dpcd[0] != 0)
+ if (intel_dp->dpcd[DP_DPCD_REV] != 0)
status = connector_status_connected;
}
@@ -1790,8 +1855,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
- if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_dp);
+ intel_dp_check_link_status(intel_dp);
}
/* Return which DP Port should be selected for Transcoder DP control */
@@ -1859,7 +1923,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return;
intel_dp->output_reg = output_reg;
- intel_dp->dpms_mode = -1;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -1954,8 +2017,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
sizeof(intel_dp->dpcd));
ironlake_edp_panel_vdd_off(intel_dp);
if (ret == sizeof(intel_dp->dpcd)) {
- if (intel_dp->dpcd[0] >= 0x11)
- dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+ dev_priv->no_aux_handshake =
+ intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
/* if this fails, presume the device is a ghost */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c0e0ee63fbf4..39ac2b634ae5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -165,7 +165,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
- return intel_wait_ring_buffer(ring, ring->space - 8);
+ return intel_wait_ring_buffer(ring, ring->size - 8);
}
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 144f79a350ae..731acea865b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -371,7 +371,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->vram.flags_valid = nv50_vram_flags_valid;
break;
case 0xC0:
- case 0xD0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
@@ -923,7 +922,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->card_type = NV_50;
break;
case 0xc0:
- case 0xd0:
dev_priv->card_type = NV_C0;
break;
default:
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 12d2fdc52414..15bd0477a3e8 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -985,17 +985,19 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
{
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
- save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
- save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
- save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
- save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
+ save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+ save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+ save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
@@ -1004,35 +1006,45 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
WREG32(VGA_RENDER_CONTROL, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
- WREG32(EVERGREEN_D3VGA_CONTROL, 0);
- WREG32(EVERGREEN_D4VGA_CONTROL, 0);
- WREG32(EVERGREEN_D5VGA_CONTROL, 0);
- WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ if (rdev->num_crtc >= 4) {
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ }
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1055,7 +1067,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
@@ -1073,7 +1085,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
-
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
@@ -1101,31 +1114,41 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
/* Restore video state */
WREG32(D1VGA_CONTROL, save->vga_control[0]);
WREG32(D2VGA_CONTROL, save->vga_control[1]);
- WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
- WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
- WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
- WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+ if (rdev->num_crtc >= 4) {
+ WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
+ WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
+ WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+ }
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
@@ -1977,7 +2000,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
- gb_backend_map = 0x00006420;
+ gb_backend_map = 0x00002200;
break;
default:
gb_backend_map =
@@ -2248,7 +2271,10 @@ int evergreen_mc_init(struct radeon_device *rdev)
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
- tmp = RREG32(MC_ARB_RAMCFG);
+ if (rdev->flags & RADEON_IS_IGP)
+ tmp = RREG32(FUS_MC_ARB_RAMCFG);
+ else
+ tmp = RREG32(MC_ARB_RAMCFG);
if (tmp & CHANSIZE_OVERRIDE) {
chansize = 16;
} else if (tmp & CHANSIZE_MASK) {
@@ -2414,18 +2440,22 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
@@ -2544,19 +2574,25 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ }
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2580,53 +2616,57 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
-
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
-
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
-
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
-
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
-
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->num_crtc >= 4) {
+ if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+ }
+
+ if (rdev->num_crtc >= 6) {
+ if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+ }
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
@@ -3234,6 +3274,7 @@ void evergreen_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 57f3bc17b87e..2eb251858e72 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -252,7 +252,7 @@ draw_auto(struct radeon_device *rdev)
}
-/* emits 36 */
+/* emits 39 */
static void
set_default_state(struct radeon_device *rdev)
{
@@ -531,6 +531,11 @@ set_default_state(struct radeon_device *rdev)
radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, 0);
+ /* setup LDS */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0x10001000);
+
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
@@ -773,7 +778,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
- ring_size += 52; /* shaders + def state */
+ ring_size += 55; /* shaders + def state */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 10; /* fence emit for done copy */
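
[Editorial note.] The accounting above follows from the three new radeon_ring_write() calls in set_default_state(): the PACKET3_SET_CONFIG_REG header, the SQ_LDS_RESOURCE_MGMT register offset and its value add three dwords, which is why the "emits" comment goes from 36 to 39 and the ring_size budget from 52 to 55.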
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 1636e3449825..b7b2714f0b32 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -466,7 +466,7 @@
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
-# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP(x) ((x) << 1)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
@@ -547,7 +547,7 @@
# define LB_D5_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD5_INTERRUPT (1 << 17)
# define DC_HPD5_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
# define LB_D6_VLINE_INTERRUPT (1 << 2)
# define LB_D6_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD6_INTERRUPT (1 << 17)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 16caafeadf5e..559dbd412906 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1581,6 +1581,7 @@ void cayman_fini(struct radeon_device *rdev)
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 9736746da2d6..4672869cdb26 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -320,7 +320,7 @@
#define CGTS_USER_TCC_DISABLE 0x914C
#define TCC_DISABLE_MASK 0xFFFF0000
#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x915C
+#define CGTS_SM_CTRL_REG 0x9150
#define OVERRIDE (1 << 21)
#define TA_CNTL_AUX 0x9508
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f79d2ccb6755..bc54b26cb32f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2628,6 +2628,7 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f140a0d5cb54..0245ae6c204e 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -536,7 +536,7 @@
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
-# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP(x) ((x) << 1)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 3fc5fa1aefd0..229a20f10e2b 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -331,7 +331,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(RV370_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -350,7 +350,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
@@ -367,7 +367,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(RV370_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -390,7 +390,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ if (rdev->flags & RADEON_IS_PCIE)
+ bus_cntl = RREG32(RV370_BUS_CNTL);
+ else
+ bus_cntl = RREG32(RADEON_BUS_CNTL);
crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
crtc2_gen_cntl = 0;
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
@@ -412,7 +415,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+ else
+ WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
/* Turn off mem requests and CRTC for both controllers */
WREG32(RADEON_CRTC_GEN_CNTL,
@@ -439,7 +445,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, bus_cntl);
+ else
+ WREG32(RADEON_BUS_CNTL, bus_cntl);
WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cbfca3a24fdf..9792d4ffdc86 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -52,6 +52,12 @@ void radeon_connector_hotplug(struct drm_connector *connector)
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ /* bail if the connector does not have hpd pin, e.g.,
+ * VGA, TV, etc.
+ */
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+ return;
+
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
/* powering up/down the eDP panel generates hpd events which
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index ec93a75369e6..bc44a3d35ec6 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -300,6 +300,8 @@
# define RADEON_BUS_READ_BURST (1 << 30)
#define RADEON_BUS_CNTL1 0x0034
# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+#define RV370_BUS_CNTL 0x004c
+# define RV370_BUS_BIOS_DIS_ROM (1 << 2)
/* rv370/rv380, rv410, r423/r430/r480, r5xx */
#define RADEON_MSI_REARM_EN 0x0160
# define RV370_MSI_REARM_EN (1 << 0)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6e3b11e5abbe..1f5850e473cc 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -426,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev)
return radeon_gart_table_vram_alloc(rdev);
}
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
@@ -440,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
return r;
radeon_gart_restore(rdev);
/* Enable bus master */
- tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
- WREG32(R_00004C_BUS_CNTL, tmp);
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
/* FIXME: setup default page */
WREG32_MC(R_000100_MC_PT0_CNTL,
(S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 8bb347d23ca6..4de51891aa6d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1368,6 +1368,7 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rv770_vram_scratch_fini(rdev);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 16db83c83c8b..5f888f7e7dcb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -333,7 +333,7 @@ config SENSORS_F71882FG
F71858FG
F71862FG
F71863FG
- F71869F/E
+ F71869F/E/A
F71882FG
F71883FG
F71889FG/ED/A
diff --git a/drivers/hwmon/adm1275.c b/drivers/hwmon/adm1275.c
index c2ee2048ab91..8bc1bd663721 100644
--- a/drivers/hwmon/adm1275.c
+++ b/drivers/hwmon/adm1275.c
@@ -32,6 +32,7 @@ static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int config;
+ int ret;
struct pmbus_driver_info *info;
if (!i2c_check_functionality(client->adapter,
@@ -43,30 +44,32 @@ static int adm1275_probe(struct i2c_client *client,
return -ENOMEM;
config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
- if (config < 0)
- return config;
+ if (config < 0) {
+ ret = config;
+ goto err_mem;
+ }
info->pages = 1;
info->direct[PSC_VOLTAGE_IN] = true;
info->direct[PSC_VOLTAGE_OUT] = true;
info->direct[PSC_CURRENT_OUT] = true;
- info->m[PSC_CURRENT_OUT] = 800;
+ info->m[PSC_CURRENT_OUT] = 807;
info->b[PSC_CURRENT_OUT] = 20475;
info->R[PSC_CURRENT_OUT] = -1;
info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
if (config & ADM1275_VRANGE) {
- info->m[PSC_VOLTAGE_IN] = 19045;
+ info->m[PSC_VOLTAGE_IN] = 19199;
info->b[PSC_VOLTAGE_IN] = 0;
info->R[PSC_VOLTAGE_IN] = -2;
- info->m[PSC_VOLTAGE_OUT] = 19045;
+ info->m[PSC_VOLTAGE_OUT] = 19199;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = -2;
} else {
- info->m[PSC_VOLTAGE_IN] = 6666;
+ info->m[PSC_VOLTAGE_IN] = 6720;
info->b[PSC_VOLTAGE_IN] = 0;
info->R[PSC_VOLTAGE_IN] = -1;
- info->m[PSC_VOLTAGE_OUT] = 6666;
+ info->m[PSC_VOLTAGE_OUT] = 6720;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = -1;
}
@@ -76,7 +79,14 @@ static int adm1275_probe(struct i2c_client *client,
else
info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
- return pmbus_do_probe(client, id, info);
+ ret = pmbus_do_probe(client, id, info);
+ if (ret)
+ goto err_mem;
+ return 0;
+
+err_mem:
+ kfree(info);
+ return ret;
}
static int adm1275_remove(struct i2c_client *client)
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index e0ef32378ac6..0064432f361f 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -78,8 +78,9 @@ static u16 emc6w201_read16(struct i2c_client *client, u8 reg)
lsb = i2c_smbus_read_byte_data(client, reg);
msb = i2c_smbus_read_byte_data(client, reg + 1);
- if (lsb < 0 || msb < 0) {
- dev_err(&client->dev, "16-bit read failed at 0x%02x\n", reg);
+ if (unlikely(lsb < 0 || msb < 0)) {
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 16, "read", reg);
return 0xFFFF; /* Arbitrary value */
}
@@ -95,10 +96,39 @@ static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val)
int err;
err = i2c_smbus_write_byte_data(client, reg, val & 0xff);
- if (!err)
+ if (likely(!err))
err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
- if (err < 0)
- dev_err(&client->dev, "16-bit write failed at 0x%02x\n", reg);
+ if (unlikely(err < 0))
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 16, "write", reg);
+
+ return err;
+}
+
+/* Read 8-bit value from register */
+static u8 emc6w201_read8(struct i2c_client *client, u8 reg)
+{
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, reg);
+ if (unlikely(val < 0)) {
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 8, "read", reg);
+ return 0x00; /* Arbitrary value */
+ }
+
+ return val;
+}
+
+/* Write 8-bit value to register */
+static int emc6w201_write8(struct i2c_client *client, u8 reg, u8 val)
+{
+ int err;
+
+ err = i2c_smbus_write_byte_data(client, reg, val);
+ if (unlikely(err < 0))
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 8, "write", reg);
return err;
}
@@ -114,25 +144,25 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
for (nr = 0; nr < 6; nr++) {
data->in[input][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_IN(nr));
data->in[min][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_IN_LOW(nr));
data->in[max][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_IN_HIGH(nr));
}
for (nr = 0; nr < 6; nr++) {
data->temp[input][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_TEMP(nr));
data->temp[min][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_TEMP_LOW(nr));
data->temp[max][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_TEMP_HIGH(nr));
}
@@ -192,7 +222,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
mutex_lock(&data->update_lock);
data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
- err = i2c_smbus_write_byte_data(client, reg, data->in[sf][nr]);
+ err = emc6w201_write8(client, reg, data->in[sf][nr]);
mutex_unlock(&data->update_lock);
return err < 0 ? err : count;
@@ -229,7 +259,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
mutex_lock(&data->update_lock);
data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
- err = i2c_smbus_write_byte_data(client, reg, data->temp[sf][nr]);
+ err = emc6w201_write8(client, reg, data->temp[sf][nr]);
mutex_unlock(&data->update_lock);
return err < 0 ? err : count;
@@ -444,7 +474,7 @@ static int emc6w201_detect(struct i2c_client *client,
/* Check configuration */
config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG);
- if ((config & 0xF4) != 0x04)
+ if (config < 0 || (config & 0xF4) != 0x04)
return -ENODEV;
if (!(config & 0x01)) {
dev_err(&client->dev, "Monitoring not enabled\n");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a4a94a096c90..2d96ed2bf8ed 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -52,6 +52,7 @@
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
#define SIO_F71869_ID 0x0814 /* Chipset ID */
+#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
#define SIO_F71889E_ID 0x0909 /* Chipset ID */
@@ -108,8 +109,8 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
- f71889ed, f71889a, f8000, f81865f };
+enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a, f71882fg,
+ f71889fg, f71889ed, f71889a, f8000, f81865f };
static const char *f71882fg_names[] = {
"f71808e",
@@ -117,6 +118,7 @@ static const char *f71882fg_names[] = {
"f71858fg",
"f71862fg",
"f71869", /* Both f71869f and f71869e, reg. compatible and same id */
+ "f71869a",
"f71882fg",
"f71889fg", /* f81801u too, same id */
"f71889ed",
@@ -131,6 +133,7 @@ static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
[f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
[f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f71869a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71882fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71889fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71889ed] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
@@ -145,6 +148,7 @@ static const char f71882fg_has_in1_alarm[] = {
[f71858fg] = 0,
[f71862fg] = 0,
[f71869] = 0,
+ [f71869a] = 0,
[f71882fg] = 1,
[f71889fg] = 1,
[f71889ed] = 1,
@@ -159,6 +163,7 @@ static const char f71882fg_fan_has_beep[] = {
[f71858fg] = 0,
[f71862fg] = 1,
[f71869] = 1,
+ [f71869a] = 1,
[f71882fg] = 1,
[f71889fg] = 1,
[f71889ed] = 1,
@@ -173,6 +178,7 @@ static const char f71882fg_nr_fans[] = {
[f71858fg] = 3,
[f71862fg] = 3,
[f71869] = 3,
+ [f71869a] = 3,
[f71882fg] = 4,
[f71889fg] = 3,
[f71889ed] = 3,
@@ -187,6 +193,7 @@ static const char f71882fg_temp_has_beep[] = {
[f71858fg] = 0,
[f71862fg] = 1,
[f71869] = 1,
+ [f71869a] = 1,
[f71882fg] = 1,
[f71889fg] = 1,
[f71889ed] = 1,
@@ -201,6 +208,7 @@ static const char f71882fg_nr_temps[] = {
[f71858fg] = 3,
[f71862fg] = 3,
[f71869] = 3,
+ [f71869a] = 3,
[f71882fg] = 3,
[f71889fg] = 3,
[f71889ed] = 3,
@@ -2243,6 +2251,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71808e:
case f71808a:
case f71869:
+ case f71869a:
/* These always have signed auto point temps */
data->auto_point_temp_signed = 1;
/* Fall through to select correct fan/pwm reg bank! */
@@ -2305,6 +2314,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71808e:
case f71808a:
case f71869:
+ case f71869a:
case f71889fg:
case f71889ed:
case f71889a:
@@ -2528,6 +2538,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
case SIO_F71869_ID:
sio_data->type = f71869;
break;
+ case SIO_F71869A_ID:
+ sio_data->type = f71869a;
+ break;
case SIO_F71882_ID:
sio_data->type = f71882fg;
break;
@@ -2662,7 +2675,7 @@ static void __exit f71882fg_exit(void)
}
MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans Edgington, Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans Edgington, Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
module_init(f71882fg_init);
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 2582bfef6ccb..c8195a077da3 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -202,7 +202,7 @@ static struct vrm_model vrm_models[] = {
{X86_VENDOR_CENTAUR, 0x6, 0x7, ANY, 85}, /* Eden ESP/Ezra */
{X86_VENDOR_CENTAUR, 0x6, 0x8, 0x7, 85}, /* Ezra T */
- {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85}, /* Nemiah */
+ {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85}, /* Nehemiah */
{X86_VENDOR_CENTAUR, 0x6, 0x9, ANY, 17}, /* C3-M, Eden-N */
{X86_VENDOR_CENTAUR, 0x6, 0xA, 0x7, 0}, /* No information */
{X86_VENDOR_CENTAUR, 0x6, 0xA, ANY, 13}, /* C7, Esther */
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 1a6dfb6df1e7..d3b464b74ced 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -98,11 +98,16 @@ struct lm95241_data {
};
/* Conversions */
-static int TempFromReg(u8 val_h, u8 val_l)
+static int temp_from_reg_signed(u8 val_h, u8 val_l)
{
- if (val_h & 0x80)
- return val_h - 0x100;
- return val_h * 1000 + val_l * 1000 / 256;
+ s16 val_hl = (val_h << 8) | val_l;
+ return val_hl * 1000 / 256;
+}
+
+static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
+{
+ u16 val_hl = (val_h << 8) | val_l;
+ return val_hl * 1000 / 256;
}
static struct lm95241_data *lm95241_update_device(struct device *dev)
@@ -135,10 +140,13 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm95241_data *data = lm95241_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
return snprintf(buf, PAGE_SIZE - 1, "%d\n",
- TempFromReg(data->temp[to_sensor_dev_attr(attr)->index],
- data->temp[to_sensor_dev_attr(attr)->index + 1]));
+ index == 0 || (data->config & (1 << (index / 2))) ?
+ temp_from_reg_signed(data->temp[index], data->temp[index + 1]) :
+ temp_from_reg_unsigned(data->temp[index],
+ data->temp[index + 1]));
}
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
@@ -339,7 +347,7 @@ static int lm95241_detect(struct i2c_client *new_client,
if ((i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID)
== MANUFACTURER_ID)
&& (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID)
- >= DEFAULT_REVISION)) {
+ == DEFAULT_REVISION)) {
name = DEVNAME;
} else {
dev_dbg(&adapter->dev, "LM95241 detection failed at 0x%02x\n",
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus.c
index 98e2e28899e2..9b1f0c37ef77 100644
--- a/drivers/hwmon/pmbus.c
+++ b/drivers/hwmon/pmbus.c
@@ -47,22 +47,29 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
if (info->func[0]
&& pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
- if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+ if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_12) &&
+ pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
info->func[0] |= PMBUS_HAVE_FAN12;
if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
}
- if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+ if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_34) &&
+ pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
info->func[0] |= PMBUS_HAVE_FAN34;
if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
}
- if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) {
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1))
info->func[0] |= PMBUS_HAVE_TEMP;
- if (pmbus_check_byte_register(client, 0,
- PMBUS_STATUS_TEMPERATURE))
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_2))
+ info->func[0] |= PMBUS_HAVE_TEMP2;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_3))
+ info->func[0] |= PMBUS_HAVE_TEMP3;
+ if (info->func[0] & (PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2
+ | PMBUS_HAVE_TEMP3)
+ && pmbus_check_byte_register(client, 0,
+ PMBUS_STATUS_TEMPERATURE))
info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
- }
/* Sensors detected on all pages */
for (page = 0; page < info->pages; page++) {
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 354770ed3186..8e31a8e2c746 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -362,8 +362,8 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
* Convert linear sensor values to milli- or micro-units
* depending on sensor type.
*/
-static int pmbus_reg2data_linear(struct pmbus_data *data,
- struct pmbus_sensor *sensor)
+static long pmbus_reg2data_linear(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
{
s16 exponent;
s32 mantissa;
@@ -397,15 +397,15 @@ static int pmbus_reg2data_linear(struct pmbus_data *data,
else
val >>= -exponent;
- return (int)val;
+ return val;
}
/*
* Convert direct sensor values to milli- or micro-units
* depending on sensor type.
*/
-static int pmbus_reg2data_direct(struct pmbus_data *data,
- struct pmbus_sensor *sensor)
+static long pmbus_reg2data_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
{
long val = (s16) sensor->data;
long m, b, R;
@@ -440,12 +440,12 @@ static int pmbus_reg2data_direct(struct pmbus_data *data,
R++;
}
- return (int)((val - b) / m);
+ return (val - b) / m;
}
-static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
{
- int val;
+ long val;
if (data->info->direct[sensor->class])
val = pmbus_reg2data_direct(data, sensor);
@@ -619,7 +619,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
if (!s1 && !s2)
*val = !!regval;
else {
- int v1, v2;
+ long v1, v2;
struct pmbus_sensor *sensor1, *sensor2;
sensor1 = &data->sensors[s1];
@@ -661,7 +661,7 @@ static ssize_t pmbus_show_sensor(struct device *dev,
if (sensor->data < 0)
return sensor->data;
- return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+ return snprintf(buf, PAGE_SIZE, "%ld\n", pmbus_reg2data(data, sensor));
}
static ssize_t pmbus_set_sensor(struct device *dev,
@@ -1430,14 +1430,9 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- /*
- * Bail out if status register or PMBus revision register
- * does not exist.
- */
- if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
- || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
- dev_err(&client->dev,
- "Status or revision register not found\n");
+ /* Bail out if PMBus status register does not exist. */
+ if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
+ dev_err(&client->dev, "PMBus status register not found\n");
ret = -ENODEV;
goto out_data;
}
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 020c87273ea1..3494a4cce414 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -887,7 +887,7 @@ static void __exit sch5627_exit(void)
}
MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
module_init(sch5627_init);
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 52b545a795f2..cbc98aea5b09 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -193,7 +193,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
return;
}
if (twi_int_status & MCOMP) {
- if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
+ if ((read_MASTER_CTL(iface) & MEN) == 0 &&
+ (iface->cur_mode == TWI_I2C_MODE_REPEAT ||
+ iface->cur_mode == TWI_I2C_MODE_COMBINED)) {
+ iface->result = -1;
+ write_INT_MASK(iface, 0);
+ write_MASTER_CTL(iface, 0);
+ } else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
if (iface->readNum == 0) {
/* set the read number to 1 and ask for manual
* stop in block combine mode
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 6c00c107ebf3..f84a63c6dd97 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -248,12 +248,12 @@ static inline int is_msgend(struct s3c24xx_i2c *i2c)
return i2c->msg_ptr >= i2c->msg->len;
}
-/* i2s_s3c_irq_nextbyte
+/* i2c_s3c_irq_nextbyte
*
* process an interrupt and work out what to do
*/
-static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
+static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
{
unsigned long tmp;
unsigned char byte;
@@ -264,7 +264,6 @@ static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
case STATE_IDLE:
dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
goto out;
- break;
case STATE_STOP:
dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
@@ -444,7 +443,7 @@ static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
/* pretty much this leaves us with the fact that we've
* transmitted or received whatever byte we last sent */
- i2s_s3c_irq_nextbyte(i2c, status);
+ i2c_s3c_irq_nextbyte(i2c, status);
out:
return IRQ_HANDLED;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4d9319665e32..fb3b4f8f8152 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -40,8 +40,10 @@
#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
#define I2C_STATUS 0x01C
#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NACK (1<<1)
#define I2C_SL_CNFG_NEWSL (1<<2)
#define I2C_SL_ADDR1 0x02c
+#define I2C_SL_ADDR2 0x030
#define I2C_TX_FIFO 0x050
#define I2C_RX_FIFO 0x054
#define I2C_PACKET_TRANSFER_STATUS 0x058
@@ -337,7 +339,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (!i2c_dev->is_dvc) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
- i2c_writel(i2c_dev, sl_cfg | I2C_SL_CNFG_NEWSL, I2C_SL_CNFG);
+ sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL;
+ i2c_writel(i2c_dev, sl_cfg, I2C_SL_CNFG);
+ i2c_writel(i2c_dev, 0xfc, I2C_SL_ADDR1);
+ i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2);
+
}
val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f62f52fb9ece..fc0f2bd9ca82 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3641,7 +3641,8 @@ static struct kobj_type cm_port_obj_type = {
static char *cm_devnode(struct device *dev, mode_t *mode)
{
- *mode = 0666;
+ if (mode)
+ *mode = 0666;
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e49a85f8a44d..56898b6578a4 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -826,7 +826,8 @@ static void ib_uverbs_remove_one(struct ib_device *device)
static char *uverbs_devnode(struct device *dev, mode_t *mode)
{
- *mode = 0666;
+ if (mode)
+ *mode = 0666;
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 40b02ae96f86..6229c3e8e78b 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -520,7 +520,8 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
*/
static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
{
- const struct pm8xxx_keypad_platform_data *pdata = mfd_get_data(pdev);
+ const struct pm8xxx_keypad_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
const struct matrix_keymap_data *keymap_data;
struct pmic8xxx_kp *kp;
int rc;
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 97e07e786e41..b3cfb9c71e66 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -90,7 +90,8 @@ static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
unsigned int delay;
u8 pon_cntl;
struct pmic8xxx_pwrkey *pwrkey;
- const struct pm8xxx_pwrkey_platform_data *pdata = mfd_get_data(pdev);
+ const struct pm8xxx_pwrkey_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "power key platform data not supplied\n");
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index d8d3a1e910a1..a2c874623e35 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -88,7 +88,7 @@ static const struct pca9532_chip_info pca9532_chip_info_tbl[] = {
static struct i2c_driver pca9532_driver = {
.driver = {
- .name = "pca953x",
+ .name = "leds-pca953x",
},
.probe = pca9532_probe,
.remove = pca9532_remove,
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 8fa539dde1b4..7f7079b12f23 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -597,12 +597,17 @@ static void __devexit fintek_remove(struct pnp_dev *pdev)
static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
{
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+ unsigned long flags;
fit_dbg("%s called", __func__);
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+
/* disable all CIR interrupts */
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
fintek_config_mode_enable(fintek);
/* disable cir logical dev */
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 3f3c70716268..6bc35eeb653b 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -307,6 +307,14 @@ static const struct {
/* 0xffdc iMON MCE VFD */
{ 0x00010000ffffffeell, KEY_VOLUMEUP },
{ 0x01000000ffffffeell, KEY_VOLUMEDOWN },
+ { 0x00000001ffffffeell, KEY_MUTE },
+ { 0x0000000fffffffeell, KEY_MEDIA },
+ { 0x00000012ffffffeell, KEY_UP },
+ { 0x00000013ffffffeell, KEY_DOWN },
+ { 0x00000014ffffffeell, KEY_LEFT },
+ { 0x00000015ffffffeell, KEY_RIGHT },
+ { 0x00000016ffffffeell, KEY_ENTER },
+ { 0x00000017ffffffeell, KEY_ESC },
/* iMON Knob values */
{ 0x000100ffffffffeell, KEY_VOLUMEUP },
{ 0x010000ffffffffeell, KEY_VOLUMEDOWN },
@@ -1582,16 +1590,16 @@ static void imon_incoming_packet(struct imon_context *ictx,
/* Only panel type events left to process now */
spin_lock_irqsave(&ictx->kc_lock, flags);
+ do_gettimeofday(&t);
/* KEY_MUTE repeats from knob need to be suppressed */
if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
- do_gettimeofday(&t);
msec = tv2int(&t, &prev_time);
- prev_time = t;
if (msec < ictx->idev->rep[REP_DELAY]) {
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
}
+ prev_time = t;
kc = ictx->kc;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
@@ -1603,7 +1611,9 @@ static void imon_incoming_packet(struct imon_context *ictx,
input_report_key(ictx->idev, kc, 0);
input_sync(ictx->idev);
+ spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->last_keycode = kc;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
@@ -1740,6 +1750,8 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
detected_display_type = IMON_DISPLAY_TYPE_VFD;
break;
/* iMON VFD, MCE IR */
+ case 0x46:
+ case 0x7e:
case 0x9e:
dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
@@ -1755,6 +1767,9 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
dev_info(ictx->dev, "Unknown 0xffdc device, "
"defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ /* We don't know which one it is; allow the user to set the
+ * RC6 one from userspace if OTHER wasn't correct. */
+ allowed_protos |= RC_TYPE_RC6;
break;
}
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 11c19d8d0ee0..423ed45d6c55 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -114,18 +114,20 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
s64 delta; /* ns */
DEFINE_IR_RAW_EVENT(ev);
int rc = 0;
+ int delay;
if (!dev->raw)
return -EINVAL;
now = ktime_get();
delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
+ delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);
/* Check for a long duration since last event or if we're
* being called for the first time, note that delta can't
* possibly be negative.
*/
- if (delta > IR_MAX_DURATION || !dev->raw->last_type)
+ if (delta > delay || !dev->raw->last_type)
type |= IR_START_EVENT;
else
ev.duration = delta;
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index e716b931cf7e..ecd3d0280768 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1347,6 +1347,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 0: ITE8704 */
.model = "ITE8704 CIR transceiver",
.io_region_size = IT87_IOREG_LENGTH,
+ .io_rsrc_no = 0,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1371,6 +1372,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 1: ITE8713 */
.model = "ITE8713 CIR transceiver",
.io_region_size = IT87_IOREG_LENGTH,
+ .io_rsrc_no = 0,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1395,6 +1397,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 2: ITE8708 */
.model = "ITE8708 CIR transceiver",
.io_region_size = IT8708_IOREG_LENGTH,
+ .io_rsrc_no = 0,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1420,6 +1423,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 3: ITE8709 */
.model = "ITE8709 CIR transceiver",
.io_region_size = IT8709_IOREG_LENGTH,
+ .io_rsrc_no = 2,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1461,6 +1465,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
struct rc_dev *rdev = NULL;
int ret = -ENOMEM;
int model_no;
+ int io_rsrc_no;
ite_dbg("%s called", __func__);
@@ -1490,10 +1495,11 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* get the description for the device */
dev_desc = &ite_dev_descs[model_no];
+ io_rsrc_no = dev_desc->io_rsrc_no;
/* validate pnp resources */
- if (!pnp_port_valid(pdev, 0) ||
- pnp_port_len(pdev, 0) != dev_desc->io_region_size) {
+ if (!pnp_port_valid(pdev, io_rsrc_no) ||
+ pnp_port_len(pdev, io_rsrc_no) != dev_desc->io_region_size) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
goto failure;
}
@@ -1504,7 +1510,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
}
/* store resource values */
- itdev->cir_addr = pnp_port_start(pdev, 0);
+ itdev->cir_addr = pnp_port_start(pdev, io_rsrc_no);
itdev->cir_irq = pnp_irq(pdev, 0);
/* initialize spinlocks */
diff --git a/drivers/media/rc/ite-cir.h b/drivers/media/rc/ite-cir.h
index 16a19f5fd718..aa899a0b9750 100644
--- a/drivers/media/rc/ite-cir.h
+++ b/drivers/media/rc/ite-cir.h
@@ -57,6 +57,9 @@ struct ite_dev_params {
/* size of the I/O region */
int io_region_size;
+ /* IR pnp I/O resource number */
+ int io_rsrc_no;
+
/* true if the hardware supports transmission */
bool hw_tx_capable;
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
index bb10ffe086b4..8d558ae63456 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
@@ -15,43 +15,39 @@
/* Pinnacle PCTV HD 800i mini remote */
static struct rc_map_table pinnacle_pctv_hd[] = {
-
- { 0x0f, KEY_1 },
- { 0x15, KEY_2 },
- { 0x10, KEY_3 },
- { 0x18, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x1e, KEY_6 },
- { 0x11, KEY_7 },
- { 0x21, KEY_8 },
- { 0x12, KEY_9 },
- { 0x27, KEY_0 },
-
- { 0x24, KEY_ZOOM },
- { 0x2a, KEY_SUBTITLE },
-
- { 0x00, KEY_MUTE },
- { 0x01, KEY_ENTER }, /* Pinnacle Logo */
- { 0x39, KEY_POWER },
-
- { 0x03, KEY_VOLUMEUP },
- { 0x09, KEY_VOLUMEDOWN },
- { 0x06, KEY_CHANNELUP },
- { 0x0c, KEY_CHANNELDOWN },
-
- { 0x2d, KEY_REWIND },
- { 0x30, KEY_PLAYPAUSE },
- { 0x33, KEY_FASTFORWARD },
- { 0x3c, KEY_STOP },
- { 0x36, KEY_RECORD },
- { 0x3f, KEY_EPG }, /* Labeled "?" */
+ /* Key codes for the tiny Pinnacle remote */
+ { 0x0700, KEY_MUTE },
+ { 0x0701, KEY_MENU }, /* Pinnacle logo */
+ { 0x0739, KEY_POWER },
+ { 0x0703, KEY_VOLUMEUP },
+ { 0x0709, KEY_VOLUMEDOWN },
+ { 0x0706, KEY_CHANNELUP },
+ { 0x070c, KEY_CHANNELDOWN },
+ { 0x070f, KEY_1 },
+ { 0x0715, KEY_2 },
+ { 0x0710, KEY_3 },
+ { 0x0718, KEY_4 },
+ { 0x071b, KEY_5 },
+ { 0x071e, KEY_6 },
+ { 0x0711, KEY_7 },
+ { 0x0721, KEY_8 },
+ { 0x0712, KEY_9 },
+ { 0x0727, KEY_0 },
+ { 0x0724, KEY_ZOOM }, /* 'Square' key */
+ { 0x072a, KEY_SUBTITLE }, /* 'T' key */
+ { 0x072d, KEY_REWIND },
+ { 0x0730, KEY_PLAYPAUSE },
+ { 0x0733, KEY_FASTFORWARD },
+ { 0x0736, KEY_RECORD },
+ { 0x073c, KEY_STOP },
+ { 0x073f, KEY_HELP }, /* '?' key */
};
static struct rc_map_list pinnacle_pctv_hd_map = {
.map = {
.scan = pinnacle_pctv_hd,
.size = ARRAY_SIZE(pinnacle_pctv_hd),
- .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
+ .rc_type = RC_TYPE_RC5,
.name = RC_MAP_PINNACLE_PCTV_HD,
}
};
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index fd237ab120bb..27997a9ceb0d 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -55,6 +55,8 @@ struct irctl {
struct lirc_buffer *buf;
unsigned int chunk_size;
+ struct cdev *cdev;
+
struct task_struct *task;
long jiffies_to_wait;
};
@@ -62,7 +64,6 @@ struct irctl {
static DEFINE_MUTEX(lirc_dev_lock);
static struct irctl *irctls[MAX_IRCTL_DEVICES];
-static struct cdev cdevs[MAX_IRCTL_DEVICES];
/* Only used for sysfs but defined to void otherwise */
static struct class *lirc_class;
@@ -167,9 +168,13 @@ static struct file_operations lirc_dev_fops = {
static int lirc_cdev_add(struct irctl *ir)
{
- int retval;
+ int retval = -ENOMEM;
struct lirc_driver *d = &ir->d;
- struct cdev *cdev = &cdevs[d->minor];
+ struct cdev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ goto err_out;
if (d->fops) {
cdev_init(cdev, d->fops);
@@ -180,12 +185,20 @@ static int lirc_cdev_add(struct irctl *ir)
}
retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
if (retval)
- return retval;
+ goto err_out;
retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
- if (retval)
+ if (retval) {
kobject_put(&cdev->kobj);
+ goto err_out;
+ }
+
+ ir->cdev = cdev;
+
+ return 0;
+err_out:
+ kfree(cdev);
return retval;
}
@@ -214,7 +227,7 @@ int lirc_register_driver(struct lirc_driver *d)
if (MAX_IRCTL_DEVICES <= d->minor) {
dev_err(d->dev, "lirc_dev: lirc_register_driver: "
"\"minor\" must be between 0 and %d (%d)!\n",
- MAX_IRCTL_DEVICES-1, d->minor);
+ MAX_IRCTL_DEVICES - 1, d->minor);
err = -EBADRQC;
goto out;
}
@@ -369,7 +382,7 @@ int lirc_unregister_driver(int minor)
if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
printk(KERN_ERR "lirc_dev: %s: minor (%d) must be between "
- "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES-1);
+ "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES - 1);
return -EBADRQC;
}
@@ -380,7 +393,7 @@ int lirc_unregister_driver(int minor)
return -ENOENT;
}
- cdev = &cdevs[minor];
+ cdev = ir->cdev;
mutex_lock(&lirc_dev_lock);
@@ -410,6 +423,7 @@ int lirc_unregister_driver(int minor)
} else {
lirc_irctl_cleanup(ir);
cdev_del(cdev);
+ kfree(cdev);
kfree(ir);
irctls[minor] = NULL;
}
@@ -453,7 +467,7 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
goto error;
}
- cdev = &cdevs[iminor(inode)];
+ cdev = ir->cdev;
if (try_module_get(cdev->owner)) {
ir->open++;
retval = ir->d.set_use_inc(ir->d.data);
@@ -484,13 +498,15 @@ EXPORT_SYMBOL(lirc_dev_fop_open);
int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
struct irctl *ir = irctls[iminor(inode)];
- struct cdev *cdev = &cdevs[iminor(inode)];
+ struct cdev *cdev;
if (!ir) {
printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
return -EINVAL;
}
+ cdev = ir->cdev;
+
dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);
WARN_ON(mutex_lock_killable(&lirc_dev_lock));
@@ -503,6 +519,7 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
lirc_irctl_cleanup(ir);
cdev_del(cdev);
irctls[ir->d.minor] = NULL;
+ kfree(cdev);
kfree(ir);
}
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index ad927fcaa020..06dfe0957b5e 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -108,6 +108,12 @@ static int debug = 1;
static int debug;
#endif
+#define mce_dbg(dev, fmt, ...) \
+ do { \
+ if (debug) \
+ dev_info(dev, fmt, ## __VA_ARGS__); \
+ } while (0)
+
/* general constants */
#define SEND_FLAG_IN_PROGRESS 1
#define SEND_FLAG_COMPLETE 2
@@ -246,6 +252,9 @@ static struct usb_device_id mceusb_dev_table[] = {
.driver_info = MCE_GEN2_TX_INV },
/* SMK eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_SMK, 0x0338) },
+ /* SMK/I-O Data GV-MC7/RCKIT Receiver */
+ { USB_DEVICE(VENDOR_SMK, 0x0353),
+ .driver_info = MCE_GEN2_NO_TX },
/* Tatung eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TATUNG, 0x9150) },
/* Shuttle eHome Infrared Transceiver */
@@ -606,12 +615,15 @@ static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
if (ir) {
len = urb->actual_length;
- dev_dbg(ir->dev, "callback called (status=%d len=%d)\n",
+ mce_dbg(ir->dev, "callback called (status=%d len=%d)\n",
urb->status, len);
mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
}
+ /* the transfer buffer and urb were allocated in mce_request_packet */
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
}
/* request incoming or send outgoing usb packet - used to initialize remote */
@@ -655,17 +667,17 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
return;
}
- dev_dbg(dev, "receive request called (size=%#x)\n", size);
+ mce_dbg(dev, "receive request called (size=%#x)\n", size);
async_urb->transfer_buffer_length = size;
async_urb->dev = ir->usbdev;
res = usb_submit_urb(async_urb, GFP_ATOMIC);
if (res) {
- dev_dbg(dev, "receive request FAILED! (res=%d)\n", res);
+ mce_dbg(dev, "receive request FAILED! (res=%d)\n", res);
return;
}
- dev_dbg(dev, "receive request complete (res=%d)\n", res);
+ mce_dbg(dev, "receive request complete (res=%d)\n", res);
}
static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
@@ -673,9 +685,9 @@ static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
mce_request_packet(ir, data, size, MCEUSB_TX);
}
-static void mce_sync_in(struct mceusb_dev *ir, unsigned char *data, int size)
+static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
{
- mce_request_packet(ir, data, size, MCEUSB_RX);
+ mce_request_packet(ir, NULL, size, MCEUSB_RX);
}
/* Send data out the IR blaster port(s) */
@@ -794,7 +806,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
ir->carrier = carrier;
cmdbuf[2] = MCE_CMD_SIG_END;
cmdbuf[3] = MCE_IRDATA_TRAILER;
- dev_dbg(ir->dev, "%s: disabling carrier "
+ mce_dbg(ir->dev, "%s: disabling carrier "
"modulation\n", __func__);
mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
return carrier;
@@ -806,7 +818,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
ir->carrier = carrier;
cmdbuf[2] = prescaler;
cmdbuf[3] = divisor;
- dev_dbg(ir->dev, "%s: requesting %u HZ "
+ mce_dbg(ir->dev, "%s: requesting %u HZ "
"carrier\n", __func__, carrier);
/* Transmit new carrier to mce device */
@@ -879,7 +891,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
* US_TO_NS(MCE_TIME_UNIT);
- dev_dbg(ir->dev, "Storing %s with duration %d\n",
+ mce_dbg(ir->dev, "Storing %s with duration %d\n",
rawir.pulse ? "pulse" : "space",
rawir.duration);
@@ -911,7 +923,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
if (ir->parser_state != CMD_HEADER && !ir->rem)
ir->parser_state = CMD_HEADER;
}
- dev_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
+ mce_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
ir_raw_event_handle(ir->rc);
}
@@ -933,7 +945,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
ir->send_flags = SEND_FLAG_COMPLETE;
- dev_dbg(ir->dev, "setup answer received %d bytes\n",
+ mce_dbg(ir->dev, "setup answer received %d bytes\n",
buf_len);
}
@@ -951,7 +963,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
case -EPIPE:
default:
- dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
+ mce_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
break;
}
@@ -961,7 +973,6 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
static void mceusb_gen1_init(struct mceusb_dev *ir)
{
int ret;
- int maxp = ir->len_in;
struct device *dev = ir->dev;
char *data;
@@ -978,8 +989,8 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
data, USB_CTRL_MSG_SZ, HZ * 3);
- dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
- dev_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
+ mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
__func__, data[0], data[1]);
/* set feature: bit rate 38400 bps */
@@ -987,71 +998,56 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
0xc04e, 0x0000, NULL, 0, HZ * 3);
- dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
/* bRequest 4: set char length to 8 bits */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
4, USB_TYPE_VENDOR,
0x0808, 0x0000, NULL, 0, HZ * 3);
- dev_dbg(dev, "%s - retB = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - retB = %d\n", __func__, ret);
/* bRequest 2: set handshaking to use DTR/DSR */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
2, USB_TYPE_VENDOR,
0x0000, 0x0100, NULL, 0, HZ * 3);
- dev_dbg(dev, "%s - retC = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - retC = %d\n", __func__, ret);
/* device reset */
mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
- mce_sync_in(ir, NULL, maxp);
/* get hw/sw revision? */
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
- mce_sync_in(ir, NULL, maxp);
kfree(data);
};
static void mceusb_gen2_init(struct mceusb_dev *ir)
{
- int maxp = ir->len_in;
-
/* device reset */
mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
- mce_sync_in(ir, NULL, maxp);
/* get hw/sw revision? */
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
- mce_sync_in(ir, NULL, maxp);
/* unknown what the next two actually return... */
mce_async_out(ir, GET_UNKNOWN, sizeof(GET_UNKNOWN));
- mce_sync_in(ir, NULL, maxp);
mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
- mce_sync_in(ir, NULL, maxp);
}
static void mceusb_get_parameters(struct mceusb_dev *ir)
{
- int maxp = ir->len_in;
-
/* get the carrier and frequency */
mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
- mce_sync_in(ir, NULL, maxp);
- if (!ir->flags.no_tx) {
+ if (!ir->flags.no_tx)
/* get the transmitter bitmask */
mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
- mce_sync_in(ir, NULL, maxp);
- }
/* get receiver timeout value */
mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
- mce_sync_in(ir, NULL, maxp);
/* get receiver sensor setting */
mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
- mce_sync_in(ir, NULL, maxp);
}
static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
@@ -1122,7 +1118,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
bool tx_mask_normal;
int ir_intfnum;
- dev_dbg(&intf->dev, "%s called\n", __func__);
+ mce_dbg(&intf->dev, "%s called\n", __func__);
idesc = intf->cur_altsetting;
@@ -1150,7 +1146,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ep_in = ep;
ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_in->bInterval = 1;
- dev_dbg(&intf->dev, "acceptable inbound endpoint "
+ mce_dbg(&intf->dev, "acceptable inbound endpoint "
"found\n");
}
@@ -1165,12 +1161,12 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ep_out = ep;
ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_out->bInterval = 1;
- dev_dbg(&intf->dev, "acceptable outbound endpoint "
+ mce_dbg(&intf->dev, "acceptable outbound endpoint "
"found\n");
}
}
if (ep_in == NULL) {
- dev_dbg(&intf->dev, "inbound and/or endpoint not found\n");
+ mce_dbg(&intf->dev, "inbound and/or endpoint not found\n");
return -ENODEV;
}
@@ -1215,16 +1211,16 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
if (!ir->rc)
goto rc_dev_fail;
- /* flush buffers on the device */
- mce_sync_in(ir, NULL, maxp);
- mce_sync_in(ir, NULL, maxp);
-
/* wire up inbound data handler */
usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in,
maxp, (usb_complete_t) mceusb_dev_recv, ir, ep_in->bInterval);
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ /* flush buffers on the device */
+ mce_dbg(&intf->dev, "Flushing receive buffers\n");
+ mce_flush_rx_buffer(ir, maxp);
+
/* initialize device */
if (ir->flags.microsoft_gen1)
mceusb_gen1_init(ir);
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index bf3060ea6107..565f24c20d77 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -991,7 +991,6 @@ static int nvt_open(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&nvt->nvt_lock, flags);
- nvt->in_use = true;
nvt_enable_cir(nvt);
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
@@ -1004,7 +1003,6 @@ static void nvt_close(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&nvt->nvt_lock, flags);
- nvt->in_use = false;
nvt_disable_cir(nvt);
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 379795d61ea7..1241fc89a36c 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -70,7 +70,6 @@ struct nvt_dev {
struct ir_raw_event rawir;
spinlock_t nvt_lock;
- bool in_use;
/* for rx */
u8 buf[RX_BUF_LEN];
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index f57cd5677ac2..3186ac7c2c10 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -522,18 +522,20 @@ EXPORT_SYMBOL_GPL(rc_g_keycode_from_table);
/**
* ir_do_keyup() - internal function to signal the release of a keypress
* @dev: the struct rc_dev descriptor of the device
+ * @sync: whether or not to call input_sync
*
* This function is used internally to release a keypress; it must be
* called with keylock held.
*/
-static void ir_do_keyup(struct rc_dev *dev)
+static void ir_do_keyup(struct rc_dev *dev, bool sync)
{
if (!dev->keypressed)
return;
IR_dprintk(1, "keyup key 0x%04x\n", dev->last_keycode);
input_report_key(dev->input_dev, dev->last_keycode, 0);
- input_sync(dev->input_dev);
+ if (sync)
+ input_sync(dev->input_dev);
dev->keypressed = false;
}
@@ -549,7 +551,7 @@ void rc_keyup(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&dev->keylock, flags);
- ir_do_keyup(dev);
+ ir_do_keyup(dev, true);
spin_unlock_irqrestore(&dev->keylock, flags);
}
EXPORT_SYMBOL_GPL(rc_keyup);
@@ -578,7 +580,7 @@ static void ir_timer_keyup(unsigned long cookie)
*/
spin_lock_irqsave(&dev->keylock, flags);
if (time_is_before_eq_jiffies(dev->keyup_jiffies))
- ir_do_keyup(dev);
+ ir_do_keyup(dev, true);
spin_unlock_irqrestore(&dev->keylock, flags);
}
@@ -597,6 +599,7 @@ void rc_repeat(struct rc_dev *dev)
spin_lock_irqsave(&dev->keylock, flags);
input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode);
+ input_sync(dev->input_dev);
if (!dev->keypressed)
goto out;
@@ -622,29 +625,28 @@ EXPORT_SYMBOL_GPL(rc_repeat);
static void ir_do_keydown(struct rc_dev *dev, int scancode,
u32 keycode, u8 toggle)
{
- input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
-
- /* Repeat event? */
- if (dev->keypressed &&
- dev->last_scancode == scancode &&
- dev->last_toggle == toggle)
- return;
+ bool new_event = !dev->keypressed ||
+ dev->last_scancode != scancode ||
+ dev->last_toggle != toggle;
- /* Release old keypress */
- ir_do_keyup(dev);
+ if (new_event && dev->keypressed)
+ ir_do_keyup(dev, false);
- dev->last_scancode = scancode;
- dev->last_toggle = toggle;
- dev->last_keycode = keycode;
+ input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
- if (keycode == KEY_RESERVED)
- return;
+ if (new_event && keycode != KEY_RESERVED) {
+ /* Register a keypress */
+ dev->keypressed = true;
+ dev->last_scancode = scancode;
+ dev->last_toggle = toggle;
+ dev->last_keycode = keycode;
+
+ IR_dprintk(1, "%s: key down event, "
+ "key 0x%04x, scancode 0x%04x\n",
+ dev->input_name, keycode, scancode);
+ input_report_key(dev->input_dev, keycode, 1);
+ }
- /* Register a keypress */
- dev->keypressed = true;
- IR_dprintk(1, "%s: key down event, key 0x%04x, scancode 0x%04x\n",
- dev->input_name, keycode, scancode);
- input_report_key(dev->input_dev, dev->last_keycode, 1);
input_sync(dev->input_dev);
}
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index a97cf2750bd9..834a48394bce 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3474,7 +3474,7 @@ static int radio_s_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
- bttv_call_all(btv, tuner, g_tuner, t);
+ bttv_call_all(btv, tuner, s_tuner, t);
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index 1933d4d11bf2..e80134f52ef5 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -695,14 +695,10 @@ static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
cx18_call_all(cx, tuner, g_tuner, vt);
- if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
+ if (vt->type == V4L2_TUNER_RADIO)
strlcpy(vt->name, "cx18 Radio Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_RADIO;
- } else {
+ else
strlcpy(vt->name, "cx18 TV Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_ANALOG_TV;
- }
-
return 0;
}
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index f9e347dae739..120c7d8e0895 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1184,14 +1184,10 @@ static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
ivtv_call_all(itv, tuner, g_tuner, vt);
- if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
+ if (vt->type == V4L2_TUNER_RADIO)
strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_RADIO;
- } else {
+ else
strlcpy(vt->name, "ivtv TV Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_ANALOG_TV;
- }
-
return 0;
}
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 10b55c854487..89d09a8914f8 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -2,10 +2,10 @@
* Header for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -106,23 +106,23 @@ struct m5mols_capture {
* The recommended value for each scene mode is given in the documents.
*/
struct m5mols_scenemode {
- u32 metering;
- u32 ev_bias;
- u32 wb_mode;
- u32 wb_preset;
- u32 chroma_en;
- u32 chroma_lvl;
- u32 edge_en;
- u32 edge_lvl;
- u32 af_range;
- u32 fd_mode;
- u32 mcc;
- u32 light;
- u32 flash;
- u32 tone;
- u32 iso;
- u32 capt_mode;
- u32 wdr;
+ u8 metering;
+ u8 ev_bias;
+ u8 wb_mode;
+ u8 wb_preset;
+ u8 chroma_en;
+ u8 chroma_lvl;
+ u8 edge_en;
+ u8 edge_lvl;
+ u8 af_range;
+ u8 fd_mode;
+ u8 mcc;
+ u8 light;
+ u8 flash;
+ u8 tone;
+ u8 iso;
+ u8 capt_mode;
+ u8 wdr;
};
/**
@@ -154,7 +154,6 @@ struct m5mols_version {
u8 str[VERSION_STRING_SIZE];
u8 af;
};
-#define VERSION_SIZE sizeof(struct m5mols_version)
/**
* struct m5mols_info - M-5MOLS driver data structure
@@ -216,9 +215,9 @@ struct m5mols_info {
bool lock_ae;
bool lock_awb;
u8 resolution;
- u32 interrupt;
- u32 mode;
- u32 mode_save;
+ u8 interrupt;
+ u8 mode;
+ u8 mode_save;
int (*set_power)(struct device *dev, int on);
};
@@ -256,9 +255,11 @@ struct m5mols_info {
* +-------+---+----------+-----+------+------+------+------+
* - d[0..3]: according to size1
*/
-int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg_comb, u8 *val);
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg_comb, u16 *val);
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 value);
/*
* Mode operation of the M-5MOLS
@@ -280,12 +281,12 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
* The available executing order between each modes are as follows:
* PARAMETER <---> MONITOR <---> CAPTURE
*/
-int m5mols_mode(struct m5mols_info *info, u32 mode);
+int m5mols_mode(struct m5mols_info *info, u8 mode);
-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg);
int m5mols_sync_controls(struct m5mols_info *info);
int m5mols_start_capture(struct m5mols_info *info);
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode);
int m5mols_lock_3a(struct m5mols_info *info, bool lock);
int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
index d71a3903b60f..d9471928369d 100644
--- a/drivers/media/video/m5mols/m5mols_capture.c
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -2,10 +2,10 @@
* The Capture code for Fujitsu M-5MOLS ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,9 +58,9 @@ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
{
u32 num, den;
- int ret = m5mols_read(sd, addr_num, &num);
+ int ret = m5mols_read_u32(sd, addr_num, &num);
if (!ret)
- ret = m5mols_read(sd, addr_den, &den);
+ ret = m5mols_read_u32(sd, addr_den, &den);
if (ret)
return ret;
*val = den == 0 ? 0 : num / den;
@@ -99,20 +99,20 @@ static int m5mols_capture_info(struct m5mols_info *info)
if (ret)
return ret;
- ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
+ ret = m5mols_read_u16(sd, EXIF_INFO_ISO, &exif->iso_speed);
if (!ret)
- ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
+ ret = m5mols_read_u16(sd, EXIF_INFO_FLASH, &exif->flash);
if (!ret)
- ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
+ ret = m5mols_read_u16(sd, EXIF_INFO_SDR, &exif->sdr);
if (!ret)
- ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
+ ret = m5mols_read_u16(sd, EXIF_INFO_QVAL, &exif->qval);
if (ret)
return ret;
if (!ret)
- ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
+ ret = m5mols_read_u32(sd, CAPC_IMAGE_SIZE, &info->cap.main);
if (!ret)
- ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
+ ret = m5mols_read_u32(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
if (!ret)
info->cap.total = info->cap.main + info->cap.thumb;
@@ -122,7 +122,7 @@ static int m5mols_capture_info(struct m5mols_info *info)
int m5mols_start_capture(struct m5mols_info *info)
{
struct v4l2_subdev *sd = &info->sd;
- u32 resolution = info->resolution;
+ u8 resolution = info->resolution;
int timeout;
int ret;
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
index 817c16fec368..d135d20d09cf 100644
--- a/drivers/media/video/m5mols/m5mols_controls.c
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -2,10 +2,10 @@
* Controls for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -130,7 +130,7 @@ static struct m5mols_scenemode m5mols_default_scenemode[] = {
*
* WARNING: The execution order is important. Do not change the order.
*/
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
{
struct v4l2_subdev *sd = &info->sd;
struct m5mols_scenemode scenemode = m5mols_default_scenemode[mode];
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 76eac26e84ae..43c68f51c5ce 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -2,10 +2,10 @@
* Driver for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -133,13 +133,13 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
/**
* m5mols_read - I2C read function
* @reg: combination of size, category and command for the I2C packet
+ * @size: desired size of I2C packet
* @val: read value
*/
-int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
+static int m5mols_read(struct v4l2_subdev *sd, u32 size, u32 reg, u32 *val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
- u8 size = I2C_SIZE(reg);
u8 category = I2C_CATEGORY(reg);
u8 cmd = I2C_COMMAND(reg);
struct i2c_msg msg[2];
@@ -149,11 +149,6 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
if (!client->adapter)
return -ENODEV;
- if (size != 1 && size != 2 && size != 4) {
- v4l2_err(sd, "Wrong data size\n");
- return -EINVAL;
- }
-
msg[0].addr = client->addr;
msg[0].flags = 0;
msg[0].len = 5;
@@ -184,6 +179,52 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
return 0;
}
+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg, u8 *val)
+{
+ u32 val_32;
+ int ret;
+
+ if (I2C_SIZE(reg) != 1) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+ if (ret)
+ return ret;
+
+ *val = (u8)val_32;
+ return ret;
+}
+
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg, u16 *val)
+{
+ u32 val_32;
+ int ret;
+
+ if (I2C_SIZE(reg) != 2) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+ if (ret)
+ return ret;
+
+ *val = (u16)val_32;
+ return ret;
+}
+
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
+{
+ if (I2C_SIZE(reg) != 4) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ return m5mols_read(sd, I2C_SIZE(reg), reg, val);
+}
+
/**
* m5mols_write - I2C command write function
* @reg: combination of size, category and command for the I2C packet
@@ -231,13 +272,14 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
return 0;
}
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 mask)
{
- u32 busy, i;
+ u8 busy;
+ int i;
int ret;
for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
- ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
+ ret = m5mols_read_u8(sd, I2C_REG(category, cmd, 1), &busy);
if (ret < 0)
return ret;
if ((busy & mask) == mask)
@@ -252,14 +294,14 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
* Before writing the desired interrupt value, the INT_FACTOR register should
* be read to clear pending interrupts.
*/
-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg)
{
struct m5mols_info *info = to_m5mols(sd);
- u32 mask = is_available_af(info) ? REG_INT_AF : 0;
- u32 dummy;
+ u8 mask = is_available_af(info) ? REG_INT_AF : 0;
+ u8 dummy;
int ret;
- ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
+ ret = m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &dummy);
if (!ret)
ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
return ret;
@@ -271,7 +313,7 @@ int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
* Changing the M-5MOLS mode always incurs a small delay, so the current busy
* status needs to be checked to guarantee the right mode.
*/
-static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
+static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
{
int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
@@ -286,16 +328,16 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
* can be guaranteed only when the sensor is operating in the mode to which
* a command belongs.
*/
-int m5mols_mode(struct m5mols_info *info, u32 mode)
+int m5mols_mode(struct m5mols_info *info, u8 mode)
{
struct v4l2_subdev *sd = &info->sd;
int ret = -EINVAL;
- u32 reg;
+ u8 reg;
if (mode < REG_PARAMETER && mode > REG_CAPTURE)
return ret;
- ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
+ ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
if ((!ret && reg == mode) || ret)
return ret;
@@ -344,41 +386,37 @@ int m5mols_mode(struct m5mols_info *info, u32 mode)
static int m5mols_get_version(struct v4l2_subdev *sd)
{
struct m5mols_info *info = to_m5mols(sd);
- union {
- struct m5mols_version ver;
- u8 bytes[VERSION_SIZE];
- } version;
- u32 *value;
- u8 cmd = CAT0_VER_CUSTOMER;
+ struct m5mols_version *ver = &info->ver;
+ u8 *str = ver->str;
+ int i;
int ret;
- do {
- value = (u32 *)&version.bytes[cmd];
- ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
- if (ret)
- return ret;
- } while (cmd++ != CAT0_VER_AWB);
+ ret = m5mols_read_u8(sd, SYSTEM_VER_CUSTOMER, &ver->customer);
+ if (!ret)
+ ret = m5mols_read_u8(sd, SYSTEM_VER_PROJECT, &ver->project);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_FIRMWARE, &ver->fw);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_HARDWARE, &ver->hw);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_PARAMETER, &ver->param);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_AWB, &ver->awb);
+ if (!ret)
+ ret = m5mols_read_u8(sd, AF_VERSION, &ver->af);
+ if (ret)
+ return ret;
- do {
- value = (u32 *)&version.bytes[cmd];
- ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
+ for (i = 0; i < VERSION_STRING_SIZE; i++) {
+ ret = m5mols_read_u8(sd, SYSTEM_VER_STRING, &str[i]);
if (ret)
return ret;
- if (cmd >= VERSION_SIZE - 1)
- return -EINVAL;
- } while (version.bytes[cmd++]);
-
- value = (u32 *)&version.bytes[cmd];
- ret = m5mols_read(sd, AF_VERSION, value);
- if (ret)
- return ret;
+ }
- /* store version information swapped for being readable */
- info->ver = version.ver;
- info->ver.fw = be16_to_cpu(info->ver.fw);
- info->ver.hw = be16_to_cpu(info->ver.hw);
- info->ver.param = be16_to_cpu(info->ver.param);
- info->ver.awb = be16_to_cpu(info->ver.awb);
+ ver->fw = be16_to_cpu(ver->fw);
+ ver->hw = be16_to_cpu(ver->hw);
+ ver->param = be16_to_cpu(ver->param);
+ ver->awb = be16_to_cpu(ver->awb);
v4l2_info(sd, "Manufacturer\t[%s]\n",
is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
@@ -722,7 +760,7 @@ static int m5mols_init_controls(struct m5mols_info *info)
int ret;
/* Determine value's range & step of controls for various FW version */
- ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
+ ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &max_exposure);
if (!ret)
step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
if (ret)
@@ -842,18 +880,18 @@ static void m5mols_irq_work(struct work_struct *work)
struct m5mols_info *info =
container_of(work, struct m5mols_info, work_irq);
struct v4l2_subdev *sd = &info->sd;
- u32 reg;
+ u8 reg;
int ret;
if (!is_powered(info) ||
- m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
+ m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &info->interrupt))
return;
switch (info->interrupt & REG_INT_MASK) {
case REG_INT_AF:
if (!is_available_af(info))
break;
- ret = m5mols_read(sd, AF_STATUS, &reg);
+ ret = m5mols_read_u8(sd, AF_STATUS, &reg);
v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
reg == REG_AF_FAIL ? "Failed" :
reg == REG_AF_SUCCESS ? "Success" :
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
index b83e36fc6ac6..c755bd6edfe9 100644
--- a/drivers/media/video/m5mols/m5mols_reg.h
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -2,10 +2,10 @@
* Register map for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,13 +56,24 @@
* more specific contents, see definition if file m5mols.h.
*/
#define CAT0_VER_CUSTOMER 0x00 /* customer version */
-#define CAT0_VER_AWB 0x09 /* Auto WB version */
+#define CAT0_VER_PROJECT 0x01 /* project version */
+#define CAT0_VER_FIRMWARE 0x02 /* Firmware version */
+#define CAT0_VER_HARDWARE 0x04 /* Hardware version */
+#define CAT0_VER_PARAMETER 0x06 /* Parameter version */
+#define CAT0_VER_AWB 0x08 /* Auto WB version */
#define CAT0_VER_STRING 0x0a /* string including M-5MOLS */
#define CAT0_SYSMODE 0x0b /* SYSTEM mode register */
#define CAT0_STATUS 0x0c /* SYSTEM mode status register */
#define CAT0_INT_FACTOR 0x10 /* interrupt pending register */
#define CAT0_INT_ENABLE 0x11 /* interrupt enable register */
+#define SYSTEM_VER_CUSTOMER I2C_REG(CAT_SYSTEM, CAT0_VER_CUSTOMER, 1)
+#define SYSTEM_VER_PROJECT I2C_REG(CAT_SYSTEM, CAT0_VER_PROJECT, 1)
+#define SYSTEM_VER_FIRMWARE I2C_REG(CAT_SYSTEM, CAT0_VER_FIRMWARE, 2)
+#define SYSTEM_VER_HARDWARE I2C_REG(CAT_SYSTEM, CAT0_VER_HARDWARE, 2)
+#define SYSTEM_VER_PARAMETER I2C_REG(CAT_SYSTEM, CAT0_VER_PARAMETER, 2)
+#define SYSTEM_VER_AWB I2C_REG(CAT_SYSTEM, CAT0_VER_AWB, 2)
+
#define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
#define REG_SYSINIT 0x00 /* SYSTEM mode */
#define REG_PARAMETER 0x01 /* PARAMETER mode */
@@ -382,8 +393,8 @@
#define REG_CAP_START_MAIN 0x01
#define REG_CAP_START_THUMB 0x03
-#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
-#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
+#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 4)
+#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 4)
/*
* Category F - Flash
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index de5d481b0328..c43c81f5f978 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -480,12 +480,14 @@ static int msp_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
struct msp_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (state->radio)
+ if (vt->type != V4L2_TUNER_ANALOG_TV)
return 0;
- if (state->opmode == OPMODE_AUTOSELECT)
- msp_detect_stereo(client);
- vt->audmode = state->audmode;
- vt->rxsubchans = state->rxsubchans;
+ if (!state->radio) {
+ if (state->opmode == OPMODE_AUTOSELECT)
+ msp_detect_stereo(client);
+ vt->rxsubchans = state->rxsubchans;
+ }
+ vt->audmode = state->audmode;
vt->capability |= V4L2_TUNER_CAP_STEREO |
V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
return 0;
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index bc0c23a1009c..63f8a0cc33d8 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -444,12 +444,9 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct mx1_camera_dev *pcdev = ici->priv;
- int ret;
- if (pcdev->icd) {
- ret = -EBUSY;
- goto ebusy;
- }
+ if (pcdev->icd)
+ return -EBUSY;
dev_info(icd->dev.parent, "MX1 Camera driver attached to camera %d\n",
icd->devnum);
@@ -458,8 +455,7 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)
pcdev->icd = icd;
-ebusy:
- return ret;
+ return 0;
}
static void mx1_camera_remove_device(struct soc_camera_device *icd)
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 4ada9be1d430..4d07c5844402 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -982,6 +982,14 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
startindex = (vout->vid == OMAP_VIDEO1) ?
video1_numbuffers : video2_numbuffers;
+ /* Check the size of the buffer */
+ if (*size > vout->buffer_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "buffer allocation mismatch [%u] [%u]\n",
+ *size, vout->buffer_size);
+ return -ENOMEM;
+ }
+
for (i = startindex; i < *count; i++) {
vout->buffer_size = *size;
@@ -1228,6 +1236,14 @@ static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
(vma->vm_pgoff << PAGE_SHIFT));
return -EINVAL;
}
+ /* Check the size of the buffer */
+ if (size > vout->buffer_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "insufficient memory [%lu] [%u]\n",
+ size, vout->buffer_size);
+ return -ENOMEM;
+ }
+
q->bufs[i]->baddr = vma->vm_start;
vma->vm_flags |= VM_RESERVED;
@@ -2391,7 +2407,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
/* Register the Video device with V4L2
*/
vfd = vout->vfd;
- if (video_register_device(vfd, VFL_TYPE_GRABBER, k + 1) < 0) {
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
dev_err(&pdev->dev, ": Could not register "
"Video for Linux device\n");
vfd->minor = -1;
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
index 2aa6a76c5e59..8ae74817a110 100644
--- a/drivers/media/video/omap/omap_voutlib.c
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -193,7 +193,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
return -EINVAL;
if (cpu_is_omap24xx()) {
- if (crop->height != win->w.height) {
+ if (try_crop.height != win->w.height) {
/* If we're resizing vertically, we can't support a
* crop width wider than 768 pixels.
*/
@@ -202,7 +202,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
}
}
/* vertical resizing */
- vresize = (1024 * crop->height) / win->w.height;
+ vresize = (1024 * try_crop.height) / win->w.height;
if (cpu_is_omap24xx() && (vresize > 2048))
vresize = 2048;
else if (cpu_is_omap34xx() && (vresize > 4096))
@@ -221,7 +221,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
try_crop.height = 2;
}
/* horizontal resizing */
- hresize = (1024 * crop->width) / win->w.width;
+ hresize = (1024 * try_crop.width) / win->w.width;
if (cpu_is_omap24xx() && (hresize > 2048))
hresize = 2048;
else if (cpu_is_omap34xx() && (hresize > 4096))
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index c9fd04ee70a8..94b6ed89e195 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -1748,7 +1748,7 @@ static int isp_register_entities(struct isp_device *isp)
goto done;
/* Register external entities */
- for (subdevs = pdata->subdevs; subdevs->subdevs; ++subdevs) {
+ for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
struct v4l2_subdev *sensor;
struct media_entity *input;
unsigned int flags;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 9d0dd08f57f8..e98d38212791 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -3046,6 +3046,8 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw)
if (hdw->input_dirty || hdw->audiomode_dirty || hdw->force_dirty) {
struct v4l2_tuner vt;
memset(&vt, 0, sizeof(vt));
+ vt.type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
vt.audmode = hdw->audiomode_val;
v4l2_device_call_all(&hdw->v4l2_dev, 0, tuner, s_tuner, &vt);
}
@@ -5171,6 +5173,8 @@ void pvr2_hdw_status_poll(struct pvr2_hdw *hdw)
{
struct v4l2_tuner *vtp = &hdw->tuner_signal_info;
memset(vtp, 0, sizeof(*vtp));
+ vtp->type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
hdw->tuner_signal_stale = 0;
/* Note: There apparently is no replacement for VIDIOC_CROPCAP
using v4l2-subdev - therefore we can't support that AT ALL right
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 1593f8deb810..760b4de13adf 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -1414,7 +1414,7 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
{
ARG_DEF(struct pwc_probe, probe)
- strcpy(ARGR(probe).name, pdev->vdev->name);
+ strcpy(ARGR(probe).name, pdev->vdev.name);
ARGR(probe).type = pdev->type;
ARG_OUT(probe)
break;
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 356cd42b593b..b0bde5a87c8a 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -40,7 +40,7 @@
   Oh yes, convention: to distinguish between all the various pointers to
device-structures, I use these names for the pointer variables:
udev: struct usb_device *
- vdev: struct video_device *
+ vdev: struct video_device (member of pwc_dev)
   pdev: struct pwc_device *
*/
@@ -152,6 +152,7 @@ static ssize_t pwc_video_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
static unsigned int pwc_video_poll(struct file *file, poll_table *wait);
static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma);
+static void pwc_video_release(struct video_device *vfd);
static const struct v4l2_file_operations pwc_fops = {
.owner = THIS_MODULE,
@@ -164,42 +165,12 @@ static const struct v4l2_file_operations pwc_fops = {
};
static struct video_device pwc_template = {
.name = "Philips Webcam", /* Filled in later */
- .release = video_device_release,
+ .release = pwc_video_release,
.fops = &pwc_fops,
+ .ioctl_ops = &pwc_ioctl_ops,
};
/***************************************************************************/
-
-/* Okay, this is some magic that I worked out and the reasoning behind it...
-
- The biggest problem with any USB device is of course: "what to do
- when the user unplugs the device while it is in use by an application?"
- We have several options:
- 1) Curse them with the 7 plagues when they do (requires divine intervention)
- 2) Tell them not to (won't work: they'll do it anyway)
- 3) Oops the kernel (this will have a negative effect on a user's uptime)
- 4) Do something sensible.
-
- Of course, we go for option 4.
-
- It happens that this device will be linked to two times, once from
- usb_device and once from the video_device in their respective 'private'
- pointers. This is done when the device is probed() and all initialization
- succeeded. The pwc_device struct links back to both structures.
-
- When a device is unplugged while in use it will be removed from the
- list of known USB devices; I also de-register it as a V4L device, but
- unfortunately I can't free the memory since the struct is still in use
- by the file descriptor. This free-ing is then deferend until the first
- opportunity. Crude, but it works.
-
- A small 'advantage' is that if a user unplugs the cam and plugs it back
- in, it should get assigned the same video device minor, but unfortunately
- it's non-trivial to re-link the cam back to the video device... (that
- would surely be magic! :))
-*/
-
-/***************************************************************************/
/* Private functions */
/* Here we want the physical address of the memory.
@@ -1016,16 +987,15 @@ static ssize_t show_snapshot_button_status(struct device *class_dev,
static DEVICE_ATTR(button, S_IRUGO | S_IWUSR, show_snapshot_button_status,
NULL);
-static int pwc_create_sysfs_files(struct video_device *vdev)
+static int pwc_create_sysfs_files(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_get_drvdata(vdev);
int rc;
- rc = device_create_file(&vdev->dev, &dev_attr_button);
+ rc = device_create_file(&pdev->vdev.dev, &dev_attr_button);
if (rc)
goto err;
if (pdev->features & FEATURE_MOTOR_PANTILT) {
- rc = device_create_file(&vdev->dev, &dev_attr_pan_tilt);
+ rc = device_create_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
if (rc)
goto err_button;
}
@@ -1033,19 +1003,17 @@ static int pwc_create_sysfs_files(struct video_device *vdev)
return 0;
err_button:
- device_remove_file(&vdev->dev, &dev_attr_button);
+ device_remove_file(&pdev->vdev.dev, &dev_attr_button);
err:
PWC_ERROR("Could not create sysfs files.\n");
return rc;
}
-static void pwc_remove_sysfs_files(struct video_device *vdev)
+static void pwc_remove_sysfs_files(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_get_drvdata(vdev);
-
if (pdev->features & FEATURE_MOTOR_PANTILT)
- device_remove_file(&vdev->dev, &dev_attr_pan_tilt);
- device_remove_file(&vdev->dev, &dev_attr_button);
+ device_remove_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
+ device_remove_file(&pdev->vdev.dev, &dev_attr_button);
}
#ifdef CONFIG_USB_PWC_DEBUG
@@ -1106,7 +1074,7 @@ static int pwc_video_open(struct file *file)
if (ret >= 0)
{
PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
- pdev->vdev->name,
+ pdev->vdev.name,
pwc_sensor_type_to_string(i), i);
}
}
@@ -1180,16 +1148,15 @@ static int pwc_video_open(struct file *file)
return 0;
}
-
-static void pwc_cleanup(struct pwc_device *pdev)
+static void pwc_video_release(struct video_device *vfd)
{
- pwc_remove_sysfs_files(pdev->vdev);
- video_unregister_device(pdev->vdev);
+ struct pwc_device *pdev = container_of(vfd, struct pwc_device, vdev);
+ int hint;
-#ifdef CONFIG_USB_PWC_INPUT_EVDEV
- if (pdev->button_dev)
- input_unregister_device(pdev->button_dev);
-#endif
+ /* search device_hint[] table if we occupy a slot, by any chance */
+ for (hint = 0; hint < MAX_DEV_HINTS; hint++)
+ if (device_hint[hint].pdev == pdev)
+ device_hint[hint].pdev = NULL;
kfree(pdev);
}
@@ -1199,7 +1166,7 @@ static int pwc_video_close(struct file *file)
{
struct video_device *vdev = file->private_data;
struct pwc_device *pdev;
- int i, hint;
+ int i;
PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
@@ -1234,12 +1201,6 @@ static int pwc_video_close(struct file *file)
}
pdev->vopen--;
PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
- } else {
- pwc_cleanup(pdev);
- /* search device_hint[] table if we occupy a slot, by any chance */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
- if (device_hint[hint].pdev == pdev)
- device_hint[hint].pdev = NULL;
}
return 0;
@@ -1715,19 +1676,12 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
init_waitqueue_head(&pdev->frameq);
pdev->vcompression = pwc_preferred_compression;
- /* Allocate video_device structure */
- pdev->vdev = video_device_alloc();
- if (!pdev->vdev) {
- PWC_ERROR("Err, cannot allocate video_device struture. Failing probe.");
- rc = -ENOMEM;
- goto err_free_mem;
- }
- memcpy(pdev->vdev, &pwc_template, sizeof(pwc_template));
- pdev->vdev->parent = &intf->dev;
- pdev->vdev->lock = &pdev->modlock;
- pdev->vdev->ioctl_ops = &pwc_ioctl_ops;
- strcpy(pdev->vdev->name, name);
- video_set_drvdata(pdev->vdev, pdev);
+ /* Init video_device structure */
+ memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
+ pdev->vdev.parent = &intf->dev;
+ pdev->vdev.lock = &pdev->modlock;
+ strcpy(pdev->vdev.name, name);
+ video_set_drvdata(&pdev->vdev, pdev);
pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
@@ -1746,8 +1700,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
}
}
- pdev->vdev->release = video_device_release;
-
/* occupy slot */
if (hint < MAX_DEV_HINTS)
device_hint[hint].pdev = pdev;
@@ -1759,16 +1711,16 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pwc_set_leds(pdev, 0, 0);
pwc_camera_power(pdev, 0);
- rc = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+ rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
if (rc < 0) {
PWC_ERROR("Failed to register as video device (%d).\n", rc);
- goto err_video_release;
+ goto err_free_mem;
}
- rc = pwc_create_sysfs_files(pdev->vdev);
+ rc = pwc_create_sysfs_files(pdev);
if (rc)
goto err_video_unreg;
- PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
+ PWC_INFO("Registered as %s.\n", video_device_node_name(&pdev->vdev));
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
/* register webcam snapshot button input device */
@@ -1776,7 +1728,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
if (!pdev->button_dev) {
PWC_ERROR("Err, insufficient memory for webcam snapshot button device.");
rc = -ENOMEM;
- pwc_remove_sysfs_files(pdev->vdev);
+ pwc_remove_sysfs_files(pdev);
goto err_video_unreg;
}
@@ -1794,7 +1746,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
if (rc) {
input_free_device(pdev->button_dev);
pdev->button_dev = NULL;
- pwc_remove_sysfs_files(pdev->vdev);
+ pwc_remove_sysfs_files(pdev);
goto err_video_unreg;
}
#endif
@@ -1804,10 +1756,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
err_video_unreg:
if (hint < MAX_DEV_HINTS)
device_hint[hint].pdev = NULL;
- video_unregister_device(pdev->vdev);
- pdev->vdev = NULL; /* So we don't try to release it below */
-err_video_release:
- video_device_release(pdev->vdev);
+ video_unregister_device(&pdev->vdev);
err_free_mem:
kfree(pdev);
return rc;
@@ -1816,10 +1765,8 @@ err_free_mem:
/* The user yanked out the cable... */
static void usb_pwc_disconnect(struct usb_interface *intf)
{
- struct pwc_device *pdev;
- int hint;
+ struct pwc_device *pdev = usb_get_intfdata(intf);
- pdev = usb_get_intfdata (intf);
mutex_lock(&pdev->modlock);
usb_set_intfdata (intf, NULL);
if (pdev == NULL) {
@@ -1836,30 +1783,25 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
}
/* We got unplugged; this is signalled by an EPIPE error code */
- if (pdev->vopen) {
- PWC_INFO("Disconnected while webcam is in use!\n");
- pdev->error_status = EPIPE;
- }
+ pdev->error_status = EPIPE;
+ pdev->unplugged = 1;
/* Alert waiting processes */
wake_up_interruptible(&pdev->frameq);
- /* Wait until device is closed */
- if (pdev->vopen) {
- pdev->unplugged = 1;
- pwc_iso_stop(pdev);
- } else {
- /* Device is closed, so we can safely unregister it */
- PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n");
-disconnect_out:
- /* search device_hint[] table if we occupy a slot, by any chance */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
- if (device_hint[hint].pdev == pdev)
- device_hint[hint].pdev = NULL;
- }
+ /* No need to keep the urbs around after disconnection */
+ pwc_isoc_cleanup(pdev);
+disconnect_out:
mutex_unlock(&pdev->modlock);
- pwc_cleanup(pdev);
+
+ pwc_remove_sysfs_files(pdev);
+ video_unregister_device(&pdev->vdev);
+
+#ifdef CONFIG_USB_PWC_INPUT_EVDEV
+ if (pdev->button_dev)
+ input_unregister_device(pdev->button_dev);
+#endif
}
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index e947766337d6..083f8b15df73 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -162,9 +162,9 @@ struct pwc_imgbuf
struct pwc_device
{
- struct video_device *vdev;
+ struct video_device vdev;
- /* Pointer to our usb_device */
+ /* Pointer to our usb_device, may be NULL after unplug */
struct usb_device *udev;
int type; /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
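The pwc changes above embed struct video_device directly in struct pwc_device and free the whole wrapper from the release callback via container_of(), instead of allocating and releasing the video_device separately. A minimal sketch of that ownership pattern, with generic names in place of the pwc structures, might be:

    #include <stddef.h>
    #include <stdlib.h>

    struct inner {
            void (*release)(struct inner *self);
    };

    struct outer {
            struct inner member;    /* embedded by value, not a pointer */
            int state;
    };

    /* Same offset arithmetic the kernel's container_of() performs. */
    #define container_of(ptr, type, field) \
            ((type *)((char *)(ptr) - offsetof(type, field)))

    static void outer_release(struct inner *self)
    {
            struct outer *o = container_of(self, struct outer, member);

            free(o);                /* the wrapper owns the embedded member */
    }

    static struct outer *outer_create(void)
    {
            struct outer *o = calloc(1, sizeof(*o));

            if (o)
                    o->member.release = outer_release;
            return o;
    }

Because the lifetime of the embedded member is tied to the wrapper, the release callback can safely free the containing structure once the last reference to the member is dropped.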
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index d142b40ea64e..81b4a826ee5e 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -1,7 +1,7 @@
/*
- * Samsung S5P SoC series camera interface (camera capture) driver
+ * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd
+ * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -262,12 +262,7 @@ static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
{
if (!fr || plane >= fr->fmt->memplanes)
return 0;
-
- dbg("%s: w: %d. h: %d. depth[%d]: %d",
- __func__, fr->width, fr->height, plane, fr->fmt->depth[plane]);
-
return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
-
}
static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
@@ -283,24 +278,14 @@ static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
*num_planes = fmt->memplanes;
- dbg("%s, buffer count=%d, plane count=%d",
- __func__, *num_buffers, *num_planes);
-
for (i = 0; i < fmt->memplanes; i++) {
sizes[i] = get_plane_size(&ctx->d_frame, i);
- dbg("plane: %u, plane_size: %lu", i, sizes[i]);
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
return 0;
}
-static int buffer_init(struct vb2_buffer *vb)
-{
- /* TODO: */
- return 0;
-}
-
static int buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
@@ -380,7 +365,6 @@ static struct vb2_ops fimc_capture_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
- .buf_init = buffer_init,
.wait_prepare = fimc_unlock,
.wait_finish = fimc_lock,
.start_streaming = start_streaming,
@@ -903,6 +887,7 @@ err_vd_reg:
err_v4l2_reg:
v4l2_device_unregister(v4l2_dev);
err_info:
+ kfree(ctx);
dev_err(&fimc->pdev->dev, "failed to install\n");
return ret;
}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index dc91a8511af6..bdf19ada9172 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1,9 +1,8 @@
/*
- * S5P camera interface (video postprocessor) driver
+ * Samsung S5P/EXYNOS4 SoC series camera interface (video postprocessor) driver
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd
- *
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
+ * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -42,7 +41,6 @@ static struct fimc_fmt fimc_formats[] = {
.color = S5P_FIMC_RGB565,
.memplanes = 1,
.colplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_BE,
.flags = FMT_FLAGS_M2M,
}, {
.name = "BGR666",
@@ -232,11 +230,7 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
return 0;
}
}
-
*shift = 0, *ratio = 1;
-
- dbg("s: %d, t: %d, shift: %d, ratio: %d",
- src, tar, *shift, *ratio);
return 0;
}
@@ -268,10 +262,8 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
err("invalid source size: %d x %d", sx, sy);
return -EINVAL;
}
-
sc->real_width = sx;
sc->real_height = sy;
- dbg("sx= %d, sy= %d, tx= %d, ty= %d", sx, sy, tx, ty);
ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
if (ret)
@@ -711,22 +703,18 @@ static int fimc_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
f = ctx_get_frame(ctx, vq->type);
if (IS_ERR(f))
return PTR_ERR(f);
-
/*
	 * Return number of non-contiguous planes (plane buffers)
* depending on the configured color format.
*/
- if (f->fmt)
- *num_planes = f->fmt->memplanes;
+ if (!f->fmt)
+ return -EINVAL;
+ *num_planes = f->fmt->memplanes;
for (i = 0; i < f->fmt->memplanes; i++) {
- sizes[i] = (f->width * f->height * f->fmt->depth[i]) >> 3;
+ sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
-
- if (*num_buffers == 0)
- *num_buffers = 1;
-
return 0;
}
@@ -852,7 +840,7 @@ struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask)
for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
fmt = &fimc_formats[i];
- if (fmt->fourcc == f->fmt.pix.pixelformat &&
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat &&
(fmt->flags & mask))
break;
}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 3beb1e5320ce..1f70772daaf0 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -1,7 +1,5 @@
/*
- * Copyright (c) 2010 Samsung Electronics
- *
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -135,9 +133,10 @@ enum fimc_color_fmt {
* @name: format description
* @fourcc: the fourcc code for this format, 0 if not applicable
* @color: the corresponding fimc_color_fmt
- * @depth: per plane driver's private 'number of bits per pixel'
* @memplanes: number of physically non-contiguous data planes
* @colplanes: number of physically contiguous data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @flags: flags indicating which operation mode format applies to
*/
struct fimc_fmt {
enum v4l2_mbus_pixelcode mbus_code;
@@ -171,7 +170,7 @@ struct fimc_dma_offset {
};
/**
- * struct fimc_effect - the configuration data for the "Arbitrary" image effect
+ * struct fimc_effect - color effect information
* @type: effect type
 * @pat_cb: cb value when type is "arbitrary"
* @pat_cr: cr value when type is "arbitrary"
@@ -184,7 +183,6 @@ struct fimc_effect {
/**
 * struct fimc_scaler - the configuration data for FIMC internal scaler
- *
* @scaleup_h: flag indicating scaling up horizontally
* @scaleup_v: flag indicating scaling up vertically
* @copy_mode: flag indicating transparent DMA transfer (no scaling
@@ -220,7 +218,6 @@ struct fimc_scaler {
/**
* struct fimc_addr - the FIMC physical address set for DMA
- *
* @y: luminance plane physical address
* @cb: Cb plane physical address
* @cr: Cr plane physical address
@@ -234,6 +231,7 @@ struct fimc_addr {
/**
* struct fimc_vid_buffer - the driver's video buffer
* @vb: v4l videobuf buffer
+ * @list: linked list structure for buffer queue
* @paddr: precalculated physical address set
* @index: buffer index for the output DMA engine
*/
@@ -254,11 +252,10 @@ struct fimc_vid_buffer {
* @offs_v: image vertical pixel offset
* @width: image pixel width
 * @height: image pixel height
- * @paddr: image frame buffer physical addresses
- * @buf_cnt: number of buffers depending on a color format
* @payload: image size in bytes (w x h x bpp)
- * @color: color format
+ * @paddr: image frame buffer physical addresses
* @dma_offset: DMA offset in bytes
+ * @fmt: fimc color format pointer
*/
struct fimc_frame {
u32 f_width;
@@ -390,21 +387,22 @@ struct fimc_ctx;
/**
* struct fimc_dev - abstraction for FIMC entity
- *
* @slock: the spinlock protecting this data structure
* @lock: the mutex protecting this data structure
* @pdev: pointer to the FIMC platform device
* @pdata: pointer to the device platform data
+ * @variant: the IP variant information
* @id: FIMC device index (0..FIMC_MAX_DEVS)
* @num_clocks: the number of clocks managed by this device instance
- * @clock[]: the clocks required for FIMC operation
+ * @clock: clocks required for FIMC operation
* @regs: the mapped hardware registers
* @regs_res: the resource claimed for IO registers
- * @irq: interrupt number of the FIMC subdevice
- * @irq_queue:
+ * @irq: FIMC interrupt number
+ * @irq_queue: interrupt handler waitqueue
* @m2m: memory-to-memory V4L2 device information
* @vid_cap: camera capture device information
* @state: flags used to synchronize m2m and capture mode operation
+ * @alloc_ctx: videobuf2 memory allocator context
*/
struct fimc_dev {
spinlock_t slock;
@@ -427,8 +425,7 @@ struct fimc_dev {
/**
* fimc_ctx - the device context data
- *
- * @lock: mutex protecting this data structure
+ * @slock: spinlock protecting this data structure
* @s_frame: source frame properties
* @d_frame: destination frame properties
* @out_order_1p: output 1-plane YCBCR order
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index ff6c0e97563e..d4ee24bf6928 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -963,7 +963,7 @@ static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
* to work with other protocols.
*/
if (!ir->active) {
- timeout = jiffies + jiffies_to_msecs(15);
+ timeout = jiffies + msecs_to_jiffies(15);
mod_timer(&ir->timer, timeout);
ir->active = true;
}
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 9363ed91a4cb..cfa9f7efe93d 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -724,19 +724,15 @@ static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
}
/**
- * set_mode_freq - Switch tuner to other mode.
- * @client: struct i2c_client pointer
+ * set_mode - Switch tuner to other mode.
* @t: a pointer to the module's internal struct_tuner
* @mode: enum v4l2_type (radio or TV)
- * @freq: frequency to set (0 means to use the previous one)
*
* If tuner doesn't support the needed mode (radio or TV), prints a
* debug message and returns -EINVAL, changing its state to standby.
- * Otherwise, changes the state and sets frequency to the last value, if
- * the tuner can sleep or if it supports both Radio and TV.
+ * Otherwise, changes the mode and returns 0.
*/
-static int set_mode_freq(struct i2c_client *client, struct tuner *t,
- enum v4l2_tuner_type mode, unsigned int freq)
+static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
{
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
@@ -752,17 +748,27 @@ static int set_mode_freq(struct i2c_client *client, struct tuner *t,
t->mode = mode;
tuner_dbg("Changing to mode %d\n", mode);
}
+ return 0;
+}
+
+/**
+ * set_freq - Set the tuner to the desired frequency.
+ * @t: a pointer to the module's internal struct_tuner
+ * @freq: frequency to set (0 means to use the current frequency)
+ */
+static void set_freq(struct tuner *t, unsigned int freq)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
+
if (t->mode == V4L2_TUNER_RADIO) {
- if (freq)
- t->radio_freq = freq;
- set_radio_freq(client, t->radio_freq);
+ if (!freq)
+ freq = t->radio_freq;
+ set_radio_freq(client, freq);
} else {
- if (freq)
- t->tv_freq = freq;
- set_tv_freq(client, t->tv_freq);
+ if (!freq)
+ freq = t->tv_freq;
+ set_tv_freq(client, freq);
}
-
- return 0;
}
/*
@@ -817,7 +823,8 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
/**
* tuner_fixup_std - force a given video standard variant
*
- * @t: tuner internal struct
+ * @t: tuner internal struct
+ * @std: TV standard
*
* A few devices or drivers have problem to detect some standard variations.
 * On other operating systems, the drivers generally have a per-country
@@ -827,57 +834,39 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
* to distinguish all video standard variations, a modprobe parameter can
* be used to force a video standard match.
*/
-static int tuner_fixup_std(struct tuner *t)
+static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
{
- if ((t->std & V4L2_STD_PAL) == V4L2_STD_PAL) {
+ if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) {
switch (pal[0]) {
case '6':
- tuner_dbg("insmod fixup: PAL => PAL-60\n");
- t->std = V4L2_STD_PAL_60;
- break;
+ return V4L2_STD_PAL_60;
case 'b':
case 'B':
case 'g':
case 'G':
- tuner_dbg("insmod fixup: PAL => PAL-BG\n");
- t->std = V4L2_STD_PAL_BG;
- break;
+ return V4L2_STD_PAL_BG;
case 'i':
case 'I':
- tuner_dbg("insmod fixup: PAL => PAL-I\n");
- t->std = V4L2_STD_PAL_I;
- break;
+ return V4L2_STD_PAL_I;
case 'd':
case 'D':
case 'k':
case 'K':
- tuner_dbg("insmod fixup: PAL => PAL-DK\n");
- t->std = V4L2_STD_PAL_DK;
- break;
+ return V4L2_STD_PAL_DK;
case 'M':
case 'm':
- tuner_dbg("insmod fixup: PAL => PAL-M\n");
- t->std = V4L2_STD_PAL_M;
- break;
+ return V4L2_STD_PAL_M;
case 'N':
case 'n':
- if (pal[1] == 'c' || pal[1] == 'C') {
- tuner_dbg("insmod fixup: PAL => PAL-Nc\n");
- t->std = V4L2_STD_PAL_Nc;
- } else {
- tuner_dbg("insmod fixup: PAL => PAL-N\n");
- t->std = V4L2_STD_PAL_N;
- }
- break;
- case '-':
- /* default parameter, do nothing */
- break;
+ if (pal[1] == 'c' || pal[1] == 'C')
+ return V4L2_STD_PAL_Nc;
+ return V4L2_STD_PAL_N;
default:
tuner_warn("pal= argument not recognised\n");
break;
}
}
- if ((t->std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
+ if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
switch (secam[0]) {
case 'b':
case 'B':
@@ -885,63 +874,42 @@ static int tuner_fixup_std(struct tuner *t)
case 'G':
case 'h':
case 'H':
- tuner_dbg("insmod fixup: SECAM => SECAM-BGH\n");
- t->std = V4L2_STD_SECAM_B |
- V4L2_STD_SECAM_G |
- V4L2_STD_SECAM_H;
- break;
+ return V4L2_STD_SECAM_B |
+ V4L2_STD_SECAM_G |
+ V4L2_STD_SECAM_H;
case 'd':
case 'D':
case 'k':
case 'K':
- tuner_dbg("insmod fixup: SECAM => SECAM-DK\n");
- t->std = V4L2_STD_SECAM_DK;
- break;
+ return V4L2_STD_SECAM_DK;
case 'l':
case 'L':
- if ((secam[1] == 'C') || (secam[1] == 'c')) {
- tuner_dbg("insmod fixup: SECAM => SECAM-L'\n");
- t->std = V4L2_STD_SECAM_LC;
- } else {
- tuner_dbg("insmod fixup: SECAM => SECAM-L\n");
- t->std = V4L2_STD_SECAM_L;
- }
- break;
- case '-':
- /* default parameter, do nothing */
- break;
+ if ((secam[1] == 'C') || (secam[1] == 'c'))
+ return V4L2_STD_SECAM_LC;
+ return V4L2_STD_SECAM_L;
default:
tuner_warn("secam= argument not recognised\n");
break;
}
}
- if ((t->std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
+ if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
switch (ntsc[0]) {
case 'm':
case 'M':
- tuner_dbg("insmod fixup: NTSC => NTSC-M\n");
- t->std = V4L2_STD_NTSC_M;
- break;
+ return V4L2_STD_NTSC_M;
case 'j':
case 'J':
- tuner_dbg("insmod fixup: NTSC => NTSC_M_JP\n");
- t->std = V4L2_STD_NTSC_M_JP;
- break;
+ return V4L2_STD_NTSC_M_JP;
case 'k':
case 'K':
- tuner_dbg("insmod fixup: NTSC => NTSC_M_KR\n");
- t->std = V4L2_STD_NTSC_M_KR;
- break;
- case '-':
- /* default parameter, do nothing */
- break;
+ return V4L2_STD_NTSC_M_KR;
default:
tuner_info("ntsc= argument not recognised\n");
break;
}
}
- return 0;
+ return std;
}
/*
@@ -1058,10 +1026,9 @@ static void tuner_status(struct dvb_frontend *fe)
static int tuner_s_radio(struct v4l2_subdev *sd)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (set_mode_freq(client, t, V4L2_TUNER_RADIO, 0) == -EINVAL)
- return 0;
+ if (set_mode(t, V4L2_TUNER_RADIO) == 0)
+ set_freq(t, 0);
return 0;
}
@@ -1072,16 +1039,20 @@ static int tuner_s_radio(struct v4l2_subdev *sd)
/**
* tuner_s_power - controls the power state of the tuner
* @sd: pointer to struct v4l2_subdev
- * @on: a zero value puts the tuner to sleep
+ * @on: a zero value puts the tuner to sleep, non-zero wakes it up
*/
static int tuner_s_power(struct v4l2_subdev *sd, int on)
{
struct tuner *t = to_tuner(sd);
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
- /* FIXME: Why this function don't wake the tuner if on != 0 ? */
- if (on)
+ if (on) {
+ if (t->standby && set_mode(t, t->mode) == 0) {
+ tuner_dbg("Waking up tuner\n");
+ set_freq(t, 0);
+ }
return 0;
+ }
tuner_dbg("Putting tuner to sleep\n");
t->standby = true;
@@ -1093,28 +1064,36 @@ static int tuner_s_power(struct v4l2_subdev *sd, int on)
static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (set_mode_freq(client, t, V4L2_TUNER_ANALOG_TV, 0) == -EINVAL)
+ if (set_mode(t, V4L2_TUNER_ANALOG_TV))
return 0;
- t->std = std;
- tuner_fixup_std(t);
-
+ t->std = tuner_fixup_std(t, std);
+ if (t->std != std)
+ tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
+ set_freq(t, 0);
return 0;
}
static int tuner_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (set_mode_freq(client, t, f->type, f->frequency) == -EINVAL)
- return 0;
+ if (set_mode(t, f->type) == 0)
+ set_freq(t, f->frequency);
return 0;
}
+/**
+ * tuner_g_frequency - Get the tuned frequency for the tuner
+ * @sd: pointer to struct v4l2_subdev
+ * @f: pointer to struct v4l2_frequency
+ *
+ * At return, the structure f will be filled with tuner frequency
+ * if the tuner matches the f->type.
+ * Note: f->type should be initialized before calling it.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct tuner *t = to_tuner(sd);
@@ -1122,8 +1101,7 @@ static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
if (check_mode(t, f->type) == -EINVAL)
return 0;
- f->type = t->mode;
- if (fe_tuner_ops->get_frequency && !t->standby) {
+ if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
u32 abs_freq;
fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
@@ -1131,12 +1109,22 @@ static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
DIV_ROUND_CLOSEST(abs_freq * 2, 125) :
DIV_ROUND_CLOSEST(abs_freq, 62500);
} else {
- f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
+ f->frequency = (V4L2_TUNER_RADIO == f->type) ?
t->radio_freq : t->tv_freq;
}
return 0;
}
+/**
+ * tuner_g_tuner - Fill in tuner information
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * At return, the structure vt will be filled with tuner information
+ * if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling it.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct tuner *t = to_tuner(sd);
@@ -1145,48 +1133,58 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
if (check_mode(t, vt->type) == -EINVAL)
return 0;
- vt->type = t->mode;
- if (analog_ops->get_afc)
+ if (vt->type == t->mode && analog_ops->get_afc)
vt->afc = analog_ops->get_afc(&t->fe);
- if (t->mode == V4L2_TUNER_ANALOG_TV)
+ if (vt->type == V4L2_TUNER_ANALOG_TV)
vt->capability |= V4L2_TUNER_CAP_NORM;
- if (t->mode != V4L2_TUNER_RADIO) {
+ if (vt->type != V4L2_TUNER_RADIO) {
vt->rangelow = tv_range[0] * 16;
vt->rangehigh = tv_range[1] * 16;
return 0;
}
/* radio mode */
- vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- if (fe_tuner_ops->get_status) {
- u32 tuner_status;
-
- fe_tuner_ops->get_status(&t->fe, &tuner_status);
- vt->rxsubchans =
- (tuner_status & TUNER_STATUS_STEREO) ?
- V4L2_TUNER_SUB_STEREO :
- V4L2_TUNER_SUB_MONO;
+ if (vt->type == t->mode) {
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+ if (fe_tuner_ops->get_status) {
+ u32 tuner_status;
+
+ fe_tuner_ops->get_status(&t->fe, &tuner_status);
+ vt->rxsubchans =
+ (tuner_status & TUNER_STATUS_STEREO) ?
+ V4L2_TUNER_SUB_STEREO :
+ V4L2_TUNER_SUB_MONO;
+ }
+ if (analog_ops->has_signal)
+ vt->signal = analog_ops->has_signal(&t->fe);
+ vt->audmode = t->audmode;
}
- if (analog_ops->has_signal)
- vt->signal = analog_ops->has_signal(&t->fe);
vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
- vt->audmode = t->audmode;
vt->rangelow = radio_range[0] * 16000;
vt->rangehigh = radio_range[1] * 16000;
return 0;
}
+/**
+ * tuner_s_tuner - Set the tuner's audio mode
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * Sets the audio mode if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling it.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (set_mode_freq(client, t, vt->type, 0) == -EINVAL)
+ if (set_mode(t, vt->type))
return 0;
if (t->mode == V4L2_TUNER_RADIO)
t->audmode = vt->audmode;
+ set_freq(t, 0);
return 0;
}
@@ -1221,7 +1219,8 @@ static int tuner_resume(struct i2c_client *c)
tuner_dbg("resume\n");
if (!t->standby)
- set_mode_freq(c, t, t->type, 0);
+ if (set_mode(t, t->mode) == 0)
+ set_freq(t, 0);
return 0;
}
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index c3ab0c813be2..48fea373c25a 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -27,14 +27,20 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
struct uvc_entity *entity)
{
const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
- struct uvc_entity *remote;
+ struct media_entity *sink;
unsigned int i;
- u8 remote_pad;
- int ret = 0;
+ int ret;
+
+ sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+ ? (entity->vdev ? &entity->vdev->entity : NULL)
+ : &entity->subdev.entity;
+ if (sink == NULL)
+ return 0;
for (i = 0; i < entity->num_pads; ++i) {
struct media_entity *source;
- struct media_entity *sink;
+ struct uvc_entity *remote;
+ u8 remote_pad;
if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
continue;
@@ -43,10 +49,11 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
if (remote == NULL)
return -EINVAL;
- source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
- ? &remote->vdev->entity : &remote->subdev.entity;
- sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
- ? &entity->vdev->entity : &entity->subdev.entity;
+ source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
+ ? (remote->vdev ? &remote->vdev->entity : NULL)
+ : &remote->subdev.entity;
+ if (source == NULL)
+ continue;
remote_pad = remote->num_pads - 1;
ret = media_entity_create_link(source, remote_pad,
@@ -55,11 +62,10 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
return ret;
}
- if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
- ret = v4l2_device_register_subdev(&chain->dev->vdev,
- &entity->subdev);
+ if (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+ return 0;
- return ret;
+ return v4l2_device_register_subdev(&chain->dev->vdev, &entity->subdev);
}
static struct v4l2_subdev_ops uvc_subdev_ops = {
@@ -84,9 +90,11 @@ static int uvc_mc_init_entity(struct uvc_entity *entity)
ret = media_entity_init(&entity->subdev.entity,
entity->num_pads, entity->pads, 0);
- } else
+ } else if (entity->vdev != NULL) {
ret = media_entity_init(&entity->vdev->entity,
entity->num_pads, entity->pads, 0);
+ } else
+ ret = 0;
return ret;
}
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 109a06384a8f..f90ce9fce539 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -104,6 +104,8 @@ static int __uvc_free_buffers(struct uvc_video_queue *queue)
}
if (queue->count) {
+ uvc_queue_cancel(queue, 0);
+ INIT_LIST_HEAD(&queue->mainqueue);
vfree(queue->mem);
queue->count = 0;
}
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index fc766b9f24c5..49994793cc77 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1255,8 +1255,10 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
/* Commit the streaming parameters. */
ret = uvc_commit_video(stream, &stream->ctrl);
- if (ret < 0)
+ if (ret < 0) {
+ uvc_queue_enable(&stream->queue, 0);
return ret;
+ }
return uvc_init_video(stream, GFP_KERNEL);
}
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 19d5ae293780..06f14008b346 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -167,6 +167,12 @@ static void v4l2_device_release(struct device *cd)
mutex_unlock(&videodev_lock);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
+ vdev->vfl_type != VFL_TYPE_SUBDEV)
+ media_device_unregister_entity(&vdev->entity);
+#endif
+
/* Release video_device and perform other
cleanups as needed. */
vdev->release(vdev);
@@ -389,9 +395,6 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
static int v4l2_open(struct inode *inode, struct file *filp)
{
struct video_device *vdev;
-#if defined(CONFIG_MEDIA_CONTROLLER)
- struct media_entity *entity = NULL;
-#endif
int ret = 0;
/* Check if the video device is available */
@@ -405,17 +408,6 @@ static int v4l2_open(struct inode *inode, struct file *filp)
/* and increase the device refcount */
video_get(vdev);
mutex_unlock(&videodev_lock);
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV) {
- entity = media_entity_get(&vdev->entity);
- if (!entity) {
- ret = -EBUSY;
- video_put(vdev);
- return ret;
- }
- }
-#endif
if (vdev->fops->open) {
if (vdev->lock && mutex_lock_interruptible(vdev->lock)) {
ret = -ERESTARTSYS;
@@ -431,14 +423,8 @@ static int v4l2_open(struct inode *inode, struct file *filp)
err:
/* decrease the refcount in case of an error */
- if (ret) {
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_entity_put(entity);
-#endif
+ if (ret)
video_put(vdev);
- }
return ret;
}
@@ -455,11 +441,6 @@ static int v4l2_release(struct inode *inode, struct file *filp)
if (vdev->lock)
mutex_unlock(vdev->lock);
}
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_entity_put(&vdev->entity);
-#endif
/* decrease the refcount unconditionally since the release()
return value is ignored. */
video_put(vdev);
@@ -754,12 +735,6 @@ void video_unregister_device(struct video_device *vdev)
if (!vdev || !video_is_registered(vdev))
return;
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_device_unregister_entity(&vdev->entity);
-#endif
-
mutex_lock(&videodev_lock);
/* This must be in a critical section to prevent a race with v4l2_open.
* Once this bit has been cleared video_get may never be called again.
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 506edcc2ddeb..69e8c6ffcc49 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1822,6 +1822,8 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_g_tuner)
break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_tuner(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1840,6 +1842,8 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_tuner)
break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "index=%d, name=%s, type=%d, "
"capability=0x%x, rangelow=%d, "
"rangehigh=%d, signal=%d, afc=%d, "
@@ -1858,6 +1862,8 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_g_frequency)
break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_frequency(file, fh, p);
if (!ret)
dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
@@ -1940,13 +1946,19 @@ static long __video_do_ioctl(struct file *file,
case VIDIOC_S_HW_FREQ_SEEK:
{
struct v4l2_hw_freq_seek *p = arg;
+ enum v4l2_tuner_type type;
if (!ops->vidioc_s_hw_freq_seek)
break;
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd,
- "tuner=%d, type=%d, seek_upward=%d, wrap_around=%d\n",
- p->tuner, p->type, p->seek_upward, p->wrap_around);
- ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
+ "tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n",
+ p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing);
+ if (p->type != type)
+ ret = -EINVAL;
+ else
+ ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
break;
}
case VIDIOC_ENUM_FRAMESIZES:
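The tuner-core and v4l2-ioctl changes above move the radio-versus-TV decision into the ioctl core, which pre-fills the tuner type from the kind of device node being used; drivers then leave mode-specific fields untouched when the requested type does not match their current mode. A simplified sketch of that split, with made-up enum and function names rather than the real V4L2 types, could be:

    enum node_kind  { NODE_VIDEO, NODE_RADIO };
    enum tuner_type { TUNER_TV, TUNER_RADIO };

    struct tuner_query {
            enum tuner_type type;
            unsigned int freq;
    };

    /* The core fills in the type from the device node kind
     * before the driver callback ever sees the request. */
    static void core_fill_type(enum node_kind kind, struct tuner_query *q)
    {
            q->type = (kind == NODE_RADIO) ? TUNER_RADIO : TUNER_TV;
    }

    /* The driver only reports mode-specific data when the requested
     * type matches the mode it is currently operating in. */
    static int driver_g_tuner(enum tuner_type current_mode, struct tuner_query *q)
    {
            if (q->type != current_mode)
                    return 0;
            q->freq = 101700;       /* placeholder value for the active mode */
            return 0;
    }

With the type fixed by the core, a radio node can no longer be switched into TV mode as a side effect of a query, which is the behaviour the msp3400 and tuner-core hunks above enforce.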
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 6ba1461d51ef..3015e6000946 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -492,13 +492,6 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EINVAL;
}
- /*
- * If the same number of buffers and memory access method is requested
- * then return immediately.
- */
- if (q->memory == req->memory && req->count == q->num_buffers)
- return 0;
-
if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
/*
* We already have buffers allocated, so first check if they
@@ -539,9 +532,9 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
/* Finally, allocate buffers and video memory */
ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes,
plane_sizes);
- if (ret < 0) {
- dprintk(1, "Memory allocation failed with error: %d\n", ret);
- return ret;
+ if (ret == 0) {
+ dprintk(1, "Memory allocation failed\n");
+ return -ENOMEM;
}
/*
@@ -1196,6 +1189,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* has not already dequeued before initiating cancel.
*/
INIT_LIST_HEAD(&q->done_list);
+ atomic_set(&q->queued_count, 0);
wake_up_all(&q->done_wq);
/*
diff --git a/drivers/media/video/videobuf2-dma-sg.c b/drivers/media/video/videobuf2-dma-sg.c
index b2d9485aac75..10a20d9509d9 100644
--- a/drivers/media/video/videobuf2-dma-sg.c
+++ b/drivers/media/video/videobuf2-dma-sg.c
@@ -62,7 +62,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
goto fail_pages_array_alloc;
for (i = 0; i < buf->sg_desc.num_pages; ++i) {
- buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
if (NULL == buf->pages[i])
goto fail_pages_alloc;
sg_set_page(&buf->sg_desc.sglist[i],
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0f09c057e796..6ca938a6bf94 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -728,6 +728,9 @@ config MFD_TPS65910
if you say yes here you get support for the TPS65910 series of
Power Management chips.
+config TPS65911_COMPARATOR
+ tristate
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index efe3cc33ed92..d7d47d2a4c76 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -94,3 +94,4 @@ obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index c27fd1fc3b86..c71ae09430c5 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -619,6 +619,7 @@ static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
/* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
static struct ds1wm_driver_data ds1wm_pdata = {
.active_high = 1,
+ .reset_recover_delay = 1,
};
static struct resource ds1wm_resources[] = {
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 2808bd125d13..04c7093d6499 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -99,6 +99,7 @@ static int ds1wm_disable(struct platform_device *pdev)
static struct ds1wm_driver_data ds1wm_pdata = {
.active_high = 0,
+ .reset_recover_delay = 1,
};
static struct resource ds1wm_resources[] __initdata = {
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 855219526ccb..1717144fe7f4 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -26,7 +26,6 @@
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <plat/usb.h>
-#include <linux/pm_runtime.h>
#define USBHS_DRIVER_NAME "usbhs-omap"
#define OMAP_EHCI_DEVICE "ehci-omap"
@@ -147,6 +146,9 @@
struct usbhs_hcd_omap {
+ struct clk *usbhost_ick;
+ struct clk *usbhost_hs_fck;
+ struct clk *usbhost_fs_fck;
struct clk *xclk60mhsp1_ck;
struct clk *xclk60mhsp2_ck;
struct clk *utmi_p1_fck;
@@ -156,6 +158,8 @@ struct usbhs_hcd_omap {
struct clk *usbhost_p2_fck;
struct clk *usbtll_p2_fck;
struct clk *init_60m_fclk;
+ struct clk *usbtll_fck;
+ struct clk *usbtll_ick;
void __iomem *uhh_base;
void __iomem *tll_base;
@@ -349,13 +353,46 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
omap->platdata.ehci_data = pdata->ehci_data;
omap->platdata.ohci_data = pdata->ohci_data;
- pm_runtime_enable(&pdev->dev);
+ omap->usbhost_ick = clk_get(dev, "usbhost_ick");
+ if (IS_ERR(omap->usbhost_ick)) {
+ ret = PTR_ERR(omap->usbhost_ick);
+ dev_err(dev, "usbhost_ick failed error:%d\n", ret);
+ goto err_end;
+ }
+
+ omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
+ if (IS_ERR(omap->usbhost_hs_fck)) {
+ ret = PTR_ERR(omap->usbhost_hs_fck);
+ dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
+ goto err_usbhost_ick;
+ }
+
+ omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
+ if (IS_ERR(omap->usbhost_fs_fck)) {
+ ret = PTR_ERR(omap->usbhost_fs_fck);
+ dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
+ goto err_usbhost_hs_fck;
+ }
+
+ omap->usbtll_fck = clk_get(dev, "usbtll_fck");
+ if (IS_ERR(omap->usbtll_fck)) {
+ ret = PTR_ERR(omap->usbtll_fck);
+ dev_err(dev, "usbtll_fck failed error:%d\n", ret);
+ goto err_usbhost_fs_fck;
+ }
+
+ omap->usbtll_ick = clk_get(dev, "usbtll_ick");
+ if (IS_ERR(omap->usbtll_ick)) {
+ ret = PTR_ERR(omap->usbtll_ick);
+ dev_err(dev, "usbtll_ick failed error:%d\n", ret);
+ goto err_usbtll_fck;
+ }
omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
if (IS_ERR(omap->utmi_p1_fck)) {
ret = PTR_ERR(omap->utmi_p1_fck);
dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_end;
+ goto err_usbtll_ick;
}
omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
@@ -485,8 +522,22 @@ err_xclk60mhsp1_ck:
err_utmi_p1_fck:
clk_put(omap->utmi_p1_fck);
+err_usbtll_ick:
+ clk_put(omap->usbtll_ick);
+
+err_usbtll_fck:
+ clk_put(omap->usbtll_fck);
+
+err_usbhost_fs_fck:
+ clk_put(omap->usbhost_fs_fck);
+
+err_usbhost_hs_fck:
+ clk_put(omap->usbhost_hs_fck);
+
+err_usbhost_ick:
+ clk_put(omap->usbhost_ick);
+
err_end:
- pm_runtime_disable(&pdev->dev);
kfree(omap);
end_probe:
@@ -520,7 +571,11 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
clk_put(omap->utmi_p2_fck);
clk_put(omap->xclk60mhsp1_ck);
clk_put(omap->utmi_p1_fck);
- pm_runtime_disable(&pdev->dev);
+ clk_put(omap->usbtll_ick);
+ clk_put(omap->usbtll_fck);
+ clk_put(omap->usbhost_fs_fck);
+ clk_put(omap->usbhost_hs_fck);
+ clk_put(omap->usbhost_ick);
kfree(omap);
return 0;
@@ -640,6 +695,7 @@ static int usbhs_enable(struct device *dev)
struct usbhs_omap_platform_data *pdata = &omap->platdata;
unsigned long flags = 0;
int ret = 0;
+ unsigned long timeout;
unsigned reg;
dev_dbg(dev, "starting TI HSUSB Controller\n");
@@ -652,7 +708,11 @@ static int usbhs_enable(struct device *dev)
if (omap->count > 0)
goto end_count;
- pm_runtime_get_sync(dev);
+ clk_enable(omap->usbhost_ick);
+ clk_enable(omap->usbhost_hs_fck);
+ clk_enable(omap->usbhost_fs_fck);
+ clk_enable(omap->usbtll_fck);
+ clk_enable(omap->usbtll_ick);
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -676,6 +736,50 @@ static int usbhs_enable(struct device *dev)
omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
+ /* perform TLL soft reset, and wait until reset is complete */
+ usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+ /* Wait for TLL reset to complete */
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev, "operation timed out\n");
+ ret = -EINVAL;
+ goto err_tll;
+ }
+ }
+
+ dev_dbg(dev, "TLL RESET DONE\n");
+
+ /* (1<<3) = no idle mode only for initial debugging */
+ usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+ OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+ OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
+
+ /* Put UHH in NoIdle/NoStandby mode */
+ reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+ if (is_omap_usbhs_rev1(omap)) {
+ reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+ | OMAP_UHH_SYSCONFIG_SIDLEMODE
+ | OMAP_UHH_SYSCONFIG_CACTIVITY
+ | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+ reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+
+
+ } else if (is_omap_usbhs_rev2(omap)) {
+ reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
+ reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
+ reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
+ reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+ }
+
+ usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
/* setup ULPI bypass and burst configurations */
reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
@@ -815,8 +919,6 @@ end_count:
return 0;
err_tll:
- pm_runtime_put_sync(dev);
- spin_unlock_irqrestore(&omap->lock, flags);
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -824,6 +926,13 @@ err_tll:
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
gpio_free(pdata->ehci_data->reset_gpio_port[1]);
}
+
+ clk_disable(omap->usbtll_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbhost_fs_fck);
+ clk_disable(omap->usbhost_hs_fck);
+ clk_disable(omap->usbhost_ick);
+ spin_unlock_irqrestore(&omap->lock, flags);
return ret;
}
@@ -896,7 +1005,11 @@ static void usbhs_disable(struct device *dev)
clk_disable(omap->utmi_p1_fck);
}
- pm_runtime_put_sync(dev);
+ clk_disable(omap->usbtll_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbhost_fs_fck);
+ clk_disable(omap->usbhost_hs_fck);
+ clk_disable(omap->usbhost_ick);
	/* The gpio_free might sleep; so unlock the spinlock */
spin_unlock_irqrestore(&omap->lock, flags);
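The omap-usb-host change above replaces runtime PM with explicit clk_get()/clk_put() pairs and unwinds partially acquired clocks through a ladder of goto labels, releasing in reverse order of acquisition. A small illustrative sketch of that unwind style, with placeholder resource helpers instead of the real clock API, might read:

    /* Hypothetical acquire/release helpers standing in for clk_get()/clk_put(). */
    static int resource_pool[3];

    static void *get_res(int idx)  { return &resource_pool[idx]; }
    static void put_res(void *res) { (void)res; }

    static int acquire_three(void **a, void **b, void **c)
    {
            *a = get_res(0);
            if (!*a)
                    goto err;

            *b = get_res(1);
            if (!*b)
                    goto err_a;

            *c = get_res(2);
            if (!*c)
                    goto err_b;

            return 0;

            /* Unwind in reverse order of acquisition, one label per step;
             * unreachable here only because get_res() never fails in this sketch. */
    err_b:
            put_res(*b);
    err_a:
            put_res(*a);
    err:
            return -1;
    }

Each new resource adds one label, so a failure at any step falls through exactly the cleanups for the resources already held, which mirrors the err_usbtll_ick/err_usbtll_fck chain added in the probe path above.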
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 3d2dc56a3d40..283ac6759757 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -125,7 +125,7 @@ static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
- struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
+ struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
int ret;
ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2a7e43bc796d..aa7d1d79b8c5 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -247,12 +247,12 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
return 0;
/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+ card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
if (card->csd.structure == 3) {
- int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
- if (ext_csd_struct > 2) {
+ if (card->ext_csd.raw_ext_csd_structure > 2) {
printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
"version %d\n", mmc_hostname(card->host),
- ext_csd_struct);
+ card->ext_csd.raw_ext_csd_structure);
err = -EINVAL;
goto out;
}
@@ -266,6 +266,10 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
goto out;
}
+ card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+ card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+ card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+ card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
if (card->ext_csd.rev >= 2) {
card->ext_csd.sectors =
ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
@@ -277,7 +281,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
mmc_card_set_blockaddr(card);
}
-
+ card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
EXT_CSD_CARD_TYPE_26:
@@ -307,6 +311,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
mmc_hostname(card->host));
}
+ card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.raw_erase_timeout_mult =
+ ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+ card->ext_csd.raw_hc_erase_grp_size =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
if (card->ext_csd.rev >= 3) {
u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
@@ -334,6 +343,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
}
+ card->ext_csd.raw_hc_erase_gap_size =
+ ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
+ card->ext_csd.raw_sec_trim_mult =
+ ext_csd[EXT_CSD_SEC_TRIM_MULT];
+ card->ext_csd.raw_sec_erase_mult =
+ ext_csd[EXT_CSD_SEC_ERASE_MULT];
+ card->ext_csd.raw_sec_feature_support =
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.raw_trim_mult =
+ ext_csd[EXT_CSD_TRIM_MULT];
if (card->ext_csd.rev >= 4) {
/*
* Enhanced area feature support -- check whether the eMMC
@@ -341,7 +360,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
* area offset and size to user by adding sysfs interface.
*/
if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
- (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
u8 hc_erase_grp_sz =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
u8 hc_wp_grp_sz =
@@ -401,17 +420,17 @@ static inline void mmc_free_ext_csd(u8 *ext_csd)
}
-static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
- unsigned bus_width)
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
u8 *bw_ext_csd;
int err;
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
err = mmc_get_ext_csd(card, &bw_ext_csd);
- if (err)
- return err;
- if ((ext_csd == NULL || bw_ext_csd == NULL)) {
+ if (err || bw_ext_csd == NULL) {
if (bus_width != MMC_BUS_WIDTH_1)
err = -EINVAL;
goto out;
@@ -421,35 +440,40 @@ static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
goto out;
/* only compare read only fields */
- err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
+ err = (!(card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
- (ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
+ (card->ext_csd.raw_erased_mem_count ==
bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
- (ext_csd[EXT_CSD_REV] ==
+ (card->ext_csd.rev ==
bw_ext_csd[EXT_CSD_REV]) &&
- (ext_csd[EXT_CSD_STRUCTURE] ==
+ (card->ext_csd.raw_ext_csd_structure ==
bw_ext_csd[EXT_CSD_STRUCTURE]) &&
- (ext_csd[EXT_CSD_CARD_TYPE] ==
+ (card->ext_csd.raw_card_type ==
bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
- (ext_csd[EXT_CSD_S_A_TIMEOUT] ==
+ (card->ext_csd.raw_s_a_timeout ==
bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
- (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
+ (card->ext_csd.raw_hc_erase_gap_size ==
bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
- (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
+ (card->ext_csd.raw_erase_timeout_mult ==
bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
- (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
+ (card->ext_csd.raw_hc_erase_grp_size ==
bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
- (ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
+ (card->ext_csd.raw_sec_trim_mult ==
bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
- (ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
+ (card->ext_csd.raw_sec_erase_mult ==
bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
- (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
+ (card->ext_csd.raw_sec_feature_support ==
bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
- (ext_csd[EXT_CSD_TRIM_MULT] ==
+ (card->ext_csd.raw_trim_mult ==
bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
- memcmp(&ext_csd[EXT_CSD_SEC_CNT],
- &bw_ext_csd[EXT_CSD_SEC_CNT],
- 4) != 0);
+ (card->ext_csd.raw_sectors[0] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+ (card->ext_csd.raw_sectors[1] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+ (card->ext_csd.raw_sectors[2] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+ (card->ext_csd.raw_sectors[3] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
if (err)
err = -EINVAL;
@@ -770,7 +794,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
err = mmc_compare_ext_csds(card,
- ext_csd,
bus_width);
else
err = mmc_bus_test(card, bus_width);
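The new raw_sectors[] fields above cache the four EXT_CSD_SEC_CNT bytes that the existing code already assembles, little-endian, into the card's sector count; cards above 2 GiB are then switched to block addressing. A small sketch of that arithmetic, using a made-up SEC_CNT value for an 8 GiB part:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical EXT_CSD SEC_CNT bytes (little-endian): 0x01000000
	 * sectors of 512 bytes = 8 GiB. */
	uint8_t sec_cnt[4] = { 0x00, 0x00, 0x00, 0x01 };

	uint32_t sectors = (uint32_t)sec_cnt[0] << 0 |
			   (uint32_t)sec_cnt[1] << 8 |
			   (uint32_t)sec_cnt[2] << 16 |
			   (uint32_t)sec_cnt[3] << 24;

	/* Cards with more than 2 GiB of 512-byte sectors are
	 * block-addressed, as in the hunk above. */
	int block_addressed = sectors > (2u * 1024 * 1024 * 1024) / 512;

	printf("sectors=%u (%llu bytes), block addressing: %s\n",
	       (unsigned)sectors, (unsigned long long)sectors * 512,
	       block_addressed ? "yes" : "no");
	return 0;
}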
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 7721de942c69..fe140724a02e 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -582,6 +582,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
data->error = -EILSEQ;
} else if (status & MCI_DATATIMEOUT) {
data->error = -ETIMEDOUT;
+ } else if (status & MCI_STARTBITERR) {
+ data->error = -ECOMM;
} else if (status & MCI_TXUNDERRUN) {
data->error = -EIO;
} else if (status & MCI_RXOVERRUN) {
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index bb32e21c09db..2164e8c6476c 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -86,6 +86,7 @@
#define MCI_CMDRESPEND (1 << 6)
#define MCI_CMDSENT (1 << 7)
#define MCI_DATAEND (1 << 8)
+#define MCI_STARTBITERR (1 << 9)
#define MCI_DATABLOCKEND (1 << 10)
#define MCI_CMDACTIVE (1 << 11)
#define MCI_TXACTIVE (1 << 12)
@@ -112,6 +113,7 @@
#define MCI_CMDRESPENDCLR (1 << 6)
#define MCI_CMDSENTCLR (1 << 7)
#define MCI_DATAENDCLR (1 << 8)
+#define MCI_STARTBITERRCLR (1 << 9)
#define MCI_DATABLOCKENDCLR (1 << 10)
/* Extended status bits for the ST Micro variants */
#define MCI_ST_SDIOITC (1 << 22)
@@ -127,6 +129,7 @@
#define MCI_CMDRESPENDMASK (1 << 6)
#define MCI_CMDSENTMASK (1 << 7)
#define MCI_DATAENDMASK (1 << 8)
+#define MCI_STARTBITERRMASK (1 << 9)
#define MCI_DATABLOCKENDMASK (1 << 10)
#define MCI_CMDACTIVEMASK (1 << 11)
#define MCI_TXACTIVEMASK (1 << 12)
@@ -150,7 +153,7 @@
#define MCI_IRQENABLE \
(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
- MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
+ MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK)
/* These interrupts are directed to IRQ1 when two IRQ lines are available */
#define MCI_IRQ1MASK \
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 98517a373473..e3bad8247fd1 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -992,6 +992,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
* features
*/
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+ dev->vlan_features = dev->features;
dev->irq = pdev->irq;
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 7d25a97d33f6..44e219c910da 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -1111,7 +1111,7 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
struct bna_intr_info *intr_info)
{
int err = 0;
- unsigned long flags;
+ unsigned long irq_flags = 0, flags;
u32 irq;
irq_handler_t irq_handler;
@@ -1125,18 +1125,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
if (bnad->cfg_flags & BNAD_CF_MSIX) {
irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
irq = bnad->msix_table[bnad->msix_num - 1].vector;
- flags = 0;
intr_info->intr_type = BNA_INTR_T_MSIX;
intr_info->idl[0].vector = bnad->msix_num - 1;
} else {
irq_handler = (irq_handler_t)bnad_isr;
irq = bnad->pcidev->irq;
- flags = IRQF_SHARED;
+ irq_flags = IRQF_SHARED;
intr_info->intr_type = BNA_INTR_T_INTX;
/* intr_info->idl.vector = 0 ? */
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
+ flags = irq_flags;
sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
/*
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index f181304a7ab6..672f096fe090 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1015,11 +1015,10 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
return -EINVAL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 | dev->dev_addr[5]);
- GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
- GRETH_REGSAVE(regs->esa_lsb,
- addr->sa_data[2] << 24 | addr->
- sa_data[3] << 16 | addr->sa_data[4] << 8 | addr->sa_data[5]);
return 0;
}
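The greth hunk above writes the station address from dev->dev_addr (instead of the raw sockaddr bytes) into the two ESA registers: the first two bytes go to esa_msb, the remaining four to esa_lsb. A short sketch of that packing with a made-up MAC address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical station address, as it would sit in dev->dev_addr. */
	uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };

	/* Two high bytes in the MSB register, four low bytes in the LSB
	 * register, matching the GRETH_REGSAVE() calls above. */
	uint32_t esa_msb = (uint32_t)mac[0] << 8 | mac[1];
	uint32_t esa_lsb = (uint32_t)mac[2] << 24 | (uint32_t)mac[3] << 16 |
			   (uint32_t)mac[4] << 8 | mac[5];

	printf("esa_msb=0x%04x esa_lsb=0x%08x\n",
	       (unsigned)esa_msb, (unsigned)esa_lsb);
	return 0;
}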
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 3e5d0b6b6516..0d283781bc5e 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -692,10 +692,10 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
- write_lock(&disc_data_lock);
+ write_lock_bh(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
- write_unlock(&disc_data_lock);
+ write_unlock_bh(&disc_data_lock);
if (!sp)
return;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 4c628393c8b1..bc02968cee16 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -813,10 +813,10 @@ static void mkiss_close(struct tty_struct *tty)
{
struct mkiss *ax;
- write_lock(&disc_data_lock);
+ write_lock_bh(&disc_data_lock);
ax = tty->disc_data;
tty->disc_data = NULL;
- write_unlock(&disc_data_lock);
+ write_unlock_bh(&disc_data_lock);
if (!ax)
return;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index b78be088c4ad..60f46bc2bf64 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -140,7 +140,7 @@ MODULE_LICENSE("GPL");
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
-module_param(dspcfg_workaround, int, 1);
+module_param(dspcfg_workaround, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
@@ -2028,8 +2028,8 @@ static void drain_rx(struct net_device *dev)
np->rx_ring[i].cmd_status = 0;
np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->rx_dma[i], buflen,
+ pci_unmap_single(np->pci_dev, np->rx_dma[i],
+ buflen + NATSEMI_PADDING,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
@@ -2360,7 +2360,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
PCI_DMA_FROMDEVICE);
} else {
pci_unmap_single(np->pci_dev, np->rx_dma[entry],
- buflen, PCI_DMA_FROMDEVICE);
+ buflen + NATSEMI_PADDING,
+ PCI_DMA_FROMDEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index d32850715f5c..ca306fd5f588 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.27.00.00-01"
+#define DRV_VERSION "v1.00.00.29.00.00-01"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -1996,6 +1996,7 @@ enum {
QL_LB_LINK_UP = 10,
QL_FRC_COREDUMP = 11,
QL_EEH_FATAL = 12,
+ QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */
};
/* link_status bit definitions */
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 930ae45457bb..6b4ff970972b 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -2152,6 +2152,10 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
* thread
*/
clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ /* Set the asic recovery bit to tell the reset path that we are
+ * in fatal error recovery rather than a normal close
+ */
+ set_bit(QL_ASIC_RECOVERY, &qdev->flags);
queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
@@ -2166,23 +2170,20 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
return;
case CAM_LOOKUP_ERR_EVENT:
- netif_err(qdev, link, qdev->ndev,
- "Multiple CAM hits lookup occurred.\n");
- netif_err(qdev, drv, qdev->ndev,
- "This event shouldn't occur.\n");
+ netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
+ netdev_err(qdev->ndev, "This event shouldn't occur.\n");
ql_queue_asic_error(qdev);
return;
case SOFT_ECC_ERROR_EVENT:
- netif_err(qdev, rx_err, qdev->ndev,
- "Soft ECC error detected.\n");
+ netdev_err(qdev->ndev, "Soft ECC error detected.\n");
ql_queue_asic_error(qdev);
break;
case PCI_ERR_ANON_BUF_RD:
- netif_err(qdev, rx_err, qdev->ndev,
- "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
+ netdev_err(qdev->ndev, "PCI error occurred when reading "
+ "anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
ql_queue_asic_error(qdev);
break;
@@ -2437,11 +2438,10 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
if (var & STS_FE) {
ql_queue_asic_error(qdev);
- netif_err(qdev, intr, qdev->ndev,
- "Got fatal error, STS = %x.\n", var);
+ netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
- netif_err(qdev, intr, qdev->ndev,
- "Resetting chip. Error Status Register = 0x%x\n", var);
+ netdev_err(qdev->ndev, "Resetting chip. "
+ "Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
@@ -3818,11 +3818,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
end_jiffies = jiffies +
max((unsigned long)1, usecs_to_jiffies(30));
- /* Stop management traffic. */
- ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+ /* If the recovery bit is set, skip the mailbox command and
+ * clear the bit; otherwise this is the normal reset process.
+ */
+ if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+ /* Stop management traffic. */
+ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
- /* Wait for the NIC and MGMNT FIFOs to empty. */
- ql_wait_fifo_empty(qdev);
+ /* Wait for the NIC and MGMNT FIFOs to empty. */
+ ql_wait_fifo_empty(qdev);
+ } else
+ clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
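The QL_ASIC_RECOVERY hunks above coordinate two paths through one flag bit: the fatal-error handler sets it before scheduling the reset worker, and ql_adapter_reset() either skips the mailbox traffic control (clearing the bit) or performs a normal reset. A stripped-down, single-threaded sketch of that hand-off; the kernel uses the atomic set_bit()/test_bit()/clear_bit() helpers on qdev->flags, plain bit operations stand in for them here:

#include <stdio.h>

#define QL_ASIC_RECOVERY	(1u << 14)

static unsigned long flags;

static void queue_asic_error(void)
{
	/* Fatal-error path: mark that the coming reset is error recovery. */
	flags |= QL_ASIC_RECOVERY;
}

static void adapter_reset(void)
{
	if (!(flags & QL_ASIC_RECOVERY)) {
		printf("normal reset: stop management traffic, drain FIFOs\n");
	} else {
		/* Recovery reset: skip the mailbox commands, consume the flag. */
		flags &= ~QL_ASIC_RECOVERY;
		printf("recovery reset: mailbox traffic control skipped\n");
	}
}

int main(void)
{
	adapter_reset();	/* normal close/reset */
	queue_asic_error();
	adapter_reset();	/* reset triggered by a fatal error */
	return 0;
}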
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 8a72a979ee71..1f3f7b4dd638 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -140,6 +140,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.tpauser = 1,
.hw_swap = 1,
.no_ade = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
};
#define SH_GIGA_ETH_BASE 0xfee00000
@@ -1184,8 +1186,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
- sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
- | ECMR_DM, ECMR);
+ sh_eth_write(ndev,
+ (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
}
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 8ec1a9a0bb9a..2f110fb30daa 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -182,11 +182,11 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
#ifdef SL_INCLUDE_CSLIP
cbuff = xchg(&sl->cbuff, cbuff);
slcomp = xchg(&sl->slcomp, slcomp);
+#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
sl->xdata = 0;
sl->xbits = 0;
#endif
-#endif
spin_unlock_bh(&sl->lock);
err = 0;
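The slip.c hunk above moves the #endif so the CONFIG_SLIP_MODE_SLIP6 reset becomes a sibling of, rather than nested inside, the SL_INCLUDE_CSLIP block; in the old layout the SLIP6 state was only cleared when CSLIP support was also compiled in. A tiny sketch of nested versus sibling #ifdef blocks (the CONFIG_DEMO_* names are illustrative, not kernel options):

#include <stdio.h>

static int cslip_ready;
static int slip6_ready;

int main(void)
{
	/* Nested form: the inner block is only compiled when BOTH options
	 * are enabled -- the shape of the bug fixed above. */
#ifdef CONFIG_DEMO_CSLIP
	cslip_ready = 1;
# ifdef CONFIG_DEMO_SLIP6
	slip6_ready = 1;
# endif
#endif

	/* Sibling form: each block depends only on its own option. */
#ifdef CONFIG_DEMO_SLIP6
	slip6_ready = 1;
#endif

	/* Try: cc -DCONFIG_DEMO_SLIP6 demo.c && ./a.out */
	printf("cslip=%d slip6=%d\n", cslip_ready, slip6_ready);
	return 0;
}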
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 387ca43f26f4..304fe78ff60e 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2421,10 +2421,8 @@ static void hso_free_net_device(struct hso_device *hso_dev)
remove_net_device(hso_net->parent);
- if (hso_net->net) {
+ if (hso_net->net)
unregister_netdev(hso_net->net);
- free_netdev(hso_net->net);
- }
/* start freeing */
for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
@@ -2436,6 +2434,9 @@ static void hso_free_net_device(struct hso_device *hso_dev)
kfree(hso_net->mux_bulk_tx_buf);
hso_net->mux_bulk_tx_buf = NULL;
+ if (hso_net->net)
+ free_netdev(hso_net->net);
+
kfree(hso_dev);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index fa6e2ac7475a..67402350d0df 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -575,7 +575,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
u32 val;
- while (num_allocated < num_to_alloc) {
+ while (num_allocated <= num_to_alloc) {
struct vmxnet3_rx_buf_info *rbi;
union Vmxnet3_GenericDesc *gd;
@@ -621,9 +621,15 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
BUG_ON(rbi->dma_addr == 0);
gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
- gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+ gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
| val | rbi->len);
+ /* Fill the last buffer but don't mark it ready, or else the
+ * device will think that the queue is full */
+ if (num_allocated == num_to_alloc)
+ break;
+
+ gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
num_allocated++;
vmxnet3_cmd_ring_adv_next2fill(ring);
}
@@ -1140,6 +1146,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
};
u32 num_rxd = 0;
+ bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
@@ -1150,11 +1157,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
&rxComp);
while (rcd->gen == rq->comp_ring.gen) {
struct vmxnet3_rx_buf_info *rbi;
- struct sk_buff *skb;
+ struct sk_buff *skb, *new_skb = NULL;
+ struct page *new_page = NULL;
int num_to_alloc;
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
-
+ struct vmxnet3_cmd_ring *ring = NULL;
if (num_rxd >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
@@ -1165,6 +1173,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+ ring = rq->rx_ring + ring_idx;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
rbi = rq->buf_info[ring_idx] + idx;
@@ -1193,37 +1202,80 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
goto rcd_done;
}
+ skip_page_frags = false;
ctx->skb = rbi->skb;
- rbi->skb = NULL;
+ new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ /* Skb allocation failed; do not hand this skb
+ * over to the stack. Reuse it. Drop the existing pkt.
+ */
+ rq->stats.rx_buf_alloc_failure++;
+ ctx->skb = NULL;
+ rq->stats.drop_total++;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
skb_put(ctx->skb, rcd->len);
+
+ /* Immediate refill */
+ new_skb->dev = adapter->netdev;
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ rbi->skb = new_skb;
+ rbi->dma_addr = pci_map_single(adapter->pdev,
+ rbi->skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
+
} else {
- BUG_ON(ctx->skb == NULL);
+ BUG_ON(ctx->skb == NULL && !skip_page_frags);
+
/* non SOP buffer must be type 1 in most cases */
- if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
- BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
+ BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
- if (rcd->len) {
- pci_unmap_page(adapter->pdev,
- rbi->dma_addr, rbi->len,
- PCI_DMA_FROMDEVICE);
+ /* If an sop buffer was dropped, skip all
+ * following non-sop fragments. They will be reused.
+ */
+ if (skip_page_frags)
+ goto rcd_done;
- vmxnet3_append_frag(ctx->skb, rcd, rbi);
- rbi->page = NULL;
- }
- } else {
- /*
- * The only time a non-SOP buffer is type 0 is
- * when it's EOP and error flag is raised, which
- * has already been handled.
+ new_page = alloc_page(GFP_ATOMIC);
+ if (unlikely(new_page == NULL)) {
+ /* Replacement page frag could not be allocated.
+ * Reuse this page. Drop the pkt and free the
+ * skb which contained this page as a frag. Skip
+ * processing all the following non-sop frags.
*/
- BUG_ON(true);
+ rq->stats.rx_buf_alloc_failure++;
+ dev_kfree_skb(ctx->skb);
+ ctx->skb = NULL;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
+
+ if (rcd->len) {
+ pci_unmap_page(adapter->pdev,
+ rbi->dma_addr, rbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ vmxnet3_append_frag(ctx->skb, rcd, rbi);
}
+
+ /* Immediate refill */
+ rbi->page = new_page;
+ rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+ 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
}
+
skb = ctx->skb;
if (rcd->eop) {
skb->len += skb->data_len;
@@ -1244,26 +1296,27 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
}
rcd_done:
- /* device may skip some rx descs */
- rq->rx_ring[ring_idx].next2comp = idx;
- VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
- rq->rx_ring[ring_idx].size);
-
- /* refill rx buffers frequently to avoid starving the h/w */
- num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
- ring_idx);
- if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
- ring_idx, adapter))) {
- vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
- adapter);
-
- /* if needed, update the register */
- if (unlikely(rq->shared->updateRxProd)) {
- VMXNET3_WRITE_BAR0_REG(adapter,
- rxprod_reg[ring_idx] + rq->qid * 8,
- rq->rx_ring[ring_idx].next2fill);
- rq->uncommitted[ring_idx] = 0;
- }
+ /* device may have skipped some rx descs */
+ ring->next2comp = idx;
+ num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
+ ring = rq->rx_ring + ring_idx;
+ while (num_to_alloc) {
+ vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+ &rxCmdDesc);
+ BUG_ON(!rxd->addr);
+
+ /* Recv desc is ready to be used by the device */
+ rxd->gen = ring->gen;
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ num_to_alloc--;
+ }
+
+ /* if needed, update the register */
+ if (unlikely(rq->shared->updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ rxprod_reg[ring_idx] + rq->qid * 8,
+ ring->next2fill);
+ rq->uncommitted[ring_idx] = 0;
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
@@ -2894,6 +2947,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
#endif
num_rx_queues = 1;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
if (enable_mq)
num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
@@ -2901,6 +2955,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
num_tx_queues = 1;
+ num_tx_queues = rounddown_pow_of_two(num_tx_queues);
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
@@ -3085,6 +3140,7 @@ vmxnet3_remove_device(struct pci_dev *pdev)
else
#endif
num_rx_queues = 1;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
cancel_work_sync(&adapter->work);
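The probe and remove hunks above clamp the rx/tx queue counts with rounddown_pow_of_two(), presumably so the device's RSS distribution always sees a power-of-two number of queues. A quick sketch of that rounding; this is an illustrative reimplementation, not the kernel's log2.h helper:

#include <stdio.h>

/* Round n down to the nearest power of two (n must be >= 1). */
static unsigned int rounddown_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p <= n / 2)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int cpus[] = { 1, 2, 3, 6, 8, 12 };
	unsigned int i;

	for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		printf("%u CPUs -> %u queues\n", cpus[i], rounddown_pow2(cpus[i]));
	return 0;
}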
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index f50d36fdf405..e08d75e3f170 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -55,6 +55,7 @@
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
+#include <linux/log2.h>
#include "vmxnet3_defs.h"
@@ -68,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.1.9.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.1.18.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01010900
+#define VMXNET3_DRIVER_VERSION_NUM 0x01011200
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 62172d585723..f82383b3ed30 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -107,10 +107,13 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
case AR5K_PKT_TYPE_BEACON:
case AR5K_PKT_TYPE_PROBE_RESP:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
+ break;
case AR5K_PKT_TYPE_PIFS:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
+ break;
default:
frame_type = type;
+ break;
}
tx_ctl->tx_control_0 |=
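The desc.c hunk above adds the break statements that were missing from the frame-type switch: without them the BEACON/PROBE_RESP case fell through into the PIFS case and the frame type was silently overwritten. A tiny sketch of why the break matters (the numeric frame-type values are placeholders, not the AR5K constants):

#include <stdio.h>

enum pkt_type { PKT_BEACON, PKT_PIFS, PKT_OTHER };

static int frame_type(enum pkt_type type, int with_break)
{
	int ft = -1;

	switch (type) {
	case PKT_BEACON:
		ft = 1;			/* "no delay" frame type */
		if (with_break)
			break;
		/* falls through when the break is missing */
	case PKT_PIFS:
		ft = 2;			/* PIFS frame type */
		break;
	default:
		ft = 0;
		break;
	}
	return ft;
}

int main(void)
{
	printf("beacon, missing break -> %d (wrong)\n", frame_type(PKT_BEACON, 0));
	printf("beacon, with break    -> %d (right)\n", frame_type(PKT_BEACON, 1));
	return 0;
}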
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 1fef84f87c78..392771f93759 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -691,14 +691,12 @@ ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
if (!chinfo[pier].pd_curves)
continue;
- for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+ for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
struct ath5k_pdgain_info *pd =
&chinfo[pier].pd_curves[pdg];
- if (pd != NULL) {
- kfree(pd->pd_step);
- kfree(pd->pd_pwr);
- }
+ kfree(pd->pd_step);
+ kfree(pd->pd_pwr);
}
kfree(chinfo[pier].pd_curves);
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 296c316a8341..f2c0c236392f 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -297,7 +297,9 @@ ath5k_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
- struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_softc *sc = hw->priv;
ath5k_led_off(sc);
return 0;
@@ -306,7 +308,8 @@ static int ath5k_pci_suspend(struct device *dev)
static int ath5k_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct ath5k_softc *sc = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_softc *sc = hw->priv;
/*
* Suspend/Resume resets the PCI configuration space, so we have to
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 929c68cdf8ab..a073cdce1f15 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -10,7 +10,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_softc *sc = hw->priv; \
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
\
@@ -18,7 +19,8 @@ static ssize_t ath5k_attr_store_##name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_softc *sc = hw->priv; \
int val; \
\
val = (int)simple_strtoul(buf, NULL, 10); \
@@ -33,7 +35,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_softc *sc = hw->priv; \
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index b8cbfc707213..3bad0b2cf9a3 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -278,6 +278,12 @@ static int ath_pci_suspend(struct device *device)
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+ /* The device has to be moved to FULLSLEEP forcibly.
+ * Otherwise the chip never moves to full sleep
+ * when no interface is up.
+ */
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3779b8977d47..33443bcaa8d9 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -671,7 +671,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
* TODO - this could be improved to be dependent on the rate.
* The hardware can keep up at lower rates, but not higher rates
*/
- if (fi->keyix != ATH9K_TXKEYIX_INVALID)
+ if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+ !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
ndelim += ATH_AGGR_ENCRYPTDELIM;
/*
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 2fb53d067512..333b69ef2ae2 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -112,6 +112,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x04bb, 0x093f) },
/* NEC WL300NU-G */
{ USB_DEVICE(0x0409, 0x0249) },
+ /* NEC WL300NU-AG */
+ { USB_DEVICE(0x0409, 0x02b4) },
/* AVM FRITZ!WLAN USB Stick N */
{ USB_DEVICE(0x057c, 0x8401) },
/* AVM FRITZ!WLAN USB Stick N 2.4 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 61d4a11f566b..2a88e73bb39c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -36,6 +36,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -55,10 +56,10 @@
#define IWL100_UCODE_API_MIN 5
#define IWL1000_FW_PRE "iwlwifi-1000-"
-#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode"
+#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
#define IWL100_FW_PRE "iwlwifi-100-"
-#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE #api ".ucode"
+#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 2282279cffc4..3df76f53a41b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -36,6 +36,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -58,13 +59,13 @@
#define IWL105_UCODE_API_MIN 5
#define IWL2030_FW_PRE "iwlwifi-2030-"
-#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
+#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
#define IWL2000_FW_PRE "iwlwifi-2000-"
-#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
+#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
#define IWL105_FW_PRE "iwlwifi-105-"
-#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE #api ".ucode"
+#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index f99f9c193352..e816c27db794 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -37,6 +37,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -57,10 +58,10 @@
#define IWL5150_UCODE_API_MIN 1
#define IWL5000_FW_PRE "iwlwifi-5000-"
-#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode"
+#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
#define IWL5150_FW_PRE "iwlwifi-5150-"
-#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
+#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
/* NIC configuration for 5000 series */
static void iwl5000_nic_config(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index fbe565c816e3..5b150bc70b06 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -36,6 +36,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -58,16 +59,16 @@
#define IWL6000G2_UCODE_API_MIN 4
#define IWL6000_FW_PRE "iwlwifi-6000-"
-#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
+#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
#define IWL6050_FW_PRE "iwlwifi-6050-"
-#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
+#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
-#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
+#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
-#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
+#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
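The iwlwifi hunks above swap #api for __stringify(api) because the # operator stringizes its argument literally: when the argument is itself a macro such as IWL6000_UCODE_API_MAX, the firmware name ends up containing the macro's name rather than its value. __stringify() forces an extra round of expansion first. A minimal sketch of the difference; STR()/STR1() mirror the kernel's __stringify():

#include <stdio.h>

#define API_MAX 5

/* Single-level stringization: the argument is not macro-expanded. */
#define FW_NAME_BAD(api)	"iwlwifi-demo-" #api ".ucode"

/* Two-level stringization, the __stringify() idiom. */
#define STR1(x)			#x
#define STR(x)			STR1(x)
#define FW_NAME_OK(api)		"iwlwifi-demo-" STR(api) ".ucode"

int main(void)
{
	printf("bad: %s\n", FW_NAME_BAD(API_MAX));	/* ...-API_MAX.ucode */
	printf("ok : %s\n", FW_NAME_OK(API_MAX));	/* ...-5.ucode */
	return 0;
}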
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 213c80c6a668..45cc51c9c93e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1763,6 +1763,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_context *tmp;
+ enum nl80211_iftype newviftype = newtype;
u32 interface_modes;
int err;
@@ -1818,7 +1819,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* success */
iwl_teardown_interface(priv, vif, true);
- vif->type = newtype;
+ vif->type = newviftype;
vif->p2p = newp2p;
err = iwl_setup_interface(priv, ctx);
WARN_ON(err);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 686e176b5ebd..137dba95b1ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -126,7 +126,7 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
}
static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd)
+ struct iwl_tfd *tfd, int dma_dir)
{
struct pci_dev *dev = priv->pci_dev;
int i;
@@ -151,7 +151,7 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
- iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+ iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
/**
@@ -167,7 +167,8 @@ void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
struct iwl_tfd *tfd_tmp = txq->tfds;
int index = txq->q.read_ptr;
- iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);
+ iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
+ PCI_DMA_TODEVICE);
/* free SKB */
if (txq->txb) {
@@ -310,9 +311,7 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
i = get_cmd_index(q, q->read_ptr);
if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
+ iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
PCI_DMA_BIDIRECTIONAL);
txq->meta[i].flags = 0;
}
@@ -535,12 +534,7 @@ out_free_arrays:
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id)
{
- int actual_slots = slots_num;
-
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+ memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num);
txq->need_update = 0;
@@ -700,10 +694,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
- cmd->len[i], PCI_DMA_TODEVICE);
+ cmd->len[i], PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
iwlagn_unmap_tfd(priv, out_meta,
- &txq->tfds[q->write_ptr]);
+ &txq->tfds[q->write_ptr],
+ PCI_DMA_BIDIRECTIONAL);
idx = -ENOMEM;
goto out;
}
@@ -807,7 +802,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
- iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);
+ iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 092e342c19df..942f7a3969a7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -298,6 +298,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
{RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
/* HP - Lite-On ,8188CUS Slim Combo */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2c5b9b991279..692671b11667 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3483,6 +3483,8 @@ static int __init pci_setup(char *str)
pci_no_msi();
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
+ } else if (!strncmp(str, "realloc", 7)) {
+ pci_realloc();
} else if (!strcmp(str, "nodomains")) {
pci_no_domains();
} else if (!strncmp(str, "cbiosize=", 9)) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 731e20265ace..3a39bf1f1e2c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -146,6 +146,8 @@ static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
#endif
+extern void pci_realloc(void);
+
static inline int pci_no_d1d2(struct pci_dev *dev)
{
unsigned int parent_dstates = 0;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 1e9e5a5b8c81..9995842e45b5 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -47,6 +47,13 @@ struct resource_list_x {
(head)->next = NULL; \
} while (0)
+int pci_realloc_enable = 0;
+#define pci_realloc_enabled() pci_realloc_enable
+void pci_realloc(void)
+{
+ pci_realloc_enable = 1;
+}
+
/**
* add_to_list() - add a new resource tracker to the list
* @head: Head of the list
@@ -1025,6 +1032,7 @@ static int __init pci_get_max_depth(void)
return depth;
}
+
/*
* first try will not touch pci bridge res
* second and later try will clear small leaf bridge res
@@ -1068,6 +1076,13 @@ again:
/* any device complain? */
if (!head.next)
goto enable_and_dump;
+
+ /* don't realloc unless asked to do so */
+ if (!pci_realloc_enabled()) {
+ free_list(resource_list_x, &head);
+ goto enable_and_dump;
+ }
+
failed_type = 0;
for (list = head.next; list;) {
failed_type |= list->flags;
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 712baab3c83d..e956f659089a 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -76,10 +76,10 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
if (skt->nr == 0)
- gpio_request_array(vpac270_pcmcia_gpios,
+ gpio_free_array(vpac270_pcmcia_gpios,
ARRAY_SIZE(vpac270_pcmcia_gpios));
else
- gpio_request_array(vpac270_cf_gpios,
+ gpio_free_array(vpac270_cf_gpios,
ARRAY_SIZE(vpac270_cf_gpios));
}
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 005417bd429e..e1c4938b301b 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1156,9 +1156,9 @@ static acpi_status wmid3_set_device_status(u32 value, u16 device)
struct wmid3_gds_input_param params = {
.function_num = 0x1,
.hotkey_number = 0x01,
- .devices = ACER_WMID3_GDS_WIRELESS &
- ACER_WMID3_GDS_THREEG &
- ACER_WMID3_GDS_WIMAX &
+ .devices = ACER_WMID3_GDS_WIRELESS |
+ ACER_WMID3_GDS_THREEG |
+ ACER_WMID3_GDS_WIMAX |
ACER_WMID3_GDS_BLUETOOTH,
};
struct acpi_buffer input = {
@@ -1445,6 +1445,8 @@ static void acer_wmi_notify(u32 value, void *context)
union acpi_object *obj;
struct event_return_value return_value;
acpi_status status;
+ u16 device_state;
+ const struct key_entry *key;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
@@ -1472,23 +1474,32 @@ static void acer_wmi_notify(u32 value, void *context)
switch (return_value.function) {
case WMID_HOTKEY_EVENT:
- if (return_value.device_state) {
- u16 device_state = return_value.device_state;
- pr_debug("device state: 0x%x\n", device_state);
- if (has_cap(ACER_CAP_WIRELESS))
- rfkill_set_sw_state(wireless_rfkill,
- !(device_state & ACER_WMID3_GDS_WIRELESS));
- if (has_cap(ACER_CAP_BLUETOOTH))
- rfkill_set_sw_state(bluetooth_rfkill,
- !(device_state & ACER_WMID3_GDS_BLUETOOTH));
- if (has_cap(ACER_CAP_THREEG))
- rfkill_set_sw_state(threeg_rfkill,
- !(device_state & ACER_WMID3_GDS_THREEG));
- }
- if (!sparse_keymap_report_event(acer_wmi_input_dev,
- return_value.key_num, 1, true))
+ device_state = return_value.device_state;
+ pr_debug("device state: 0x%x\n", device_state);
+
+ key = sparse_keymap_entry_from_scancode(acer_wmi_input_dev,
+ return_value.key_num);
+ if (!key) {
pr_warn("Unknown key number - 0x%x\n",
return_value.key_num);
+ } else {
+ switch (key->keycode) {
+ case KEY_WLAN:
+ case KEY_BLUETOOTH:
+ if (has_cap(ACER_CAP_WIRELESS))
+ rfkill_set_sw_state(wireless_rfkill,
+ !(device_state & ACER_WMID3_GDS_WIRELESS));
+ if (has_cap(ACER_CAP_THREEG))
+ rfkill_set_sw_state(threeg_rfkill,
+ !(device_state & ACER_WMID3_GDS_THREEG));
+ if (has_cap(ACER_CAP_BLUETOOTH))
+ rfkill_set_sw_state(bluetooth_rfkill,
+ !(device_state & ACER_WMID3_GDS_BLUETOOTH));
+ break;
+ }
+ sparse_keymap_report_entry(acer_wmi_input_dev, key,
+ 1, true);
+ }
break;
default:
pr_warn("Unknown function number - %d - %d\n",
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 00460cb9587b..3c7857c71a23 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1025,6 +1025,7 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus)
return power;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max;
bd = backlight_device_register(asus->driver->name,
&asus->platform_device->dev, asus,
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 3f204fde1b02..8877b836d27c 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -1030,8 +1030,10 @@ static int __devinit compal_probe(struct platform_device *pdev)
initialize_fan_control_data(data);
err = sysfs_create_group(&pdev->dev.kobj, &compal_attribute_group);
- if (err)
+ if (err) {
+ kfree(data);
return err;
+ }
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->hwmon_dev)) {
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index d3841de6a8cf..e39ab1d3ed87 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -292,12 +292,9 @@ static int dell_rfkill_set(void *data, bool blocked)
dell_send_request(buffer, 17, 11);
/* If the hardware switch controls this radio, and the hardware
- switch is disabled, don't allow changing the software state.
- If the hardware switch is reported as not supported, always
- fire the SMI to toggle the killswitch. */
+ switch is disabled, don't allow changing the software state */
if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16)) &&
- (buffer->output[1] & BIT(0))) {
+ !(buffer->output[1] & BIT(16))) {
ret = -EINVAL;
goto out;
}
@@ -403,23 +400,6 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
- int status;
-
- get_buffer();
- dell_send_request(buffer, 17, 11);
- status = buffer->output[1];
- release_buffer();
-
- /* if hardware rfkill is not supported, set it explicitly */
- if (!(status & BIT(0))) {
- if (wifi_rfkill)
- dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
- if (bluetooth_rfkill)
- dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
- if (wwan_rfkill)
- dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
- }
-
if (wifi_rfkill)
dell_rfkill_query(wifi_rfkill, (void *)1);
if (bluetooth_rfkill)
@@ -560,11 +540,11 @@ static int dell_get_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 0, 1);
+ ret = buffer->output[1];
+
out:
release_buffer();
- if (ret)
- return ret;
- return buffer->output[1];
+ return ret;
}
static const struct backlight_ops dell_ops = {
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index f94017bcdd6e..e2faa3cbb792 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -207,6 +207,7 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 rc;
if (WARN_ON(insize > sizeof(args.data)))
return -EINVAL;
@@ -224,13 +225,13 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
}
bios_return = (struct bios_return *)obj->buffer.pointer;
+ rc = bios_return->return_code;
- if (bios_return->return_code) {
- if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE)
- pr_warn("query 0x%x returned error 0x%x\n",
- query, bios_return->return_code);
+ if (rc) {
+ if (rc != HPWMI_RET_UNKNOWN_CMDTYPE)
+ pr_warn("query 0x%x returned error 0x%x\n", query, rc);
kfree(obj);
- return bios_return->return_code;
+ return rc;
}
if (!outsize) {
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index e936364a609d..7f88c7923fc6 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -250,6 +250,7 @@ static int oaktrail_backlight_init(void)
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
bd = backlight_device_register(DRIVER_NAME,
&oaktrail_device->dev, NULL,
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 77f6e707a2a9..26c5b117df22 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -184,6 +184,10 @@ enum tpacpi_hkey_event_t {
/* Misc bay events */
TP_HKEY_EV_OPTDRV_EJ = 0x3006, /* opt. drive tray ejected */
+ TP_HKEY_EV_HOTPLUG_DOCK = 0x4010, /* docked into hotplug dock
+ or port replicator */
+ TP_HKEY_EV_HOTPLUG_UNDOCK = 0x4011, /* undocked from hotplug
+ dock or port replicator */
/* User-interface events */
TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
@@ -194,6 +198,10 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
+ /* Key-related user-interface events */
+ TP_HKEY_EV_KEY_NUMLOCK = 0x6000, /* NumLock key pressed */
+ TP_HKEY_EV_KEY_FN = 0x6005, /* Fn key pressed? E420 */
+
/* Thermal events */
TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
@@ -201,6 +209,10 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* thermal table changed */
+ TP_HKEY_EV_UNK_6040 = 0x6040, /* Related to AC change?
+ some sort of APM hint,
+ W520 */
+
/* Misc */
TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
};
@@ -3513,6 +3525,34 @@ static bool hotkey_notify_wakeup(const u32 hkey,
return true;
}
+static bool hotkey_notify_dockevent(const u32 hkey,
+ bool *send_acpi_ev,
+ bool *ignore_acpi_ev)
+{
+ /* 0x4000-0x4FFF: dock-related events */
+ *send_acpi_ev = true;
+ *ignore_acpi_ev = false;
+
+ switch (hkey) {
+ case TP_HKEY_EV_UNDOCK_ACK:
+ /* ACPI undock operation completed after wakeup */
+ hotkey_autosleep_ack = 1;
+ pr_info("undocked\n");
+ hotkey_wakeup_hotunplug_complete_notify_change();
+ return true;
+
+ case TP_HKEY_EV_HOTPLUG_DOCK: /* docked to port replicator */
+ pr_info("docked into hotplug port replicator\n");
+ return true;
+ case TP_HKEY_EV_HOTPLUG_UNDOCK: /* undocked from port replicator */
+ pr_info("undocked from hotplug port replicator\n");
+ return true;
+
+ default:
+ return false;
+ }
+}
+
static bool hotkey_notify_usrevent(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
@@ -3547,13 +3587,13 @@ static bool hotkey_notify_usrevent(const u32 hkey,
static void thermal_dump_all_sensors(void);
-static bool hotkey_notify_thermal(const u32 hkey,
+static bool hotkey_notify_6xxx(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
{
bool known = true;
- /* 0x6000-0x6FFF: thermal alarms */
+ /* 0x6000-0x6FFF: thermal alarms/notices and keyboard events */
*send_acpi_ev = true;
*ignore_acpi_ev = false;
@@ -3582,8 +3622,17 @@ static bool hotkey_notify_thermal(const u32 hkey,
"a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
+
+ case TP_HKEY_EV_KEY_NUMLOCK:
+ case TP_HKEY_EV_KEY_FN:
+ /* key press events, we just ignore them as long as the EC
+ * is still reporting them in the normal keyboard stream */
+ *send_acpi_ev = false;
+ *ignore_acpi_ev = true;
+ return true;
+
default:
- pr_alert("THERMAL ALERT: unknown thermal alarm received\n");
+ pr_warn("unknown possible thermal alarm or keyboard event received\n");
known = false;
}
@@ -3652,15 +3701,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
}
break;
case 4:
- /* 0x4000-0x4FFF: dock-related wakeups */
- if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
- hotkey_autosleep_ack = 1;
- pr_info("undocked\n");
- hotkey_wakeup_hotunplug_complete_notify_change();
- known_ev = true;
- } else {
- known_ev = false;
- }
+ /* 0x4000-0x4FFF: dock-related events */
+ known_ev = hotkey_notify_dockevent(hkey, &send_acpi_ev,
+ &ignore_acpi_ev);
break;
case 5:
/* 0x5000-0x5FFF: human interface helpers */
@@ -3668,8 +3711,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
&ignore_acpi_ev);
break;
case 6:
- /* 0x6000-0x6FFF: thermal alarms */
- known_ev = hotkey_notify_thermal(hkey, &send_acpi_ev,
+ /* 0x6000-0x6FFF: thermal alarms/notices and
+ * keyboard events */
+ known_ev = hotkey_notify_6xxx(hkey, &send_acpi_ev,
&ignore_acpi_ev);
break;
case 7:
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index e5f7b8fe51f4..2bb8f451cc06 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -266,7 +266,7 @@ static struct regulator_ops db8500_regulator_switch_ops = {
* Regulator information
*/
static struct db8500_regulator_info
- db8500_regulator_info[DB8500_NUM_REGULATORS] = {
+db8500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VAPE] = {
.desc = {
.name = "db8500-vape",
@@ -492,11 +492,9 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
info->desc.name, err);
/* if failing, unregister all earlier regulators */
- i--;
- while (i >= 0) {
+ while (--i >= 0) {
info = &db8500_regulator_info[i];
regulator_unregister(info->rdev);
- i--;
}
return err;
}
@@ -536,13 +534,7 @@ static struct platform_driver db8500_regulator_driver = {
static int __init db8500_regulator_init(void)
{
- int ret;
-
- ret = platform_driver_register(&db8500_regulator_driver);
- if (ret < 0)
- return -ENODEV;
-
- return 0;
+ return platform_driver_register(&db8500_regulator_driver);
}
static void __exit db8500_regulator_exit(void)
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index daff7fd0e95c..486ed8141fcd 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -139,7 +139,7 @@ static int max8952_set_voltage(struct regulator_dev *rdev,
s8 vid = -1, i;
if (!gpio_is_valid(max8952->pdata->gpio_vid0) ||
- !gpio_is_valid(max8952->pdata->gpio_vid0)) {
+ !gpio_is_valid(max8952->pdata->gpio_vid1)) {
/* DVS not supported */
return -EPERM;
}
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 10d5a1d9768e..ad6628ca94f4 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -39,25 +39,28 @@ struct max8997_data {
struct regulator_dev **rdev;
int ramp_delay; /* in mV/us */
+ bool buck1_gpiodvs;
+ bool buck2_gpiodvs;
+ bool buck5_gpiodvs;
u8 buck1_vol[8];
u8 buck2_vol[8];
u8 buck5_vol[8];
+ int buck125_gpios[3];
int buck125_gpioindex;
+ bool ignore_gpiodvs_side_effect;
u8 saved_states[MAX8997_REG_MAX];
};
static inline void max8997_set_gpio(struct max8997_data *max8997)
{
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
int set3 = (max8997->buck125_gpioindex) & 0x1;
int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
- gpio_set_value(pdata->buck125_gpios[0], set1);
- gpio_set_value(pdata->buck125_gpios[1], set2);
- gpio_set_value(pdata->buck125_gpios[2], set3);
+ gpio_set_value(max8997->buck125_gpios[0], set1);
+ gpio_set_value(max8997->buck125_gpios[1], set2);
+ gpio_set_value(max8997->buck125_gpios[2], set3);
}
struct voltage_map_desc {
@@ -380,8 +383,6 @@ static int max8997_get_voltage_register(struct regulator_dev *rdev,
static int max8997_get_voltage(struct regulator_dev *rdev)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
struct i2c_client *i2c = max8997->iodev->i2c;
int reg, shift, mask, ret;
int rid = max8997_get_rid(rdev);
@@ -391,9 +392,9 @@ static int max8997_get_voltage(struct regulator_dev *rdev)
if (ret)
return ret;
- if ((rid == MAX8997_BUCK1 && pdata->buck1_gpiodvs) ||
- (rid == MAX8997_BUCK2 && pdata->buck2_gpiodvs) ||
- (rid == MAX8997_BUCK5 && pdata->buck5_gpiodvs))
+ if ((rid == MAX8997_BUCK1 && max8997->buck1_gpiodvs) ||
+ (rid == MAX8997_BUCK2 && max8997->buck2_gpiodvs) ||
+ (rid == MAX8997_BUCK5 && max8997->buck5_gpiodvs))
reg += max8997->buck125_gpioindex;
ret = max8997_read_reg(i2c, reg, &val);
@@ -543,7 +544,8 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
rid == MAX8997_BUCK4 || rid == MAX8997_BUCK5) {
/* If the voltage is increasing */
if (org < i)
- udelay(desc->step * (i - org) / max8997->ramp_delay);
+ udelay(DIV_ROUND_UP(desc->step * (i - org),
+ max8997->ramp_delay));
}
return ret;
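The udelay() change above uses DIV_ROUND_UP() so the ramp wait is rounded up instead of truncated: with plain integer division a small voltage step could wait slightly less than the regulator needs, or even 0 us. A quick sketch of the difference, with made-up step and ramp numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values: a 25 mV increase on a 10 mV/us ramp. */
	int delta_mv = 25;
	int ramp_mv_per_us = 10;

	printf("plain division: %d us (too short)\n",
	       delta_mv / ramp_mv_per_us);
	printf("DIV_ROUND_UP  : %d us\n",
	       DIV_ROUND_UP(delta_mv, ramp_mv_per_us));
	return 0;
}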
@@ -561,8 +563,6 @@ static int max8997_assess_side_effect(struct regulator_dev *rdev,
u8 new_val, int *best)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
int rid = max8997_get_rid(rdev);
u8 *buckx_val[3];
bool buckx_gpiodvs[3];
@@ -589,9 +589,9 @@ static int max8997_assess_side_effect(struct regulator_dev *rdev,
buckx_val[0] = max8997->buck1_vol;
buckx_val[1] = max8997->buck2_vol;
buckx_val[2] = max8997->buck5_vol;
- buckx_gpiodvs[0] = pdata->buck1_gpiodvs;
- buckx_gpiodvs[1] = pdata->buck2_gpiodvs;
- buckx_gpiodvs[2] = pdata->buck5_gpiodvs;
+ buckx_gpiodvs[0] = max8997->buck1_gpiodvs;
+ buckx_gpiodvs[1] = max8997->buck2_gpiodvs;
+ buckx_gpiodvs[2] = max8997->buck5_gpiodvs;
for (i = 0; i < 8; i++) {
int others;
@@ -640,8 +640,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
int rid = max8997_get_rid(rdev);
const struct voltage_map_desc *desc;
int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
@@ -653,15 +651,15 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
switch (rid) {
case MAX8997_BUCK1:
- if (pdata->buck1_gpiodvs)
+ if (max8997->buck1_gpiodvs)
gpio_dvs_mode = true;
break;
case MAX8997_BUCK2:
- if (pdata->buck2_gpiodvs)
+ if (max8997->buck2_gpiodvs)
gpio_dvs_mode = true;
break;
case MAX8997_BUCK5:
- if (pdata->buck5_gpiodvs)
+ if (max8997->buck5_gpiodvs)
gpio_dvs_mode = true;
break;
}
@@ -695,7 +693,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
new_idx = tmp_idx;
new_val = tmp_val;
- if (pdata->ignore_gpiodvs_side_effect == false)
+ if (max8997->ignore_gpiodvs_side_effect == false)
return -EINVAL;
dev_warn(&rdev->dev, "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:"
@@ -993,6 +991,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
i2c = max8997->iodev->i2c;
max8997->buck125_gpioindex = pdata->buck125_default_idx;
+ max8997->buck1_gpiodvs = pdata->buck1_gpiodvs;
+ max8997->buck2_gpiodvs = pdata->buck2_gpiodvs;
+ max8997->buck5_gpiodvs = pdata->buck5_gpiodvs;
+ memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
+ max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
for (i = 0; i < 8; i++) {
max8997->buck1_vol[i] = ret =
@@ -1124,6 +1127,10 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
0x3f);
}
+ /* Misc Settings */
+ max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
+ max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
+
for (i = 0; i < pdata->num_regulators; i++) {
const struct voltage_map_desc *desc;
int id = pdata->regulators[i].id;
@@ -1148,10 +1155,6 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
}
}
- /* Misc Settings */
- max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
- max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
-
return 0;
err:
for (i = 0; i < max8997->num_regulators; i++)
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 12ef9121d4f0..11312f401c70 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -258,13 +258,13 @@ static int vmwdt_suspend(void)
if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
pr_err("The system cannot be suspended while the watchdog"
" is in use\n");
- return NOTIFY_BAD;
+ return notifier_from_errno(-EBUSY);
}
if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
clear_bit(VMWDT_OPEN, &vmwdt_is_open);
pr_err("The system cannot be suspended while the watchdog"
" is running\n");
- return NOTIFY_BAD;
+ return notifier_from_errno(-EBUSY);
}
return NOTIFY_DONE;
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c47b25fd3f43..92d7324acb1c 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -814,8 +814,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- if (__chsc_do_secm(css, 0))
- ret = NOTIFY_BAD;
+ ret = __chsc_do_secm(css, 0);
+ ret = notifier_from_errno(ret);
mutex_unlock(&css->mutex);
}
break;
@@ -831,8 +831,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- if (__chsc_do_secm(css, 1))
- ret = NOTIFY_BAD;
+ ret = __chsc_do_secm(css, 1);
+ ret = notifier_from_errno(ret);
mutex_unlock(&css->mutex);
}
/* search for subchannels, which appeared during hibernation */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4a1f029c4fe9..8d9dae89f065 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,6 +830,19 @@ config SCSI_GDTH
To compile this driver as a module, choose M here: the
module will be called gdth.
+config SCSI_ISCI
+ tristate "Intel(R) C600 Series Chipset SAS Controller"
+ depends on PCI && SCSI
+ depends on X86
+ # (temporary): known alpha quality driver
+ depends on EXPERIMENTAL
+ select SCSI_SAS_LIBSAS
+ ---help---
+ This driver supports the 6Gb/s SAS capabilities of the storage
+ control unit found in the Intel(R) C600 series chipset.
+
+ The experimental tag will be removed after the driver exits alpha.
+
config SCSI_GENERIC_NCR5380
tristate "Generic NCR5380/53c400 SCSI PIO support"
depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 7ad0b8a79ae8..3c08f5352b2d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_SCSI_AACRAID) += aacraid/
obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
obj-$(CONFIG_SCSI_PM8001) += pm8001/
+obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o
obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c6c0434d8034..6bba23a26303 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1037,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp)
unsigned char sense_key;
unsigned char asc; /* additional sense code */
unsigned char ascq; /* additional sense code qualifier */
+ unsigned long sense_data_size;
ei = cp->err_info;
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
@@ -1051,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result |= ei->ScsiStatus;
/* copy the sense data whether we need to or not. */
- memcpy(cmd->sense_buffer, ei->SenseInfo,
- ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
- SCSI_SENSE_BUFFERSIZE :
- ei->SenseLen);
+ if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+ sense_data_size = SCSI_SENSE_BUFFERSIZE;
+ else
+ sense_data_size = sizeof(ei->SenseInfo);
+ if (ei->SenseLen < sense_data_size)
+ sense_data_size = ei->SenseLen;
+
+ memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
scsi_set_resid(cmd, ei->ResidualCnt);
if (ei->CommandStatus == 0) {
@@ -2580,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[0].Ext = 0; /* we are not chaining*/
}
hpsa_scsi_do_simple_cmd_core(h, c);
- hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+ if (iocommand.buf_size > 0)
+ hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b7650613b8c2..bdfa223a7dbb 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4306,8 +4306,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
spin_lock_irqsave(vhost->host->host_lock, flags);
if (rc == H_CLOSED)
vio_enable_interrupts(to_vio_dev(vhost->dev));
- else if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
- (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
}
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644
index 000000000000..3359e10e0d8f
--- /dev/null
+++ b/drivers/scsi/isci/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SCSI_ISCI) += isci.o
+isci-objs := init.o phy.o request.o \
+ remote_device.o port.o \
+ host.o task.o probe_roms.o \
+ remote_node_context.o \
+ remote_node_table.o \
+ unsolicited_frame_control.o \
+ port_config.o \
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
new file mode 100644
index 000000000000..5f54461cabc5
--- /dev/null
+++ b/drivers/scsi/isci/firmware/Makefile
@@ -0,0 +1,19 @@
+# Makefile for create_fw
+#
+CC=gcc
+CFLAGS=-c -Wall -O2 -g
+LDFLAGS=
+SOURCES=create_fw.c
+OBJECTS=$(SOURCES:.c=.o)
+EXECUTABLE=create_fw
+
+all: $(SOURCES) $(EXECUTABLE)
+
+$(EXECUTABLE): $(OBJECTS)
+ $(CC) $(LDFLAGS) $(OBJECTS) -o $@
+
+.c.o:
+ $(CC) $(CFLAGS) $< -o $@
+
+clean:
+ rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
new file mode 100644
index 000000000000..8056d2bd233b
--- /dev/null
+++ b/drivers/scsi/isci/firmware/README
@@ -0,0 +1,36 @@
+This defines the temporary binary blob we are to pass to the SCU
+driver to emulate the binary firmware that we will eventually be
+able to access via NVRAM on the SCU controller.
+
+The current size of the binary blob is expected to be 149 bytes or larger
+
+Header Types:
+0x1: Phy Masks
+0x2: Phy Gens
+0x3: SAS Addrs
+0xff: End of Data
+
+ID string - u8[12]: "#SCU MAGIC#\0"
+Version - u8: 1
+SubVersion - u8: 0
+
+Header Type - u8: 0x1
+Size - u8: 8
+Phy Mask - u32[8]
+
+Header Type - u8: 0x2
+Size - u8: 8
+Phy Gen - u32[8]
+
+Header Type - u8: 0x3
+Size - u8: 8
+Sas Addr - u64[8]
+
+Header Type - u8: 0xff
+
+
+==============================================================================
+
+Place isci_firmware.bin in /lib/firmware
+Be sure to recreate the initramfs image to include the firmware.
+
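+Example workflow (a sketch; this assumes the create_fw tool in this
+directory and dracut as the initramfs generator, so substitute the tool
+your distribution uses):
+
+    $ make
+    $ ./create_fw
+    $ cp isci_firmware.bin /lib/firmware/
+    $ dracut -f
+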
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
new file mode 100644
index 000000000000..c7a2887a7e95
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.c
@@ -0,0 +1,99 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <asm/types.h>
+#include <strings.h>
+#include <stdint.h>
+
+#include "create_fw.h"
+#include "../probe_roms.h"
+
+int write_blob(struct isci_orom *isci_orom)
+{
+ FILE *fd;
+ int err;
+ size_t count;
+
+ fd = fopen(blob_name, "w+");
+ if (!fd) {
+ perror("Open file for write failed");
+ return -EIO;
+ }
+
+ count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
+ if (count != 1) {
+ perror("Write data failed");
+ fclose(fd);
+ return -EIO;
+ }
+
+ fclose(fd);
+
+ return 0;
+}
+
+void set_binary_values(struct isci_orom *isci_orom)
+{
+ int ctrl_idx, phy_idx, port_idx;
+
+ /* setting OROM signature */
+ strncpy(isci_orom->hdr.signature, sig, strlen(sig));
+ isci_orom->hdr.version = version;
+ isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
+ isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
+ isci_orom->hdr.num_elements = num_elements;
+
+ for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
+ isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
+ isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
+ max_num_concurrent_dev_spin_up;
+ isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
+ enable_ssc;
+
+ for (port_idx = 0; port_idx < 4; port_idx++)
+ isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
+ phy_mask[ctrl_idx][port_idx];
+
+ for (phy_idx = 0; phy_idx < 4; phy_idx++) {
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
+ (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
+ (__u32)(sas_addr[ctrl_idx][phy_idx]);
+
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
+ afe_tx_amp_control0;
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
+ afe_tx_amp_control1;
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
+ afe_tx_amp_control2;
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
+ afe_tx_amp_control3;
+ }
+ }
+}
+
+int main(void)
+{
+ int err;
+ struct isci_orom *isci_orom;
+
+ isci_orom = malloc(sizeof(struct isci_orom));
+ if (!isci_orom)
+ return -ENOMEM;
+ memset(isci_orom, 0, sizeof(struct isci_orom));
+
+ set_binary_values(isci_orom);
+
+ err = write_blob(isci_orom);
+ if (err < 0) {
+ free(isci_orom);
+ return err;
+ }
+
+ free(isci_orom);
+ return 0;
+}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
new file mode 100644
index 000000000000..5f298828d22e
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.h
@@ -0,0 +1,77 @@
+#ifndef _CREATE_FW_H_
+#define _CREATE_FW_H_
+#include "../probe_roms.h"
+
+
+/* we are configuring for 2 SCUs */
+static const int num_elements = 2;
+
+/*
+ * For all defined arrays:
+ * elements 0-3 are for SCU0, ports 0-3
+ * elements 4-7 are for SCU1, ports 0-3
+ *
+ * valid configurations for one SCU are:
+ * P0 P1 P2 P3
+ * ----------------
+ * 0xF,0x0,0x0,0x0 # 1 x4 port
+ * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
+ * # ports
+ * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
+ * # port
+ * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
+ * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
+ *
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value assigned to UNINIT_PARAM (255).
+ */
+
+/* discovery mode type (port auto config mode by default ) */
+
+/*
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value "0000000000000000". A SAS address of zeros is
+ * considered invalid and will not be used.
+ */
+#ifdef MPC
+static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
+ {1, 2, 4, 8} };
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
+ 0x5FCFFFFFF0000002ULL,
+ 0x5FCFFFFFF0000003ULL,
+ 0x5FCFFFFFF0000004ULL },
+ { 0x5FCFFFFFF0000005ULL,
+ 0x5FCFFFFFF0000006ULL,
+ 0x5FCFFFFFF0000007ULL,
+ 0x5FCFFFFFF0000008ULL } };
+#else /* APC (default) */
+static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4];
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
+ 0x5FCFFFFF00000001ULL,
+ 0x5FCFFFFF00000001ULL,
+ 0x5FCFFFFF00000001ULL },
+ { 0x5FCFFFFF00000002ULL,
+ 0x5FCFFFFF00000002ULL,
+ 0x5FCFFFFF00000002ULL,
+ 0x5FCFFFFF00000002ULL } };
+#endif
+
+/* Maximum number of concurrent device spin up */
+static const int max_num_concurrent_dev_spin_up = 1;
+
+/* enable of ssc operation */
+static const int enable_ssc;
+
+/* AFE_TX_AMP_CONTROL */
+static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
+static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
+static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
+static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
+
+static const char blob_name[] = "isci_firmware.bin";
+static const char sig[] = "ISCUOEMB";
+static const unsigned char version = 0x10;
+
+#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644
index 000000000000..26072f1e9852
--- /dev/null
+++ b/drivers/scsi/isci/host.c
@@ -0,0 +1,2751 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <scsi/sas.h>
+#include "host.h"
+#include "isci.h"
+#include "port.h"
+#include "host.h"
+#include "probe_roms.h"
+#include "remote_device.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "registers.h"
+#include "scu_remote_node_context.h"
+#include "scu_task_context.h"
+
+#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
+
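+/*
+ * Decode the capacity fields of the SMU device context capacity register.
+ * Each field stores its capacity minus one, hence the "+ 1" in the macros
+ * below.
+ */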
+#define smu_max_ports(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
+ )
+
+#define smu_max_task_contexts(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
+ )
+
+#define smu_max_rncs(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
+ )
+
+#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
+
+/**
+ * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL -
+ *
+ * The number of milliseconds to wait while a given phy is consuming power
+ * before allowing another set of phys to consume power. Ultimately, this will
+ * be specified by OEM parameter.
+ */
+#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
+
+/**
+ * NORMALIZE_PUT_POINTER() -
+ *
+ * This macro will normalize the completion queue put pointer so its value can
+ * be used as an array index
+ */
+#define NORMALIZE_PUT_POINTER(x) \
+ ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
+
+
+/**
+ * NORMALIZE_EVENT_POINTER() -
+ *
+ * This macro will normalize the completion queue event entry so its value can
+ * be used as an index.
+ */
+#define NORMALIZE_EVENT_POINTER(x) \
+ (\
+ ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
+ >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
+ )
+
+/**
+ * NORMALIZE_GET_POINTER() -
+ *
+ * This macro will normalize the completion queue get pointer so its value can
+ * be used as an index into an array
+ */
+#define NORMALIZE_GET_POINTER(x) \
+ ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
+
+/**
+ * NORMALIZE_GET_POINTER_CYCLE_BIT() -
+ *
+ * This macro will normalize the completion queue cycle pointer so it matches
+ * the completion queue cycle bit
+ */
+#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
+ ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
+
+/**
+ * COMPLETION_QUEUE_CYCLE_BIT() -
+ *
+ * This macro will return the cycle bit of the completion queue entry
+ */
+#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
+
+/* Init the state machine and call the state entry function (if any) */
+void sci_init_sm(struct sci_base_state_machine *sm,
+ const struct sci_base_state *state_table, u32 initial_state)
+{
+ sci_state_transition_t handler;
+
+ sm->initial_state_id = initial_state;
+ sm->previous_state_id = initial_state;
+ sm->current_state_id = initial_state;
+ sm->state_table = state_table;
+
+ handler = sm->state_table[initial_state].enter_state;
+ if (handler)
+ handler(sm);
+}
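+
+/*
+ * Usage sketch (hypothetical state ids and handlers, not part of this
+ * driver):
+ *
+ *	static const struct sci_base_state demo_states[] = {
+ *		[DEMO_INITIAL] = { .enter_state = demo_initial_enter },
+ *		[DEMO_READY]   = { .exit_state  = demo_ready_exit },
+ *	};
+ *
+ *	sci_init_sm(&sm, demo_states, DEMO_INITIAL);
+ *	...
+ *	sci_change_state(&sm, DEMO_READY);
+ */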
+
+/* Call the state exit fn, update the current state, call the state entry fn */
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
+{
+ sci_state_transition_t handler;
+
+ handler = sm->state_table[sm->current_state_id].exit_state;
+ if (handler)
+ handler(sm);
+
+ sm->previous_state_id = sm->current_state_id;
+ sm->current_state_id = next_state;
+
+ handler = sm->state_table[sm->current_state_id].enter_state;
+ if (handler)
+ handler(sm);
+}
+
+static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
+{
+ u32 get_value = ihost->completion_queue_get;
+ u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
+
+ if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
+ COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
+ return true;
+
+ return false;
+}
+
+static bool sci_controller_isr(struct isci_host *ihost)
+{
+ if (sci_controller_completion_queue_has_entries(ihost)) {
+ return true;
+ } else {
+ /*
+ * we have a spurious interrupt; it could be that we have already
+ * emptied the completion queue from a previous interrupt */
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+ /*
+ * There is a race in the hardware that could cause us not to be notified
+ * of an interrupt completion if we do not take this step. We will mask
+ * then unmask the interrupts so if there is another interrupt pending
+ * the clearing of the interrupt source we get the next interrupt message. */
+ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+ }
+
+ return false;
+}
+
+irqreturn_t isci_msix_isr(int vec, void *data)
+{
+ struct isci_host *ihost = data;
+
+ if (sci_controller_isr(ihost))
+ tasklet_schedule(&ihost->completion_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static bool sci_controller_error_isr(struct isci_host *ihost)
+{
+ u32 interrupt_status;
+
+ interrupt_status =
+ readl(&ihost->smu_registers->interrupt_status);
+ interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
+
+ if (interrupt_status != 0) {
+ /*
+ * There is an error interrupt pending, so let it through and handle
+ * it in the callback */
+ return true;
+ }
+
+ /*
+ * There is a race in the hardware that could cause us not to be notified
+ * of an interrupt completion if we do not take this step. We will mask
+ * then unmask the error interrupts so if there was another interrupt
+ * pending we will be notified.
+ * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
+ writel(0xff, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+
+ return false;
+}
+
+static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
+{
+ u32 index = SCU_GET_COMPLETION_INDEX(ent);
+ struct isci_request *ireq = ihost->reqs[index];
+
+ /* Make sure that we really want to process this IO request */
+ if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
+ ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
+ ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
+ /* Yep, this is a valid io request; pass it along to the
+ * io request handler
+ */
+ sci_io_request_tc_completion(ireq, ent);
+}
+
+static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
+{
+ u32 index;
+ struct isci_request *ireq;
+ struct isci_remote_device *idev;
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ switch (scu_get_command_request_type(ent)) {
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
+ ireq = ihost->reqs[index];
+ dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
+ __func__, ent, ireq);
+ /* @todo For a post TC operation we need to fail the IO
+ * request
+ */
+ break;
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
+ idev = ihost->device_table[index];
+ dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
+ __func__, ent, idev);
+ /* @todo For a port RNC operation we need to fail the
+ * device
+ */
+ break;
+ default:
+ dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
+ __func__, ent);
+ break;
+ }
+}
+
+static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
+{
+ u32 index;
+ u32 frame_index;
+
+ struct scu_unsolicited_frame_header *frame_header;
+ struct isci_phy *iphy;
+ struct isci_remote_device *idev;
+
+ enum sci_status result = SCI_FAILURE;
+
+ frame_index = SCU_GET_FRAME_INDEX(ent);
+
+ frame_header = ihost->uf_control.buffers.array[frame_index].header;
+ ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
+
+ if (SCU_GET_FRAME_ERROR(ent)) {
+ /*
+ * / @todo If the IAF frame or SIGNATURE FIS frame has an error, will
+ * / this cause a problem? We expect the phy initialization will
+ * / fail if there is an error in the frame. */
+ sci_controller_release_frame(ihost, frame_index);
+ return;
+ }
+
+ if (frame_header->is_address_frame) {
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ result = sci_phy_frame_handler(iphy, frame_index);
+ } else {
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ /*
+ * This is a signature fis or a frame from a direct attached SATA
+ * device that has not yet been created. In either case forward
+ * the frame to the PE and let it take care of the frame data. */
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ result = sci_phy_frame_handler(iphy, frame_index);
+ } else {
+ if (index < ihost->remote_node_entries)
+ idev = ihost->device_table[index];
+ else
+ idev = NULL;
+
+ if (idev != NULL)
+ result = sci_remote_device_frame_handler(idev, frame_index);
+ else
+ sci_controller_release_frame(ihost, frame_index);
+ }
+ }
+
+ if (result != SCI_SUCCESS) {
+ /*
+ * / @todo Is there any reason to report some additional error message
+ * / when we get this failure notification? */
+ }
+}
+
+static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
+{
+ struct isci_remote_device *idev;
+ struct isci_request *ireq;
+ struct isci_phy *iphy;
+ u32 index;
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ switch (scu_get_event_type(ent)) {
+ case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
+ /* / @todo The driver did something wrong and we need to fix the condition. */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received SMU command error "
+ "0x%x\n",
+ __func__,
+ ihost,
+ ent);
+ break;
+
+ case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
+ case SCU_EVENT_TYPE_SMU_ERROR:
+ case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
+ /*
+ * / @todo This is a hardware failure and it's likely that we want to
+ * / reset the controller. */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received fatal controller "
+ "event 0x%x\n",
+ __func__,
+ ihost,
+ ent);
+ break;
+
+ case SCU_EVENT_TYPE_TRANSPORT_ERROR:
+ ireq = ihost->reqs[index];
+ sci_io_request_event_handler(ireq, ent);
+ break;
+
+ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+ switch (scu_get_event_specifier(ent)) {
+ case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
+ case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
+ ireq = ihost->reqs[index];
+ if (ireq != NULL)
+ sci_io_request_event_handler(ireq, ent);
+ else
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received "
+ "event 0x%x for io request object "
+ "that doesnt exist.\n",
+ __func__,
+ ihost,
+ ent);
+
+ break;
+
+ case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
+ idev = ihost->device_table[index];
+ if (idev != NULL)
+ sci_remote_device_event_handler(idev, ent);
+ else
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received "
+ "event 0x%x for remote device object "
+ "that doesnt exist.\n",
+ __func__,
+ ihost,
+ ent);
+
+ break;
+ }
+ break;
+
+ case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+ /*
+ * direct the broadcast change event to the phy first and then let
+ * the phy redirect the broadcast change to the port object */
+ case SCU_EVENT_TYPE_ERR_CNT_EVENT:
+ /*
+ * direct error counter event to the phy object since that is where
+ * we get the event notification. This is a type 4 event. */
+ case SCU_EVENT_TYPE_OSSP_EVENT:
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ sci_phy_event_handler(iphy, ent);
+ break;
+
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ case SCU_EVENT_TYPE_RNC_OPS_MISC:
+ if (index < ihost->remote_node_entries) {
+ idev = ihost->device_table[index];
+
+ if (idev != NULL)
+ sci_remote_device_event_handler(idev, ent);
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received event 0x%x "
+ "for remote device object 0x%0x that doesnt "
+ "exist.\n",
+ __func__,
+ ihost,
+ ent,
+ index);
+
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller received unknown event code %x\n",
+ __func__,
+ ent);
+ break;
+ }
+}
+
+static void sci_controller_process_completions(struct isci_host *ihost)
+{
+ u32 completion_count = 0;
+ u32 ent;
+ u32 get_index;
+ u32 get_cycle;
+ u32 event_get;
+ u32 event_cycle;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue begining get:0x%08x\n",
+ __func__,
+ ihost->completion_queue_get);
+
+ /* Get the component parts of the completion queue */
+ get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
+ get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
+
+ event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
+ event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
+
+ while (
+ NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
+ == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
+ ) {
+ completion_count++;
+
+ ent = ihost->completion_queue[get_index];
+
+ /* increment the get pointer and check for rollover to toggle the cycle bit */
+ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
+ (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
+ get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue entry:0x%08x\n",
+ __func__,
+ ent);
+
+ switch (SCU_GET_COMPLETION_TYPE(ent)) {
+ case SCU_COMPLETION_TYPE_TASK:
+ sci_controller_task_completion(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_SDMA:
+ sci_controller_sdma_completion(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_UFI:
+ sci_controller_unsolicited_frame(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_EVENT:
+ case SCU_COMPLETION_TYPE_NOTIFY: {
+ event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
+ (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
+ event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
+
+ sci_controller_event_completion(ihost, ent);
+ break;
+ }
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller received unknown "
+ "completion type %x\n",
+ __func__,
+ ent);
+ break;
+ }
+ }
+
+ /* Update the get register if we completed one or more entries */
+ if (completion_count > 0) {
+ ihost->completion_queue_get =
+ SMU_CQGR_GEN_BIT(ENABLE) |
+ SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
+ event_cycle |
+ SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
+ get_cycle |
+ SMU_CQGR_GEN_VAL(POINTER, get_index);
+
+ writel(ihost->completion_queue_get,
+ &ihost->smu_registers->completion_queue_get);
+
+ }
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue ending get:0x%08x\n",
+ __func__,
+ ihost->completion_queue_get);
+
+}
+
+static void sci_controller_error_handler(struct isci_host *ihost)
+{
+ u32 interrupt_status;
+
+ interrupt_status =
+ readl(&ihost->smu_registers->interrupt_status);
+
+ if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
+ sci_controller_completion_queue_has_entries(ihost)) {
+
+ sci_controller_process_completions(ihost);
+ writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
+ } else {
+ dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
+ interrupt_status);
+
+ sci_change_state(&ihost->sm, SCIC_FAILED);
+
+ return;
+ }
+
+ /* If we don't process any completions, I am not sure that we want to do this.
+ * We are in the middle of a hardware fault and should probably be reset.
+ */
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+irqreturn_t isci_intx_isr(int vec, void *data)
+{
+ irqreturn_t ret = IRQ_NONE;
+ struct isci_host *ihost = data;
+
+ if (sci_controller_isr(ihost)) {
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+ tasklet_schedule(&ihost->completion_tasklet);
+ ret = IRQ_HANDLED;
+ } else if (sci_controller_error_isr(ihost)) {
+ spin_lock(&ihost->scic_lock);
+ sci_controller_error_handler(ihost);
+ spin_unlock(&ihost->scic_lock);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+irqreturn_t isci_error_isr(int vec, void *data)
+{
+ struct isci_host *ihost = data;
+
+ if (sci_controller_error_isr(ihost))
+ sci_controller_error_handler(ihost);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * isci_host_start_complete() - This function is called by the core library,
+ * through the ISCI Module, to indicate controller start status.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @completion_status: This parameter specifies the completion status from the
+ * core library.
+ *
+ */
+static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+ if (completion_status != SCI_SUCCESS)
+ dev_info(&ihost->pdev->dev,
+ "controller start timed out, continuing...\n");
+ isci_host_change_state(ihost, isci_ready);
+ clear_bit(IHOST_START_PENDING, &ihost->flags);
+ wake_up(&ihost->eventq);
+}
+
+int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
+ if (test_bit(IHOST_START_PENDING, &ihost->flags))
+ return 0;
+
+ /* todo: use sas_flush_discovery once it is upstream */
+ scsi_flush_work(shost);
+
+ scsi_flush_work(shost);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: ihost->status = %d, time = %ld\n",
+ __func__, isci_host_get_state(ihost), time);
+
+ return 1;
+
+}
+
+/**
+ * sci_controller_get_suggested_start_timeout() - This method returns the
+ * suggested sci_controller_start() timeout amount. The user is free to
+ * use any timeout value, but this method provides the suggested minimum
+ * start timeout value. The returned value is based upon empirical
+ * information determined as a result of interoperability testing.
+ * @controller: the handle to the controller object for which to return the
+ * suggested start timeout.
+ *
+ * This method returns the number of milliseconds for the suggested start
+ * operation timeout.
+ */
+static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
+{
+ /* Validate the user supplied parameters. */
+ if (!ihost)
+ return 0;
+
+ /*
+ * The suggested minimum timeout value for a controller start operation:
+ *
+ * Signature FIS Timeout
+ * + Phy Start Timeout
+ * + Number of Phy Spin Up Intervals
+ * ---------------------------------
+ * Number of milliseconds for the controller start operation.
+ *
+ * NOTE: The number of phy spin up intervals will be equivalent
+ * to the number of phys divided by the number of phys allowed
+ * per interval - 1 (once OEM parameters are supported).
+ * Currently we assume only 1 phy per interval. */
+
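+ /*
+ * For example, assuming SCI_MAX_PHYS is 4 (the per-controller phy count
+ * used elsewhere in this driver), this evaluates to
+ * SCIC_SDS_SIGNATURE_FIS_TIMEOUT + 100 + 3 * 500 milliseconds.
+ */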
+ return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+ + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
+ + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+}
+
+static void sci_controller_enable_interrupts(struct isci_host *ihost)
+{
+ BUG_ON(ihost->smu_registers == NULL);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+void sci_controller_disable_interrupts(struct isci_host *ihost)
+{
+ BUG_ON(ihost->smu_registers == NULL);
+ writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+}
+
+static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
+{
+ u32 port_task_scheduler_value;
+
+ port_task_scheduler_value =
+ readl(&ihost->scu_registers->peg0.ptsg.control);
+ port_task_scheduler_value |=
+ (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
+ SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
+ writel(port_task_scheduler_value,
+ &ihost->scu_registers->peg0.ptsg.control);
+}
+
+static void sci_controller_assign_task_entries(struct isci_host *ihost)
+{
+ u32 task_assignment;
+
+ /*
+ * Assign all the TCs to function 0
+ * TODO: Do we actually need to read this register to write it back?
+ */
+
+ task_assignment =
+ readl(&ihost->smu_registers->task_context_assignment[0]);
+
+ task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
+ (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
+ (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
+
+ writel(task_assignment,
+ &ihost->smu_registers->task_context_assignment[0]);
+
+}
+
+static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
+{
+ u32 index;
+ u32 completion_queue_control_value;
+ u32 completion_queue_get_value;
+ u32 completion_queue_put_value;
+
+ ihost->completion_queue_get = 0;
+
+ completion_queue_control_value =
+ (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
+ SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
+
+ writel(completion_queue_control_value,
+ &ihost->smu_registers->completion_queue_control);
+
+
+ /* Set the completion queue get pointer and enable the queue */
+ completion_queue_get_value = (
+ (SMU_CQGR_GEN_VAL(POINTER, 0))
+ | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
+ | (SMU_CQGR_GEN_BIT(ENABLE))
+ | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
+ );
+
+ writel(completion_queue_get_value,
+ &ihost->smu_registers->completion_queue_get);
+
+ /* Set the completion queue put pointer */
+ completion_queue_put_value = (
+ (SMU_CQPR_GEN_VAL(POINTER, 0))
+ | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
+ );
+
+ writel(completion_queue_put_value,
+ &ihost->smu_registers->completion_queue_put);
+
+ /* Initialize the cycle bit of the completion queue entries */
+ for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
+ /*
+ * If get.cycle_bit != completion_queue.cycle_bit
+ * it's not a valid completion queue entry
+ * so at system start all entries are invalid */
+ ihost->completion_queue[index] = 0x80000000;
+ }
+}
+
+static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
+{
+ u32 frame_queue_control_value;
+ u32 frame_queue_get_value;
+ u32 frame_queue_put_value;
+
+ /* Write the queue size */
+ frame_queue_control_value =
+ SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
+
+ writel(frame_queue_control_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
+
+ /* Setup the get pointer for the unsolicited frame queue */
+ frame_queue_get_value = (
+ SCU_UFQGP_GEN_VAL(POINTER, 0)
+ | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
+ );
+
+ writel(frame_queue_get_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+ /* Setup the put pointer for the unsolicited frame queue */
+ frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
+ writel(frame_queue_put_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
+}
+
+static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+{
+ if (ihost->sm.current_state_id == SCIC_STARTING) {
+ /*
+ * We move into the ready state, because some of the phys/ports
+ * may be up and operational.
+ */
+ sci_change_state(&ihost->sm, SCIC_READY);
+
+ isci_host_start_complete(ihost, status);
+ }
+}
+
+static bool is_phy_starting(struct isci_phy *iphy)
+{
+ enum sci_phy_states state;
+
+ state = iphy->sm.current_state_id;
+ switch (state) {
+ case SCI_PHY_STARTING:
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * sci_controller_start_next_phy - start phy
+ * @scic: controller
+ *
+ * If all the phys have been started, then attempt to transition the
+ * controller to the READY state and inform the user
+ * (sci_cb_controller_start_complete()).
+ */
+static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
+{
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ struct isci_phy *iphy;
+ enum sci_status status;
+
+ status = SCI_SUCCESS;
+
+ if (ihost->phy_startup_timer_pending)
+ return status;
+
+ if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
+ bool is_controller_start_complete = true;
+ u32 state;
+ u8 index;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ iphy = &ihost->phys[index];
+ state = iphy->sm.current_state_id;
+
+ if (!phy_get_non_dummy_port(iphy))
+ continue;
+
+ /* The controller start operation is complete iff:
+ * - all links have been given an opportunity to start, and every link
+ * either has no indication of a connected device or has an indication
+ * of a connected device and has finished the link training process.
+ */
+ if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+ (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+ (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+ is_controller_start_complete = false;
+ break;
+ }
+ }
+
+ /*
+ * The controller has successfully finished the start process.
+ * Inform the SCI Core user and transition to the READY state. */
+ if (is_controller_start_complete == true) {
+ sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+ sci_del_timer(&ihost->phy_timer);
+ ihost->phy_startup_timer_pending = false;
+ }
+ } else {
+ iphy = &ihost->phys[ihost->next_phy_to_start];
+
+ if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ if (phy_get_non_dummy_port(iphy) == NULL) {
+ ihost->next_phy_to_start++;
+
+ /* Caution: recursion ahead, be forewarned.
+ *
+ * The PHY was never added to a PORT in MPC mode,
+ * so start the next phy in sequence. This phy
+ * will never go link up and will not draw power;
+ * the OEM parameters either configured the phy
+ * incorrectly for the PORT or it was never
+ * assigned to a PORT.
+ */
+ return sci_controller_start_next_phy(ihost);
+ }
+ }
+
+ status = sci_phy_start(iphy);
+
+ if (status == SCI_SUCCESS) {
+ sci_mod_timer(&ihost->phy_timer,
+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
+ ihost->phy_startup_timer_pending = true;
+ } else {
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed "
+ "to stop phy %d because of status "
+ "%d.\n",
+ __func__,
+ ihost->phys[ihost->next_phy_to_start].phy_index,
+ status);
+ }
+
+ ihost->next_phy_to_start++;
+ }
+
+ return status;
+}
+
+static void phy_startup_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
+ unsigned long flags;
+ enum sci_status status;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ ihost->phy_startup_timer_pending = false;
+
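+ /*
+ * sci_controller_start_next_phy() advances next_phy_to_start on every
+ * attempt, so this loop walks the remaining phys until one starts
+ * successfully (or all phys have been visited, which also returns
+ * SCI_SUCCESS).
+ */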
+ do {
+ status = sci_controller_start_next_phy(ihost);
+ } while (status != SCI_SUCCESS);
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static u16 isci_tci_active(struct isci_host *ihost)
+{
+ return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+static enum sci_status sci_controller_start(struct isci_host *ihost,
+ u32 timeout)
+{
+ enum sci_status result;
+ u16 index;
+
+ if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller start operation requested in "
+ "invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ /* Build the TCi free pool */
+ BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
+ ihost->tci_head = 0;
+ ihost->tci_tail = 0;
+ for (index = 0; index < ihost->task_context_entries; index++)
+ isci_tci_free(ihost, index);
+
+ /* Build the RNi free pool */
+ sci_remote_node_table_initialize(&ihost->available_remote_nodes,
+ ihost->remote_node_entries);
+
+ /*
+ * Before anything else lets make sure we will not be
+ * interrupted by the hardware.
+ */
+ sci_controller_disable_interrupts(ihost);
+
+ /* Enable the port task scheduler */
+ sci_controller_enable_port_task_scheduler(ihost);
+
+ /* Assign all the task entries to ihost physical function */
+ sci_controller_assign_task_entries(ihost);
+
+ /* Now initialize the completion queue */
+ sci_controller_initialize_completion_queue(ihost);
+
+ /* Initialize the unsolicited frame queue for use */
+ sci_controller_initialize_unsolicited_frame_queue(ihost);
+
+ /* Start all of the ports on this controller */
+ for (index = 0; index < ihost->logical_port_entries; index++) {
+ struct isci_port *iport = &ihost->ports[index];
+
+ result = sci_port_start(iport);
+ if (result)
+ return result;
+ }
+
+ sci_controller_start_next_phy(ihost);
+
+ sci_mod_timer(&ihost->timer, timeout);
+
+ sci_change_state(&ihost->sm, SCIC_STARTING);
+
+ return SCI_SUCCESS;
+}
+
+void isci_host_scan_start(struct Scsi_Host *shost)
+{
+ struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+ unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
+
+ set_bit(IHOST_START_PENDING, &ihost->flags);
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_start(ihost, tmo);
+ sci_controller_enable_interrupts(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+}
+
+static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+ isci_host_change_state(ihost, isci_stopped);
+ sci_controller_disable_interrupts(ihost);
+ clear_bit(IHOST_STOP_PENDING, &ihost->flags);
+ wake_up(&ihost->eventq);
+}
+
+static void sci_controller_completion_handler(struct isci_host *ihost)
+{
+ /* Empty out the completion queue */
+ if (sci_controller_completion_queue_has_entries(ihost))
+ sci_controller_process_completions(ihost);
+
+ /* Clear the interrupt and enable all interrupts again */
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+ /* Could we write the value of SMU_ISR_COMPLETION? */
+ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+/**
+ * isci_host_completion_routine() - This function is the delayed service
+ * routine that calls the sci core library's completion handler. It's
+ * scheduled as a tasklet from the interrupt service routine when interrupts
+ * are in use, or set as the timeout function in polled mode.
+ * @data: This parameter specifies the ISCI host object
+ *
+ */
+static void isci_host_completion_routine(unsigned long data)
+{
+ struct isci_host *ihost = (struct isci_host *)data;
+ struct list_head completed_request_list;
+ struct list_head errored_request_list;
+ struct list_head *current_position;
+ struct list_head *next_position;
+ struct isci_request *request;
+ struct isci_request *next_request;
+ struct sas_task *task;
+
+ INIT_LIST_HEAD(&completed_request_list);
+ INIT_LIST_HEAD(&errored_request_list);
+
+ spin_lock_irq(&ihost->scic_lock);
+
+ sci_controller_completion_handler(ihost);
+
+ /* Take the lists of completed I/Os from the host. */
+
+ list_splice_init(&ihost->requests_to_complete,
+ &completed_request_list);
+
+ /* Take the list of errored I/Os from the host. */
+ list_splice_init(&ihost->requests_to_errorback,
+ &errored_request_list);
+
+ spin_unlock_irq(&ihost->scic_lock);
+
+ /* Process any completions in the lists. */
+ list_for_each_safe(current_position, next_position,
+ &completed_request_list) {
+
+ request = list_entry(current_position, struct isci_request,
+ completed_node);
+ task = isci_request_access_task(request);
+
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Normal - request/task = %p/%p\n",
+ __func__,
+ request,
+ task);
+
+ /* Return the task to libsas */
+ if (task != NULL) {
+
+ task->lldd_task = NULL;
+ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+ /* If the task is already in the abort path,
+ * the task_done callback cannot be called.
+ */
+ task->task_done(task);
+ }
+ }
+
+ spin_lock_irq(&ihost->scic_lock);
+ isci_free_tag(ihost, request->io_tag);
+ spin_unlock_irq(&ihost->scic_lock);
+ }
+ list_for_each_entry_safe(request, next_request, &errored_request_list,
+ completed_node) {
+
+ task = isci_request_access_task(request);
+
+ /* Use sas_task_abort */
+ dev_warn(&ihost->pdev->dev,
+ "%s: Error - request/task = %p/%p\n",
+ __func__,
+ request,
+ task);
+
+ if (task != NULL) {
+
+ /* Put the task into the abort path if it's not there
+ * already.
+ */
+ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
+ sas_task_abort(task);
+
+ } else {
+ /* This is a case where the request has completed with a
+ * status such that it needed further target servicing,
+ * but the sas_task reference has already been removed
+ * from the request. Since it was errored, it was not
+ * being aborted, so there is nothing to do except free
+ * it.
+ */
+
+ spin_lock_irq(&ihost->scic_lock);
+ /* Remove the request from the remote device's list
+ * of pending requests.
+ */
+ list_del_init(&request->dev_node);
+ isci_free_tag(ihost, request->io_tag);
+ spin_unlock_irq(&ihost->scic_lock);
+ }
+ }
+
+}
+
+/**
+ * sci_controller_stop() - This method will stop an individual controller
+ * object. This method will invoke the associated user callback upon
+ * completion. The completion callback is called when the following
+ * conditions are met: (1) the method return status is SCI_SUCCESS and (2)
+ * the controller has been quiesced. This method will ensure that all IO
+ * requests are quiesced, phys are stopped, and all additional operation by
+ * the hardware is halted.
+ * @controller: the handle to the controller object to stop.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * stop operation should complete.
+ *
+ * The controller must be in the STARTED or STOPPED state. Indicate if the
+ * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
+ * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
+ * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
+ * controller is not either in the STARTED or STOPPED states.
+ */
+static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
+{
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller stop operation requested in "
+ "invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_mod_timer(&ihost->timer, timeout);
+ sci_change_state(&ihost->sm, SCIC_STOPPING);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_reset() - This method will reset the supplied core
+ * controller regardless of the state of said controller. This operation is
+ * considered destructive. In other words, all current operations are wiped
+ * out. No IO completions for outstanding devices occur. Outstanding IO
+ * requests are not aborted or completed at the actual remote device.
+ * @controller: the handle to the controller object to reset.
+ *
+ * Indicate if the controller reset method succeeded or failed in some way.
+ * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
+ * the controller reset operation is unable to complete.
+ */
+static enum sci_status sci_controller_reset(struct isci_host *ihost)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_RESET:
+ case SCIC_READY:
+ case SCIC_STOPPED:
+ case SCIC_FAILED:
+ /*
+ * The reset operation is not a graceful cleanup, just
+ * perform the state transition.
+ */
+ sci_change_state(&ihost->sm, SCIC_RESETTING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller reset operation requested in "
+ "invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+void isci_host_deinit(struct isci_host *ihost)
+{
+ int i;
+
+ isci_host_change_state(ihost, isci_stopping);
+ for (i = 0; i < SCI_MAX_PORTS; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+ struct isci_remote_device *idev, *d;
+
+ list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
+ if (test_bit(IDEV_ALLOCATED, &idev->flags))
+ isci_remote_device_stop(ihost, idev);
+ }
+ }
+
+ set_bit(IHOST_STOP_PENDING, &ihost->flags);
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
+ spin_unlock_irq(&ihost->scic_lock);
+
+ wait_for_stop(ihost);
+ sci_controller_reset(ihost);
+
+ /* Cancel any/all outstanding port timers */
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+ del_timer_sync(&iport->timer.timer);
+ }
+
+ /* Cancel any/all outstanding phy timers */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct isci_phy *iphy = &ihost->phys[i];
+ del_timer_sync(&iphy->sata_timer.timer);
+ }
+
+ del_timer_sync(&ihost->port_agent.timer.timer);
+
+ del_timer_sync(&ihost->power_control.timer.timer);
+
+ del_timer_sync(&ihost->timer.timer);
+
+ del_timer_sync(&ihost->phy_timer.timer);
+}
+
+static void __iomem *scu_base(struct isci_host *isci_host)
+{
+ struct pci_dev *pdev = isci_host->pdev;
+ int id = isci_host->id;
+
+ return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
+}
+
+static void __iomem *smu_base(struct isci_host *isci_host)
+{
+ struct pci_dev *pdev = isci_host->pdev;
+ int id = isci_host->id;
+
+ return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
+}
+
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct sci_phy_user_params *u_phy = &u->phys[i];
+
+ u_phy->max_speed_generation = phy_gen;
+
+ /* we are not exporting these for now */
+ u_phy->align_insertion_frequency = 0x7f;
+ u_phy->in_connection_align_insertion_frequency = 0xff;
+ u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+ }
+
+ u->stp_inactivity_timeout = stp_inactive_to;
+ u->ssp_inactivity_timeout = ssp_inactive_to;
+ u->stp_max_occupancy_timeout = stp_max_occ_to;
+ u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+ u->no_outbound_task_timeout = no_outbound_task_to;
+ u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+}
+
+static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_del_timer(&ihost->timer);
+}
+
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
+#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
+#define INTERRUPT_COALESCE_NUMBER_MAX 256
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
+
+/**
+ * sci_controller_set_interrupt_coalescence() - This method allows the user to
+ * configure the interrupt coalescence.
+ * @controller: This parameter represents the handle to the controller object
+ * for which its interrupt coalesce register is overridden.
+ * @coalesce_number: Used to control the number of entries in the Completion
+ * Queue before an interrupt is generated. If the number of entries exceed
+ * this number, an interrupt will be generated. The valid range of the input
+ * is [0, 256]. A setting of 0 results in coalescing being disabled.
+ * @coalesce_timeout: Timeout value in microseconds. The valid range of the
+ * input is [0, 2700000]. A setting of 0 is allowed and results in no
+ * interrupt coalescing timeout.
+ *
+ * Indicate if the user successfully set the interrupt coalesce parameters.
+ * SCI_SUCCESS The user successfully updated the interrupt coalescence.
+ * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
+ */
+static enum sci_status
+sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
+ u32 coalesce_number,
+ u32 coalesce_timeout)
+{
+ u8 timeout_encode = 0;
+ u32 min = 0;
+ u32 max = 0;
+
+ /* Check if the input parameters fall in the range. */
+ if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ /*
+ * Defined encoding for interrupt coalescing timeout:
+ * Value Min Max Units
+ * ----- --- --- -----
+ * 0 - - Disabled
+ * 1 13.3 20.0 ns
+ * 2 26.7 40.0
+ * 3 53.3 80.0
+ * 4 106.7 160.0
+ * 5 213.3 320.0
+ * 6 426.7 640.0
+ * 7 853.3 1280.0
+ * 8 1.7 2.6 us
+ * 9 3.4 5.1
+ * 10 6.8 10.2
+ * 11 13.7 20.5
+ * 12 27.3 41.0
+ * 13 54.6 81.9
+ * 14 109.2 163.8
+ * 15 218.5 327.7
+ * 16 436.9 655.4
+ * 17 873.8 1310.7
+ * 18 1.7 2.6 ms
+ * 19 3.5 5.2
+ * 20 7.0 10.5
+ * 21 14.0 21.0
+ * 22 28.0 41.9
+ * 23 55.9 83.9
+ * 24 111.8 167.8
+ * 25 223.7 335.5
+ * 26 447.4 671.1
+ * 27 894.8 1342.2
+ * 28 1.8 2.7 s
+ * Others Undefined */
+
+ /*
+ * Use the table above to decide the encoding of the interrupt coalescing
+ * timeout value for register writing. */
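+ /*
+ * For example, the default 250 us timeout programmed from
+ * sci_controller_ready_state_enter() becomes 25000 in 10 ns units;
+ * doubling min/max from 85/128 lands it in the [min, max) window at
+ * timeout_encode 15, i.e. the 218.5 us - 327.7 us row of the table
+ * above.
+ */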
+ if (coalesce_timeout == 0)
+ timeout_encode = 0;
+ else {
+ /* convert the timeout value to units of 10 ns. */
+ coalesce_timeout = coalesce_timeout * 100;
+ min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
+ max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
+
+ /* find the timeout encoding for the register write. */
+ for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
+ timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
+ timeout_encode++) {
+ if (min <= coalesce_timeout && max > coalesce_timeout)
+ break;
+ else if (coalesce_timeout >= max && coalesce_timeout < min * 2
+ && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
+ if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
+ break;
+ else {
+ timeout_encode++;
+ break;
+ }
+ } else {
+ max = max * 2;
+ min = min * 2;
+ }
+ }
+
+ if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
+ /* the value is out of range. */
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
+ SMU_ICC_GEN_VAL(TIMER, timeout_encode),
+ &ihost->smu_registers->interrupt_coalesce_control);
+
+
+ ihost->interrupt_coalesce_number = (u16)coalesce_number;
+ ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
+
+ return SCI_SUCCESS;
+}
+
+
+static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* set the default interrupt coalescence number (0x10 entries) and timeout (250 us). */
+ sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+}
+
+static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* disable interrupt coalescence. */
+ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status phy_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ phy_status = sci_phy_stop(&ihost->phys[index]);
+
+ if (phy_status != SCI_SUCCESS &&
+ phy_status != SCI_FAILURE_INVALID_STATE) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to stop "
+ "phy %d because of status %d.\n",
+ __func__,
+ ihost->phys[index].phy_index, phy_status);
+ }
+ }
+
+ return status;
+}
+
+static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status port_status;
+ enum sci_status status = SCI_SUCCESS;
+
+ for (index = 0; index < ihost->logical_port_entries; index++) {
+ struct isci_port *iport = &ihost->ports[index];
+
+ port_status = sci_port_stop(iport);
+
+ if ((port_status != SCI_SUCCESS) &&
+ (port_status != SCI_FAILURE_INVALID_STATE)) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to "
+ "stop port %d because of status %d.\n",
+ __func__,
+ iport->logical_port_index,
+ port_status);
+ }
+ }
+
+ return status;
+}
+
+static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status device_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < ihost->remote_node_entries; index++) {
+ if (ihost->device_table[index] != NULL) {
+ /* @todo What timeout value do we want to provide to this request? */
+ device_status = sci_remote_device_stop(ihost->device_table[index], 0);
+
+ if ((device_status != SCI_SUCCESS) &&
+ (device_status != SCI_FAILURE_INVALID_STATE)) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed "
+ "to stop device 0x%p because of "
+ "status %d.\n",
+ __func__,
+ ihost->device_table[index], device_status);
+ }
+ }
+ }
+
+ return status;
+}
+
+static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* Stop all of the components for this controller */
+ sci_controller_stop_phys(ihost);
+ sci_controller_stop_ports(ihost);
+ sci_controller_stop_devices(ihost);
+}
+
+static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_del_timer(&ihost->timer);
+}
+
+static void sci_controller_reset_hardware(struct isci_host *ihost)
+{
+ /* Disable interrupts so we don't take any spurious interrupts */
+ sci_controller_disable_interrupts(ihost);
+
+ /* Reset the SCU */
+ writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
+
+ /* Delay for 1ms before clearing the CQP and UFQPR. */
+ udelay(1000);
+
+ /* The write to the CQGR clears the CQP */
+ writel(0x00000000, &ihost->smu_registers->completion_queue_get);
+
+ /* The write to the UFQGP clears the UFQPR */
+ writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_controller_reset_hardware(ihost);
+ sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
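+/* Per-state entry/exit actions for the controller state machine; states
+ * listed with empty initializers have no entry or exit handlers.
+ */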
+static const struct sci_base_state sci_controller_state_table[] = {
+ [SCIC_INITIAL] = {
+ .enter_state = sci_controller_initial_state_enter,
+ },
+ [SCIC_RESET] = {},
+ [SCIC_INITIALIZING] = {},
+ [SCIC_INITIALIZED] = {},
+ [SCIC_STARTING] = {
+ .exit_state = sci_controller_starting_state_exit,
+ },
+ [SCIC_READY] = {
+ .enter_state = sci_controller_ready_state_enter,
+ .exit_state = sci_controller_ready_state_exit,
+ },
+ [SCIC_RESETTING] = {
+ .enter_state = sci_controller_resetting_state_enter,
+ },
+ [SCIC_STOPPING] = {
+ .enter_state = sci_controller_stopping_state_enter,
+ .exit_state = sci_controller_stopping_state_exit,
+ },
+ [SCIC_STOPPED] = {},
+ [SCIC_FAILED] = {}
+};
+
+static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
+{
+ /* these defaults are overridden by the platform / firmware */
+ u16 index;
+
+ /* Default to APC mode. */
+ ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+ /* Default to one concurrent device spin up. */
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+
+ /* Default to no SSC operation. */
+ ihost->oem_parameters.controller.do_enable_ssc = false;
+
+ /* Initialize all of the port parameter information to narrow ports. */
+ for (index = 0; index < SCI_MAX_PORTS; index++) {
+ ihost->oem_parameters.ports[index].phy_mask = 0;
+ }
+
+ /* Initialize all of the phy parameter information. */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ /* Default to 6G (i.e. Gen 3) for now. */
+ ihost->user_parameters.phys[index].max_speed_generation = 3;
+
+ /* the frequencies cannot be 0 */
+ ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
+ ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
+ ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
+
+ /*
+ * Previous Vitesse based expanders had an arbitration issue that
+ * is worked around by making the upper 32 bits of the SAS address
+ * greater than the Vitesse company identifier.
+ * Hence the usage of 0x5FCFFFFF. */
+ ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
+ ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
+ }
+
+ ihost->user_parameters.stp_inactivity_timeout = 5;
+ ihost->user_parameters.ssp_inactivity_timeout = 5;
+ ihost->user_parameters.stp_max_occupancy_timeout = 5;
+ ihost->user_parameters.ssp_max_occupancy_timeout = 20;
+ ihost->user_parameters.no_outbound_task_timeout = 20;
+}
+
+static void controller_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
+ struct sci_base_state_machine *sm = &ihost->sm;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ if (sm->current_state_id == SCIC_STARTING)
+ sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
+ else if (sm->current_state_id == SCIC_STOPPING) {
+ sci_change_state(sm, SCIC_FAILED);
+ isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
+ } else /* @todo Now what do we want to do in this case? */
+ dev_err(&ihost->pdev->dev,
+ "%s: Controller timer fired when controller was not "
+ "in a state being timed.\n",
+ __func__);
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static enum sci_status sci_controller_construct(struct isci_host *ihost,
+ void __iomem *scu_base,
+ void __iomem *smu_base)
+{
+ u8 i;
+
+ sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
+
+ ihost->scu_registers = scu_base;
+ ihost->smu_registers = smu_base;
+
+ sci_port_configuration_agent_construct(&ihost->port_agent);
+
+ /* Construct the ports for this controller */
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ sci_port_construct(&ihost->ports[i], i, ihost);
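+ /* ports[SCI_MAX_PORTS] is the dummy port that holds phys which have
+ * not yet been assigned to a real port (see the phy loop below). */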
+ sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
+
+ /* Construct the phys for this controller */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ /* Add all the PHYs to the dummy port */
+ sci_phy_construct(&ihost->phys[i],
+ &ihost->ports[SCI_MAX_PORTS], i);
+ }
+
+ ihost->invalid_phy_mask = 0;
+
+ sci_init_timer(&ihost->timer, controller_timeout);
+
+ /* Initialize the User and OEM parameters to default values. */
+ sci_controller_set_default_config_parameters(ihost);
+
+ return sci_controller_reset(ihost);
+}
+
+int sci_oem_parameters_validate(struct sci_oem_params *oem)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ if (oem->phys[i].sas_address.high == 0 &&
+ oem->phys[i].sas_address.low == 0)
+ return -EINVAL;
+
+ if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ if (oem->ports[i].phy_mask != 0)
+ return -EINVAL;
+ } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ u8 phy_mask = 0;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ phy_mask |= oem->ports[i].phy_mask;
+
+ if (phy_mask == 0)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
+{
+ u32 state = ihost->sm.current_state_id;
+
+ if (state == SCIC_RESET ||
+ state == SCIC_INITIALIZING ||
+ state == SCIC_INITIALIZED) {
+
+ if (sci_oem_parameters_validate(&ihost->oem_parameters))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+static void power_control_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
+ struct isci_phy *iphy;
+ unsigned long flags;
+ u8 i;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ ihost->power_control.phys_granted_power = 0;
+
+ if (ihost->power_control.phys_waiting == 0) {
+ ihost->power_control.timer_started = false;
+ goto done;
+ }
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+
+ if (ihost->power_control.phys_waiting == 0)
+ break;
+
+ iphy = ihost->power_control.requesters[i];
+ if (iphy == NULL)
+ continue;
+
+ if (ihost->power_control.phys_granted_power >=
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+ break;
+
+ ihost->power_control.requesters[i] = NULL;
+ ihost->power_control.phys_waiting--;
+ ihost->power_control.phys_granted_power++;
+ sci_phy_consume_power_handler(iphy);
+ }
+
+ /*
+ * It doesn't matter if the power list is empty, we need to start the
+ * timer in case another phy becomes ready.
+ */
+ sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+ ihost->power_control.timer_started = true;
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
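+/* Grant spin-up power to a phy immediately while the number of powered-up
+ * phys is below max_concurrent_dev_spin_up; otherwise park the phy in the
+ * requesters table so power_control_timeout() can grant it a slot later.
+ */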
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ BUG_ON(iphy == NULL);
+
+ if (ihost->power_control.phys_granted_power <
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+ ihost->power_control.phys_granted_power++;
+ sci_phy_consume_power_handler(iphy);
+
+ /*
+ * stop and start the power_control timer. When the timer fires,
+ * phys_granted_power is reset to 0
+ */
+ if (ihost->power_control.timer_started)
+ sci_del_timer(&ihost->power_control.timer);
+
+ sci_mod_timer(&ihost->power_control.timer,
+ SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+ ihost->power_control.timer_started = true;
+
+ } else {
+ /* Add the phy in the waiting list */
+ ihost->power_control.requesters[iphy->phy_index] = iphy;
+ ihost->power_control.phys_waiting++;
+ }
+}
+
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ BUG_ON(iphy == NULL);
+
+ if (ihost->power_control.requesters[iphy->phy_index])
+ ihost->power_control.phys_waiting--;
+
+ ihost->power_control.requesters[iphy->phy_index] = NULL;
+}
+
+#define AFE_REGISTER_WRITE_DELAY 10
+
+/* Initialize the AFE for each phy. The per-phy AFE setup is read from
+ * the OEM parameters
+ */
+static void sci_controller_afe_initialization(struct isci_host *ihost)
+{
+ const struct sci_oem_params *oem = &ihost->oem_parameters;
+ struct pci_dev *pdev = ihost->pdev;
+ u32 afe_status;
+ u32 phy_id;
+
+ /* Clear DFX Status registers */
+ writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ if (is_b0(pdev)) {
+ /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
+ * Timer, PM Stagger Timer */
+ writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /* Configure bias currents to normal */
+ if (is_a2(pdev))
+ writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+ else if (is_b0(pdev) || is_c0(pdev))
+ writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable PLL */
+ if (is_b0(pdev) || is_c0(pdev))
+ writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
+ else
+ writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Wait for the PLL to lock */
+ do {
+ afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } while ((afe_status & 0x00001000) == 0);
+
+ if (is_a2(pdev)) {
+ /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
+ writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+ const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+
+ if (is_b0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_c0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /*
+ * All defaults, except the Receive Word Alignment/Comma Detect
+ * Enable....(0xe800) */
+ writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else {
+ /*
+ * All defaults, except the Receive Word Alignment/Comma Detect
+ * Enable....(0xe800) */
+ writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /*
+ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+ * & increase TX int & ext bias 20%....(0xe85c) */
+ if (is_a2(pdev))
+ writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ else if (is_b0(pdev)) {
+ /* Power down TX and RX (PWRDNTX and PWRDNRX) */
+ writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /*
+ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+ * & increase TX int & ext bias 20%....(0xe85c) */
+ writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ } else {
+ writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /*
+ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+ * & increase TX int & ext bias 20%....(0xe85c) */
+ writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ }
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ if (is_a2(pdev)) {
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /*
+ * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
+ * RDD=0x0(RX Detect Enabled) ....(0xe800) */
+ writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Leave DFE/FFE on */
+ if (is_a2(pdev))
+ writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ else if (is_b0(pdev)) {
+ writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ } else {
+ writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ }
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control0,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control1,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control2,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control3,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /* Transfer control to the PEs */
+ writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+}
+
+static void sci_controller_initialize_power_control(struct isci_host *ihost)
+{
+ sci_init_timer(&ihost->power_control.timer, power_control_timeout);
+
+ memset(ihost->power_control.requesters, 0,
+ sizeof(ihost->power_control.requesters));
+
+ ihost->power_control.phys_waiting = 0;
+ ihost->power_control.phys_granted_power = 0;
+}
+
+static enum sci_status sci_controller_initialize(struct isci_host *ihost)
+{
+ struct sci_base_state_machine *sm = &ihost->sm;
+ enum sci_status result = SCI_FAILURE;
+ unsigned long i, state, val;
+
+ if (ihost->sm.current_state_id != SCIC_RESET) {
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller initialize operation requested "
+ "in invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(sm, SCIC_INITIALIZING);
+
+ sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
+
+ ihost->next_phy_to_start = 0;
+ ihost->phy_startup_timer_pending = false;
+
+ sci_controller_initialize_power_control(ihost);
+
+ /*
+ * There is nothing to do here for B0 since we do not have to
+ * program the AFE registers.
+ * @todo The AFE settings are supposed to be correct for the B0 but
+ * presently they seem to be wrong. */
+ sci_controller_afe_initialization(ihost);
+
+
+ /* Take the hardware out of reset */
+ writel(0, &ihost->smu_registers->soft_reset_control);
+
+ /*
+ * @todo Provide a meaningful error code for hardware failure
+ * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
+ for (i = 100; i >= 1; i--) {
+ u32 status;
+
+ /* Loop until the hardware reports success */
+ udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+ status = readl(&ihost->smu_registers->control_status);
+
+ if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+ break;
+ }
+ if (i == 0)
+ goto out;
+
+ /*
+ * Determine the actual device capacities that the
+ * hardware will support */
+ val = readl(&ihost->smu_registers->device_context_capacity);
+
+ /* Record the smaller of the two capacity values */
+ ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+ ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+ ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+ /*
+ * Make all PEs that are unassigned match up with the
+ * logical ports
+ */
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct scu_port_task_scheduler_group_registers __iomem
+ *ptsg = &ihost->scu_registers->peg0.ptsg;
+
+ writel(i, &ptsg->protocol_engine[i]);
+ }
+
+ /* Initialize hardware PCI Relaxed ordering in DMA engines */
+ val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+ val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+ writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+ val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+ val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+ writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+ /*
+ * Initialize the PHYs before the PORTs because the PHY registers
+ * are accessed during the port initialization.
+ */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ result = sci_phy_initialize(&ihost->phys[i],
+ &ihost->scu_registers->peg0.pe[i].tl,
+ &ihost->scu_registers->peg0.pe[i].ll);
+ if (result != SCI_SUCCESS)
+ goto out;
+ }
+
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
+ iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
+ iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
+ }
+
+ result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
+
+ out:
+ /* Advance the controller state machine */
+ if (result == SCI_SUCCESS)
+ state = SCIC_INITIALIZED;
+ else
+ state = SCIC_FAILED;
+ sci_change_state(sm, state);
+
+ return result;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+ struct sci_user_parameters *sci_parms)
+{
+ u32 state = ihost->sm.current_state_id;
+
+ if (state == SCIC_RESET ||
+ state == SCIC_INITIALIZING ||
+ state == SCIC_INITIALIZED) {
+ u16 index;
+
+ /*
+ * Validate the user parameters. If they are not legal, then
+ * return a failure.
+ */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct sci_phy_user_params *user_phy;
+
+ user_phy = &sci_parms->phys[index];
+
+ if (!((user_phy->max_speed_generation <=
+ SCIC_SDS_PARM_MAX_SPEED) &&
+ (user_phy->max_speed_generation >
+ SCIC_SDS_PARM_NO_SPEED)))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ if ((user_phy->in_connection_align_insertion_frequency < 3) ||
+     (user_phy->align_insertion_frequency == 0) ||
+     (user_phy->notify_enable_spin_up_insertion_frequency == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ if ((sci_parms->stp_inactivity_timeout == 0) ||
+ (sci_parms->ssp_inactivity_timeout == 0) ||
+ (sci_parms->stp_max_occupancy_timeout == 0) ||
+ (sci_parms->ssp_max_occupancy_timeout == 0) ||
+ (sci_parms->no_outbound_task_timeout == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+ struct device *dev = &ihost->pdev->dev;
+ dma_addr_t dma;
+ size_t size;
+ int err;
+
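+ /*
+ * Allocate the DMA-coherent regions the hardware consumes directly
+ * (completion queue, remote node context table, task context table and
+ * unsolicited frame buffers) and program their bus addresses into the
+ * SMU/SCU registers below.
+ */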
+ size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
+ ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ if (!ihost->completion_queue)
+ return -ENOMEM;
+
+ writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
+ writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
+
+ size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
+ ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
+ GFP_KERNEL);
+ if (!ihost->remote_node_context_table)
+ return -ENOMEM;
+
+ writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
+ writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
+
+ size = ihost->task_context_entries * sizeof(struct scu_task_context);
+ ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ if (!ihost->task_context_table)
+ return -ENOMEM;
+
+ ihost->task_context_dma = dma;
+ writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
+ writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
+
+ err = sci_unsolicited_frame_control_construct(ihost);
+ if (err)
+ return err;
+
+ /*
+ * Inform the silicon as to the location of the UF headers and
+ * address table.
+ */
+ writel(lower_32_bits(ihost->uf_control.headers.physical_address),
+ &ihost->scu_registers->sdma.uf_header_base_address_lower);
+ writel(upper_32_bits(ihost->uf_control.headers.physical_address),
+ &ihost->scu_registers->sdma.uf_header_base_address_upper);
+
+ writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
+ &ihost->scu_registers->sdma.uf_address_table_lower);
+ writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
+ &ihost->scu_registers->sdma.uf_address_table_upper);
+
+ return 0;
+}
+
+int isci_host_init(struct isci_host *ihost)
+{
+ int err = 0, i;
+ enum sci_status status;
+ struct sci_user_parameters sci_user_params;
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+
+ spin_lock_init(&ihost->state_lock);
+ spin_lock_init(&ihost->scic_lock);
+ init_waitqueue_head(&ihost->eventq);
+
+ isci_host_change_state(ihost, isci_starting);
+
+ status = sci_controller_construct(ihost, scu_base(ihost),
+ smu_base(ihost));
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: sci_controller_construct failed - status = %x\n",
+ __func__,
+ status);
+ return -ENODEV;
+ }
+
+ ihost->sas_ha.dev = &ihost->pdev->dev;
+ ihost->sas_ha.lldd_ha = ihost;
+
+ /*
+ * grab initial values stored in the controller object for OEM and USER
+ * parameters
+ */
+ isci_user_parameters_get(&sci_user_params);
+ status = sci_user_parameters_set(ihost, &sci_user_params);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_user_parameters_set failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ /* grab any OEM parameters specified in orom */
+ if (pci_info->orom) {
+ status = isci_parse_oem_parameters(&ihost->oem_parameters,
+ pci_info->orom,
+ ihost->id);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "parsing firmware oem parameters failed\n");
+ return -EINVAL;
+ }
+ }
+
+ status = sci_oem_parameters_set(ihost);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_oem_parameters_set failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ tasklet_init(&ihost->completion_tasklet,
+ isci_host_completion_routine, (unsigned long)ihost);
+
+ INIT_LIST_HEAD(&ihost->requests_to_complete);
+ INIT_LIST_HEAD(&ihost->requests_to_errorback);
+
+ spin_lock_irq(&ihost->scic_lock);
+ status = sci_controller_initialize(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_controller_initialize failed -"
+ " status = 0x%x\n",
+ __func__, status);
+ return -ENODEV;
+ }
+
+ err = sci_controller_mem_init(ihost);
+ if (err)
+ return err;
+
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ isci_port_init(&ihost->ports[i], ihost, i);
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ isci_phy_init(&ihost->phys[i], ihost, i);
+
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ struct isci_remote_device *idev = &ihost->devices[i];
+
+ INIT_LIST_HEAD(&idev->reqs_in_process);
+ INIT_LIST_HEAD(&idev->node);
+ }
+
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq;
+ dma_addr_t dma;
+
+ ireq = dmam_alloc_coherent(&ihost->pdev->dev,
+ sizeof(struct isci_request), &dma,
+ GFP_KERNEL);
+ if (!ireq)
+ return -ENOMEM;
+
+ ireq->tc = &ihost->task_context_table[i];
+ ireq->owning_controller = ihost;
+ spin_lock_init(&ireq->state_lock);
+ ireq->request_daddr = dma;
+ ireq->isci_host = ihost;
+ ihost->reqs[i] = ireq;
+ }
+
+ return 0;
+}
+
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STARTING:
+ sci_del_timer(&ihost->phy_timer);
+ ihost->phy_startup_timer_pending = false;
+ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ sci_controller_start_next_phy(ihost);
+ break;
+ case SCIC_READY:
+ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC Controller linkup event from phy %d in "
+ "unexpected state %d\n", __func__, iphy->phy_index,
+ ihost->sm.current_state_id);
+ }
+}
+
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STARTING:
+ case SCIC_READY:
+ ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC Controller linkdown event from phy %d in "
+ "unexpected state %d\n",
+ __func__,
+ iphy->phy_index,
+ ihost->sm.current_state_id);
+ }
+}
+
+static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+{
+ u32 index;
+
+ for (index = 0; index < ihost->remote_node_entries; index++) {
+ if ((ihost->device_table[index] != NULL) &&
+ (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
+ return true;
+ }
+
+ return false;
+}
+
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ if (ihost->sm.current_state_id != SCIC_STOPPING) {
+ dev_dbg(&ihost->pdev->dev,
+ "SCIC Controller 0x%p remote device stopped event "
+ "from device 0x%p in unexpected state %d\n",
+ ihost, idev,
+ ihost->sm.current_state_id);
+ return;
+ }
+
+ if (!sci_controller_has_remote_devices_stopping(ihost))
+ sci_change_state(&ihost->sm, SCIC_STOPPED);
+}
+
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
+{
+ dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+ __func__, ihost->id, request);
+
+ writel(request, &ihost->smu_registers->post_context_port);
+}
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
+{
+ u16 task_index;
+ u16 task_sequence;
+
+ task_index = ISCI_TAG_TCI(io_tag);
+
+ if (task_index < ihost->task_context_entries) {
+ struct isci_request *ireq = ihost->reqs[task_index];
+
+ if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
+ task_sequence = ISCI_TAG_SEQ(io_tag);
+
+ if (task_sequence == ihost->io_request_sequence[task_index])
+ return ireq;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * sci_controller_allocate_remote_node_context() - This method allocates a
+ * remote node index and reserves the remote node context space for use.
+ * This method can fail if there are no more remote node indices available.
+ * @ihost: This is the controller object which contains the set of
+ * free remote node ids
+ * @idev: This is the device object which is requesting a remote node
+ * id
+ * @node_id: This is the remote node id that is assigned to the device if one
+ * is available
+ *
+ * Returns SCI_SUCCESS when a node index was reserved, or
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote node indices
+ * available.
+ */
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 *node_id)
+{
+ u16 node_index;
+ u32 remote_node_count = sci_remote_device_node_count(idev);
+
+ node_index = sci_remote_node_table_allocate_remote_node(
+ &ihost->available_remote_nodes, remote_node_count
+ );
+
+ if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ ihost->device_table[node_index] = idev;
+
+ *node_id = node_index;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 node_id)
+{
+ u32 remote_node_count = sci_remote_device_node_count(idev);
+
+ if (ihost->device_table[node_id] == idev) {
+ ihost->device_table[node_id] = NULL;
+
+ sci_remote_node_table_release_remote_node_index(
+ &ihost->available_remote_nodes, remote_node_count, node_id
+ );
+ }
+}
+
+void sci_controller_copy_sata_response(void *response_buffer,
+ void *frame_header,
+ void *frame_buffer)
+{
+ /* XXX type safety? */
+ memcpy(response_buffer, frame_header, sizeof(u32));
+
+ memcpy(response_buffer + sizeof(u32),
+ frame_buffer,
+ sizeof(struct dev_to_host_fis) - sizeof(u32));
+}
+
+void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
+{
+ if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
+ writel(ihost->uf_control.get,
+ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
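+/* The tci_pool is a circular buffer of free task context indices; head and
+ * tail only ever advance and are masked with SCI_MAX_IO_REQUESTS - 1 (which
+ * must be a power-of-two size for this to work) on every access.
+ */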
+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+ u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
+
+ ihost->tci_pool[tail] = tci;
+ ihost->tci_tail = tail + 1;
+}
+
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+ u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
+ u16 tci = ihost->tci_pool[head];
+
+ ihost->tci_head = head + 1;
+ return tci;
+}
+
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+ return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
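+/* An allocated tag pairs a free tci with that tci's current sequence number;
+ * isci_free_tag() only accepts a tag whose sequence number still matches,
+ * which catches stale or duplicate frees.
+ */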
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+ if (isci_tci_space(ihost)) {
+ u16 tci = isci_tci_alloc(ihost);
+ u8 seq = ihost->io_request_sequence[tci];
+
+ return ISCI_TAG(seq, tci);
+ }
+
+ return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+ u16 tci = ISCI_TAG_TCI(io_tag);
+ u16 seq = ISCI_TAG_SEQ(io_tag);
+
+ /* prevent tail from passing head */
+ if (isci_tci_active(ihost) == 0)
+ return SCI_FAILURE_INVALID_IO_TAG;
+
+ if (seq == ihost->io_request_sequence[tci]) {
+ ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+ isci_tci_free(ihost, tci);
+
+ return SCI_SUCCESS;
+ }
+ return SCI_FAILURE_INVALID_IO_TAG;
+}
+
+enum sci_status sci_controller_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_device_start_io(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ /* terminate an ongoing (i.e. started) core IO request. This does not
+ * abort the IO request at the target, but rather removes the IO
+ * request from the host controller.
+ */
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "invalid state to terminate request\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_io_request_terminate(ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ /*
+ * Utilize the original post context command and or in the POST_TC_ABORT
+ * request sub-type.
+ */
+ sci_controller_post_request(ihost,
+ ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_complete_io() - This method will perform core specific
+ * completion operations for an IO request. After this method is invoked,
+ * the user should consider the IO request as invalid until it is properly
+ * reused (i.e. re-constructed).
+ * @ihost: The handle to the controller object for which to complete the
+ * IO request.
+ * @idev: The handle to the remote device object for which to complete
+ * the IO request.
+ * @ireq: the handle to the io request object to complete.
+ */
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+ u16 index;
+
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STOPPING:
+ /* XXX: Implement this function */
+ return SCI_FAILURE;
+ case SCIC_READY:
+ status = sci_remote_device_complete_io(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ index = ISCI_TAG_TCI(ireq->io_tag);
+ clear_bit(IREQ_ACTIVE, &ireq->flags);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+}
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_start_task() - This method is called by the SCIC user to
+ * send/start a framework task management request.
+ * @ihost: the handle to the controller object for which to start the task
+ * management request.
+ * @idev: the handle to the remote device object for which to start
+ * the task management request.
+ * @ireq: the handle to the task request object to start.
+ */
+enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller starting task from invalid "
+ "state\n",
+ __func__);
+ return SCI_TASK_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_device_start_task(ihost, idev, ireq);
+ switch (status) {
+ case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+
+ /*
+ * We will let the framework know this task request started successfully,
+ * although the core is still working on starting the request (it will
+ * post the TC when the RNC is resumed).
+ */
+ return SCI_SUCCESS;
+ case SCI_SUCCESS:
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644
index 000000000000..062101a39f79
--- /dev/null
+++ b/drivers/scsi/isci/host.h
@@ -0,0 +1,542 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _SCI_HOST_H_
+#define _SCI_HOST_H_
+
+#include "remote_device.h"
+#include "phy.h"
+#include "isci.h"
+#include "remote_node_table.h"
+#include "registers.h"
+#include "unsolicited_frame_control.h"
+#include "probe_roms.h"
+
+struct isci_request;
+struct scu_task_context;
+
+
+/**
+ * struct sci_power_control -
+ *
+ * This structure defines the fields for managing power control for direct
+ * attached disk devices.
+ */
+struct sci_power_control {
+ /**
+ * This field is set when the power control timer is running and cleared when
+ * it is not.
+ */
+ bool timer_started;
+
+ /**
+ * Timer to control when the direct attached disks can consume power.
+ */
+ struct sci_timer timer;
+
+ /**
+ * This field is used to keep track of how many phys are put into the
+ * requesters field.
+ */
+ u8 phys_waiting;
+
+ /**
+ * This field is used to keep track of how many phys have been granted to consume power
+ */
+ u8 phys_granted_power;
+
+ /**
+ * This field is an array of phys that we are waiting on. The phys are
+ * directly mapped into requesters via struct isci_phy.phy_index
+ */
+ struct isci_phy *requesters[SCI_MAX_PHYS];
+
+};
+
+struct sci_port_configuration_agent;
+typedef void (*port_config_fn)(struct isci_host *,
+ struct sci_port_configuration_agent *,
+ struct isci_port *, struct isci_phy *);
+
+struct sci_port_configuration_agent {
+ u16 phy_configured_mask;
+ u16 phy_ready_mask;
+ struct {
+ u8 min_index;
+ u8 max_index;
+ } phy_valid_port_range[SCI_MAX_PHYS];
+ bool timer_pending;
+ port_config_fn link_up_handler;
+ port_config_fn link_down_handler;
+ struct sci_timer timer;
+};
+
+/**
+ * isci_host - primary host/controller object
+ * @timer: timeout start/stop operations
+ * @device_table: rni (hw remote node index) to remote device lookup table
+ * @available_remote_nodes: rni allocator
+ * @power_control: manage device spin up
+ * @io_request_sequence: generation number for tci's (task contexts)
+ * @task_context_table: hw task context table
+ * @remote_node_context_table: hw remote node context table
+ * @completion_queue: hw-producer driver-consumer communication ring
+ * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
+ * @logical_port_entries: min({driver|silicon}-supported-port-count)
+ * @remote_node_entries: min({driver|silicon}-supported-node-count)
+ * @task_context_entries: min({driver|silicon}-supported-task-count)
+ * @phy_timer: phy startup timer
+ * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
+ * the phy index is set so further notifications are not
+ * made. Once the phy reports link up and is made part of a
+ * port then this bit is cleared.
+ *
+ */
+struct isci_host {
+ struct sci_base_state_machine sm;
+ /* XXX can we time this externally */
+ struct sci_timer timer;
+ /* XXX drop reference module params directly */
+ struct sci_user_parameters user_parameters;
+ /* XXX no need to be a union */
+ struct sci_oem_params oem_parameters;
+ struct sci_port_configuration_agent port_agent;
+ struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
+ struct sci_remote_node_table available_remote_nodes;
+ struct sci_power_control power_control;
+ u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
+ struct scu_task_context *task_context_table;
+ dma_addr_t task_context_dma;
+ union scu_remote_node_context *remote_node_context_table;
+ u32 *completion_queue;
+ u32 completion_queue_get;
+ u32 logical_port_entries;
+ u32 remote_node_entries;
+ u32 task_context_entries;
+ struct sci_unsolicited_frame_control uf_control;
+
+ /* phy startup */
+ struct sci_timer phy_timer;
+ /* XXX kill */
+ bool phy_startup_timer_pending;
+ u32 next_phy_to_start;
+ /* XXX convert to unsigned long and use bitops */
+ u8 invalid_phy_mask;
+
+ /* TODO attempt dynamic interrupt coalescing scheme */
+ u16 interrupt_coalesce_number;
+ u32 interrupt_coalesce_timeout;
+ struct smu_registers __iomem *smu_registers;
+ struct scu_registers __iomem *scu_registers;
+
+ u16 tci_head;
+ u16 tci_tail;
+ u16 tci_pool[SCI_MAX_IO_REQUESTS];
+
+ int id; /* unique within a given pci device */
+ struct isci_phy phys[SCI_MAX_PHYS];
+ struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+ struct sas_ha_struct sas_ha;
+
+ spinlock_t state_lock;
+ struct pci_dev *pdev;
+ enum isci_status status;
+ #define IHOST_START_PENDING 0
+ #define IHOST_STOP_PENDING 1
+ unsigned long flags;
+ wait_queue_head_t eventq;
+ struct Scsi_Host *shost;
+ struct tasklet_struct completion_tasklet;
+ struct list_head requests_to_complete;
+ struct list_head requests_to_errorback;
+ spinlock_t scic_lock;
+ struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
+ struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
+};
+
+/**
+ * enum sci_controller_states - This enumeration depicts all the states
+ * for the common controller state machine.
+ */
+enum sci_controller_states {
+ /**
+ * Simply the initial state for the base controller state machine.
+ */
+ SCIC_INITIAL = 0,
+
+ /**
+ * This state indicates that the controller is reset. The memory for
+ * the controller is in its initial state, but the controller requires
+ * initialization.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the RESETTING state.
+ */
+ SCIC_RESET,
+
+ /**
+ * This state is typically an action state that indicates the controller
+ * is in the process of initialization. In this state no new IO operations
+ * are permitted.
+ * This state is entered from the RESET state.
+ */
+ SCIC_INITIALIZING,
+
+ /**
+ * This state indicates that the controller has been successfully
+ * initialized. In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ */
+ SCIC_INITIALIZED,
+
+ /**
+ * This state indicates that the controller is in the process of becoming
+ * ready (i.e. starting). In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZED state.
+ */
+ SCIC_STARTING,
+
+ /**
+ * This state indicates the controller is now ready. Thus, the user
+ * is able to perform IO operations on the controller.
+ * This state is entered from the STARTING state.
+ */
+ SCIC_READY,
+
+ /**
+ * This state is typically an action state that indicates the controller
+ * is in the process of resetting. Thus, the user is unable to perform
+ * IO operations on the controller. A reset is considered destructive in
+ * most cases.
+ * This state is entered from the READY state.
+ * This state is entered from the FAILED state.
+ * This state is entered from the STOPPED state.
+ */
+ SCIC_RESETTING,
+
+ /**
+ * This state indicates that the controller is in the process of stopping.
+ * In this state no new IO operations are permitted, but existing IO
+ * operations are allowed to complete.
+ * This state is entered from the READY state.
+ */
+ SCIC_STOPPING,
+
+ /**
+ * This state indicates that the controller has successfully been stopped.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the STOPPING state.
+ */
+ SCIC_STOPPED,
+
+ /**
+ * This state indicates that the controller could not successfully be
+ * initialized. In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ * This state is entered from the STARTING state.
+ * This state is entered from the STOPPING state.
+ * This state is entered from the RESETTING state.
+ */
+ SCIC_FAILED,
+};
+
+/**
+ * struct isci_pci_info - This structure represents the PCI function containing the
+ * controllers. Depending on PCI SKU, there could be up to 2 controllers in
+ * the PCI function.
+ */
+#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
+
+struct isci_pci_info {
+ struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
+ struct isci_host *hosts[SCI_MAX_CONTROLLERS];
+ struct isci_orom *orom;
+};
+
+static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+#define for_each_isci_host(id, ihost, pdev) \
+ for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
+ id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
+ ihost = to_pci_info(pdev)->hosts[++id])
+
+static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
+{
+ return isci_host->status;
+}
+
+static inline void isci_host_change_state(struct isci_host *isci_host,
+ enum isci_status status)
+{
+ unsigned long flags;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_host = %p, state = 0x%x",
+ __func__,
+ isci_host,
+ status);
+ spin_lock_irqsave(&isci_host->state_lock, flags);
+ isci_host->status = status;
+ spin_unlock_irqrestore(&isci_host->state_lock, flags);
+
+}
+
+static inline void wait_for_start(struct isci_host *ihost)
+{
+ wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_stop(struct isci_host *ihost)
+{
+ wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
+}
+
+static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
+}
+
+static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
+{
+ return dev->port->ha->lldd_ha;
+}
+
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
+
+/* see isci_alloc_tag|isci_free_tag for how seq and tci are built */
+#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
+
+/* these are returned by the hardware, so sanitize them */
+#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
+#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
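+/* Layout: the sequence number occupies the bits above bit 12 of the u16 tag
+ * and the task context index the low bits; both are masked to their pool
+ * sizes when extracted.
+ */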
+
+/* expander attached sata devices require 3 rnc slots */
+static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
+{
+ struct domain_device *dev = idev->domain_dev;
+
+ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+ !idev->is_direct_attached)
+ return SCU_STP_REMOTE_NODE_COUNT;
+ return SCU_SSP_REMOTE_NODE_COUNT;
+}
+
+/**
+ * sci_controller_clear_invalid_phy() -
+ *
+ * This macro will clear the bit in the invalid phy mask for this controller
+ * object. This is used to control messages reported for invalid link up
+ * notifications.
+ */
+#define sci_controller_clear_invalid_phy(controller, phy) \
+ ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
+
+static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+
+ if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
+ return NULL;
+
+ return &iphy->isci_port->isci_host->pdev->dev;
+}
+
+static inline struct device *sciport_to_dev(struct isci_port *iport)
+{
+
+ if (!iport || !iport->isci_host)
+ return NULL;
+
+ return &iport->isci_host->pdev->dev;
+}
+
+static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
+{
+ if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
+ return NULL;
+
+ return &idev->isci_port->isci_host->pdev->dev;
+}
+
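+/* Silicon revision helpers: A2 is any PCI revision below 4, B0 is exactly
+ * revision 4, and C0 is revision 5 or later.
+ */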
+static inline bool is_a2(struct pci_dev *pdev)
+{
+ if (pdev->revision < 4)
+ return true;
+ return false;
+}
+
+static inline bool is_b0(struct pci_dev *pdev)
+{
+ if (pdev->revision == 4)
+ return true;
+ return false;
+}
+
+static inline bool is_c0(struct pci_dev *pdev)
+{
+ if (pdev->revision >= 5)
+ return true;
+ return false;
+}
+
+void sci_controller_post_request(struct isci_host *ihost,
+ u32 request);
+void sci_controller_release_frame(struct isci_host *ihost,
+ u32 frame_index);
+void sci_controller_copy_sata_response(void *response_buffer,
+ void *frame_header,
+ void *frame_buffer);
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 *node_id);
+void sci_controller_free_remote_node_context(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 node_id);
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost,
+ u16 io_tag);
+
+void sci_controller_power_control_queue_insert(
+ struct isci_host *ihost,
+ struct isci_phy *iphy);
+
+void sci_controller_power_control_queue_remove(
+ struct isci_host *ihost,
+ struct isci_phy *iphy);
+
+void sci_controller_link_up(
+ struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_controller_link_down(
+ struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_controller_remote_device_stopped(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+void sci_controller_copy_task_context(
+ struct isci_host *ihost,
+ struct isci_request *ireq);
+
+void sci_controller_register_setup(struct isci_host *ihost);
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq);
+int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
+void isci_host_scan_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);
+
+int isci_host_init(struct isci_host *);
+
+void isci_host_init_controller_names(
+ struct isci_host *isci_host,
+ unsigned int controller_idx);
+
+void isci_host_deinit(
+ struct isci_host *);
+
+void isci_host_port_link_up(
+ struct isci_host *,
+ struct isci_port *,
+ struct isci_phy *);
+int isci_host_dev_found(struct domain_device *);
+
+void isci_host_remote_device_start_complete(
+ struct isci_host *,
+ struct isci_remote_device *,
+ enum sci_status);
+
+void sci_controller_disable_interrupts(
+ struct isci_host *ihost);
+
+enum sci_status sci_controller_start_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_task_status sci_controller_start_task(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_controller_terminate_request(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_controller_complete_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent);
+
+enum sci_status sci_port_configuration_agent_initialize(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent);
+#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644
index 000000000000..61e0d09e2b57
--- /dev/null
+++ b/drivers/scsi/isci/init.c
@@ -0,0 +1,565 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/efi.h>
+#include <asm/string.h>
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static struct scsi_transport_template *isci_transport_template;
+
+static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
+ { PCI_VDEVICE(INTEL, 0x1D61),},
+ { PCI_VDEVICE(INTEL, 0x1D63),},
+ { PCI_VDEVICE(INTEL, 0x1D65),},
+ { PCI_VDEVICE(INTEL, 0x1D67),},
+ { PCI_VDEVICE(INTEL, 0x1D69),},
+ { PCI_VDEVICE(INTEL, 0x1D6B),},
+ { PCI_VDEVICE(INTEL, 0x1D60),},
+ { PCI_VDEVICE(INTEL, 0x1D62),},
+ { PCI_VDEVICE(INTEL, 0x1D64),},
+ { PCI_VDEVICE(INTEL, 0x1D66),},
+ { PCI_VDEVICE(INTEL, 0x1D68),},
+ { PCI_VDEVICE(INTEL, 0x1D6A),},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, isci_id_table);
+
+/* linux isci specific settings */
+
+unsigned char no_outbound_task_to = 20;
+module_param(no_outbound_task_to, byte, 0);
+MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
+
+u16 ssp_max_occ_to = 20;
+module_param(ssp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
+
+u16 stp_max_occ_to = 5;
+module_param(stp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
+
+u16 ssp_inactive_to = 5;
+module_param(ssp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
+
+u16 stp_inactive_to = 5;
+module_param(stp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
+
+unsigned char phy_gen = 3;
+module_param(phy_gen, byte, 0);
+MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
+
+unsigned char max_concurr_spinup = 1;
+module_param(max_concurr_spinup, byte, 0);
+MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
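+
+/* For example, a load-time override such as
+ *
+ *	modprobe isci phy_gen=2 max_concurr_spinup=2
+ *
+ * would limit link negotiation to 3.0Gbps and allow two devices to spin up
+ * concurrently; the values shown are illustrative only, and the defaults
+ * above apply otherwise.
+ */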
+
+static struct scsi_host_template isci_sht = {
+
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .proc_name = DRV_NAME,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = sas_slave_configure,
+ .slave_destroy = sas_slave_destroy,
+ .scan_finished = isci_host_scan_finished,
+ .scan_start = isci_host_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .change_queue_type = sas_change_queue_type,
+ .bios_param = sas_bios_param,
+ .can_queue = ISCI_CAN_QUEUE_VAL,
+ .cmd_per_lun = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_bus_reset_handler = isci_bus_reset_handler,
+ .slave_alloc = sas_slave_alloc,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+};
+
+static struct sas_domain_function_template isci_transport_ops = {
+
+ /* The class calls these to notify the LLDD of an event. */
+ .lldd_port_formed = isci_port_formed,
+ .lldd_port_deformed = isci_port_deformed,
+
+ /* The class calls these when a device is found or gone. */
+ .lldd_dev_found = isci_remote_device_found,
+ .lldd_dev_gone = isci_remote_device_gone,
+
+ .lldd_execute_task = isci_task_execute_task,
+ /* Task Management Functions. Must be called from process context. */
+ .lldd_abort_task = isci_task_abort_task,
+ .lldd_abort_task_set = isci_task_abort_task_set,
+ .lldd_clear_aca = isci_task_clear_aca,
+ .lldd_clear_task_set = isci_task_clear_task_set,
+ .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
+ .lldd_lu_reset = isci_task_lu_reset,
+ .lldd_query_task = isci_task_query_task,
+
+ /* Port and Adapter management */
+ .lldd_clear_nexus_port = isci_task_clear_nexus_port,
+ .lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
+
+ /* Phy management */
+ .lldd_control_phy = isci_phy_control,
+};
+
+
+/******************************************************************************
+* P R O T E C T E D M E T H O D S
+******************************************************************************/
+
+
+
+/**
+ * isci_register_sas_ha() - This method initializes various lldd
+ * specific members of the sas_ha struct and calls the libsas
+ * sas_register_ha() function.
+ * @isci_host: This parameter specifies the lldd specific wrapper for the
+ * libsas sas_ha struct.
+ *
+ * This method returns an error code indicating success or failure. The
+ * user should check for a possible memory allocation error return;
+ * otherwise, a zero indicates success.
+ */
+static int isci_register_sas_ha(struct isci_host *isci_host)
+{
+ int i;
+ struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
+ struct asd_sas_phy **sas_phys;
+ struct asd_sas_port **sas_ports;
+
+ sas_phys = devm_kzalloc(&isci_host->pdev->dev,
+ SCI_MAX_PHYS * sizeof(void *),
+ GFP_KERNEL);
+ if (!sas_phys)
+ return -ENOMEM;
+
+ sas_ports = devm_kzalloc(&isci_host->pdev->dev,
+ SCI_MAX_PORTS * sizeof(void *),
+ GFP_KERNEL);
+ if (!sas_ports)
+ return -ENOMEM;
+
+ /*----------------- Libsas Initialization Stuff----------------------
+ * Set various fields in the sas_ha struct:
+ */
+
+ sas_ha->sas_ha_name = DRV_NAME;
+ sas_ha->lldd_module = THIS_MODULE;
+ sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
+
+ /* set the array of phy and port structs. */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ sas_phys[i] = &isci_host->phys[i].sas_phy;
+ sas_ports[i] = &isci_host->ports[i].sas_port;
+ }
+
+ sas_ha->sas_phy = sas_phys;
+ sas_ha->sas_port = sas_ports;
+ sas_ha->num_phys = SCI_MAX_PHYS;
+
+ sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
+ sas_ha->lldd_max_execute_num = 1;
+ sas_ha->strict_wide_ports = 1;
+
+ sas_register_ha(sas_ha);
+
+ return 0;
+}
+
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+static void isci_unregister(struct isci_host *isci_host)
+{
+ struct Scsi_Host *shost;
+
+ if (!isci_host)
+ return;
+
+ shost = isci_host->shost;
+ device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
+
+ sas_unregister_ha(&isci_host->sas_ha);
+
+ sas_remove_host(isci_host->shost);
+ scsi_remove_host(isci_host->shost);
+ scsi_host_put(isci_host->shost);
+}
+
+static int __devinit isci_pci_init(struct pci_dev *pdev)
+{
+ int err, bar_num, bar_mask = 0;
+ void __iomem * const *iomap;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed enable PCI device %s!\n",
+ pci_name(pdev));
+ return err;
+ }
+
+ for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
+ bar_mask |= 1 << (bar_num * 2);
+
+ err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
+ if (err)
+ return err;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int num_controllers(struct pci_dev *pdev)
+{
+ /* bar size alone can tell us if we are running with a dual controller
+ * part; there is no need to trust revision ids that might be under
+ * broken firmware control
+ */
+ resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
+ resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
+
+ if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
+ smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
+ return SCI_MAX_CONTROLLERS;
+ else
+ return 1;
+}
+
+static int isci_setup_interrupts(struct pci_dev *pdev)
+{
+ int err, i, num_msix;
+ struct isci_host *ihost;
+ struct isci_pci_info *pci_info = to_pci_info(pdev);
+
+ /*
+ * Determine the number of vectors associated with this
+ * PCI function.
+ */
+ num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
+
+ for (i = 0; i < num_msix; i++)
+ pci_info->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
+ if (err)
+ goto intx;
+
+ for (i = 0; i < num_msix; i++) {
+ int id = i / SCI_NUM_MSI_X_INT;
+ struct msix_entry *msix = &pci_info->msix_entries[i];
+ irq_handler_t isr;
+
+ ihost = pci_info->hosts[id];
+ /* odd numbered vectors are error interrupts */
+ if (i & 1)
+ isr = isci_error_isr;
+ else
+ isr = isci_msix_isr;
+
+ err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
+ DRV_NAME"-msix", ihost);
+ if (!err)
+ continue;
+
+ dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
+ while (i--) {
+ id = i / SCI_NUM_MSI_X_INT;
+ ihost = pci_info->hosts[id];
+ msix = &pci_info->msix_entries[i];
+ devm_free_irq(&pdev->dev, msix->vector, ihost);
+ }
+ pci_disable_msix(pdev);
+ goto intx;
+ }
+ return 0;
+
+ intx:
+ for_each_isci_host(i, ihost, pdev) {
+ err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
+ IRQF_SHARED, DRV_NAME"-intx", ihost);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
+{
+ struct isci_host *isci_host;
+ struct Scsi_Host *shost;
+ int err;
+
+ isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
+ if (!isci_host)
+ return NULL;
+
+ isci_host->pdev = pdev;
+ isci_host->id = id;
+
+ shost = scsi_host_alloc(&isci_sht, sizeof(void *));
+ if (!shost)
+ return NULL;
+ isci_host->shost = shost;
+
+ err = isci_host_init(isci_host);
+ if (err)
+ goto err_shost;
+
+ SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
+ isci_host->sas_ha.core.shost = shost;
+ shost->transportt = isci_transport_template;
+
+ shost->max_id = ~0;
+ shost->max_lun = ~0;
+ shost->max_cmd_len = MAX_COMMAND_SIZE;
+
+ err = scsi_add_host(shost, &pdev->dev);
+ if (err)
+ goto err_shost;
+
+ err = isci_register_sas_ha(isci_host);
+ if (err)
+ goto err_shost_remove;
+
+ err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
+ if (err)
+ goto err_unregister_ha;
+
+ return isci_host;
+
+ err_unregister_ha:
+ sas_unregister_ha(&(isci_host->sas_ha));
+ err_shost_remove:
+ scsi_remove_host(shost);
+ err_shost:
+ scsi_host_put(shost);
+
+ return NULL;
+}
+
+static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct isci_pci_info *pci_info;
+ int err, i;
+ struct isci_host *isci_host;
+ const struct firmware *fw = NULL;
+ struct isci_orom *orom = NULL;
+ char *source = "(platform)";
+
+ dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
+ pdev->revision);
+
+ pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, pci_info);
+
+ if (efi_enabled)
+ orom = isci_get_efi_var(pdev);
+
+ if (!orom)
+ orom = isci_request_oprom(pdev);
+
+ for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ dev_warn(&pdev->dev,
+ "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+ devm_kfree(&pdev->dev, orom);
+ orom = NULL;
+ break;
+ }
+ }
+
+ if (!orom) {
+ source = "(firmware)";
+ orom = isci_request_firmware(pdev, fw);
+ if (!orom) {
+ /* TODO convert this to WARN_TAINT_ONCE once the
+ * orom/efi parameter support is widely available
+ */
+ dev_warn(&pdev->dev,
+ "Loading user firmware failed, using default "
+ "values\n");
+ dev_warn(&pdev->dev,
+ "Default OEM configuration being used: 4 "
+ "narrow ports, and default SAS Addresses\n");
+ }
+ }
+
+ if (orom)
+ dev_info(&pdev->dev,
+ "OEM SAS parameters (version: %u.%u) loaded %s\n",
+ (orom->hdr.version & 0xf0) >> 4,
+ (orom->hdr.version & 0xf), source);
+
+ pci_info->orom = orom;
+
+ err = isci_pci_init(pdev);
+ if (err)
+ return err;
+
+ for (i = 0; i < num_controllers(pdev); i++) {
+ struct isci_host *h = isci_host_alloc(pdev, i);
+
+ if (!h) {
+ err = -ENOMEM;
+ goto err_host_alloc;
+ }
+ pci_info->hosts[i] = h;
+ }
+
+ err = isci_setup_interrupts(pdev);
+ if (err)
+ goto err_host_alloc;
+
+ for_each_isci_host(i, isci_host, pdev)
+ scsi_scan_host(isci_host->shost);
+
+ return 0;
+
+ err_host_alloc:
+ for_each_isci_host(i, isci_host, pdev)
+ isci_unregister(isci_host);
+ return err;
+}
+
+static void __devexit isci_pci_remove(struct pci_dev *pdev)
+{
+ struct isci_host *ihost;
+ int i;
+
+ for_each_isci_host(i, ihost, pdev) {
+ isci_unregister(ihost);
+ isci_host_deinit(ihost);
+ sci_controller_disable_interrupts(ihost);
+ }
+}
+
+static struct pci_driver isci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = isci_id_table,
+ .probe = isci_pci_probe,
+ .remove = __devexit_p(isci_pci_remove),
+};
+
+static __init int isci_init(void)
+{
+ int err;
+
+ pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+
+ isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
+ if (!isci_transport_template)
+ return -ENOMEM;
+
+ err = pci_register_driver(&isci_pci_driver);
+ if (err)
+ sas_release_transport(isci_transport_template);
+
+ return err;
+}
+
+static __exit void isci_exit(void)
+{
+ pci_unregister_driver(&isci_pci_driver);
+ sas_release_transport(isci_transport_template);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(ISCI_FW_NAME);
+module_init(isci_init);
+module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644
index 000000000000..d1de63312e7f
--- /dev/null
+++ b/drivers/scsi/isci/isci.h
@@ -0,0 +1,538 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ISCI_H__
+#define __ISCI_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#define DRV_NAME "isci"
+#define SCI_PCI_BAR_COUNT 2
+#define SCI_NUM_MSI_X_INT 2
+#define SCI_SMU_BAR 0
+#define SCI_SMU_BAR_SIZE (16*1024)
+#define SCI_SCU_BAR 1
+#define SCI_SCU_BAR_SIZE (4*1024*1024)
+#define SCI_IO_SPACE_BAR0 2
+#define SCI_IO_SPACE_BAR1 3
+#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
+#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
+
+#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
+
+#define SCI_MAX_PHYS (4UL)
+#define SCI_MAX_PORTS SCI_MAX_PHYS
+#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
+#define SCI_MAX_REMOTE_DEVICES (256UL)
+#define SCI_MAX_IO_REQUESTS (256UL)
+#define SCI_MAX_SEQ (16)
+#define SCI_MAX_MSIX_MESSAGES (2)
+#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
+#define SCI_MAX_CONTROLLERS 2
+#define SCI_MAX_DOMAINS SCI_MAX_PORTS
+
+#define SCU_MAX_CRITICAL_NOTIFICATIONS (384)
+#define SCU_MAX_EVENTS_SHIFT (7)
+#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT)
+#define SCU_MAX_UNSOLICITED_FRAMES (128)
+#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128)
+#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \
+ + SCU_MAX_EVENTS \
+ + SCU_MAX_UNSOLICITED_FRAMES \
+ + SCI_MAX_IO_REQUESTS \
+ + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
+#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
+
+#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
+#define SCU_INVALID_FRAME_INDEX (0xFFFF)
+
+#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
+#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF)
+
+static inline void check_sizes(void)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
+ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
+ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
+}
+
+/**
+ * enum sci_status - This is the general return status enumeration for non-IO,
+ * non-task management related SCI interface methods.
+ *
+ *
+ */
+enum sci_status {
+ /**
+ * This member indicates successful completion.
+ */
+ SCI_SUCCESS = 0,
+
+ /**
+ * This value indicates that the calling method completed successfully,
+ * but that the IO may have completed before having its start method
+ * invoked. This occurs during SAT translation for requests that do
+ * not require an IO to the target or for any other requests that may
+ * be completed without having to submit IO.
+ */
+ SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+
+ /**
+ * This value indicates that the SCU hardware returned an early response
+ * because the io request specified more data than is returned by the
+ * target device (mode pages, inquiry data, etc.). The completion routine
+ * will handle this case to get the actual number of bytes transferred.
+ */
+ SCI_SUCCESS_IO_DONE_EARLY,
+
+ /**
+ * This member indicates that the object for which a state change is
+ * being requested is already in said state.
+ */
+ SCI_WARNING_ALREADY_IN_STATE,
+
+ /**
+ * This member indicates interrupt coalescence timer may cause SAS
+ * specification compliance issues (i.e. SMP target mode response
+ * frames must be returned within 1.9 milliseconds).
+ */
+ SCI_WARNING_TIMER_CONFLICT,
+
+ /**
+ * This field indicates that a sequence of actions is not yet complete.
+ * Mostly, this status is used when multiple ATA commands are needed in a
+ * SATI translation.
+ */
+ SCI_WARNING_SEQUENCE_INCOMPLETE,
+
+ /**
+ * This member indicates that there was a general failure.
+ */
+ SCI_FAILURE,
+
+ /**
+ * This member indicates that the SCI implementation is unable to complete
+ * an operation due to a critical flaw that prevents any further operation
+ * (i.e. an invalid pointer).
+ */
+ SCI_FATAL_ERROR,
+
+ /**
+ * This member indicates the calling function failed, because the state
+ * of the controller is in a state that prevents successful completion.
+ */
+ SCI_FAILURE_INVALID_STATE,
+
+ /**
+ * This member indicates the calling function failed, because there is
+ * insufficient resources/memory to complete the request.
+ */
+ SCI_FAILURE_INSUFFICIENT_RESOURCES,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * controller object required for the operation can't be located.
+ */
+ SCI_FAILURE_CONTROLLER_NOT_FOUND,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * discovered controller type is not supported by the library.
+ */
+ SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested initialization data version isn't supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested configuration of SAS Phys into SAS Ports is not supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested protocol is not supported by the remote device, port,
+ * or controller.
+ */
+ SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested information type is not supported by the SCI implementation.
+ */
+ SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * device already exists.
+ */
+ SCI_FAILURE_DEVICE_EXISTS,
+
+ /**
+ * This member indicates the calling function failed, because adding
+ * a phy to the object is not possible.
+ */
+ SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested information type is not supported by the SCI implementation.
+ */
+ SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
+
+ /**
+ * This member indicates the calling function failed, because the SCI
+ * implementation does not support the supplied time limit.
+ */
+ SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified Phy.
+ */
+ SCI_FAILURE_INVALID_PHY,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified Port.
+ */
+ SCI_FAILURE_INVALID_PORT,
+
+ /**
+ * This member indicates the calling method was partly successful.
+ * The port was reset, but not all phys in the port are operational.
+ */
+ SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
+
+ /**
+ * This member indicates that the calling method failed.
+ * The port reset did not complete because none of the phys are operational.
+ */
+ SCI_FAILURE_RESET_PORT_FAILURE,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified remote device.
+ */
+ SCI_FAILURE_INVALID_REMOTE_DEVICE,
+
+ /**
+ * This member indicates the calling method failed, because the remote
+ * device is in a bad state and requires a reset.
+ */
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain or support the specified IO tag.
+ */
+ SCI_FAILURE_INVALID_IO_TAG,
+
+ /**
+ * This member indicates that the operation failed and the user should
+ * check the response data associated with the IO.
+ */
+ SCI_FAILURE_IO_RESPONSE_VALID,
+
+ /**
+ * This member indicates that the operation failed, the failure is
+ * controller implementation specific, and the response data associated
+ * with the request is not valid. You can query for the controller
+ * specific error information via sci_controller_get_request_status()
+ */
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+
+ /**
+ * This member indicates that the operation failed because the
+ * user requested this IO to be terminated.
+ */
+ SCI_FAILURE_IO_TERMINATED,
+
+ /**
+ * This member indicates that the operation failed and the associated
+ * request requires a SCSI abort task to be sent to the target.
+ */
+ SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+
+ /**
+ * This member indicates that the operation failed because the supplied
+ * device could not be located.
+ */
+ SCI_FAILURE_DEVICE_NOT_FOUND,
+
+ /**
+ * This member indicates that the operation failed because the
+ * objects association is required and is not correctly set.
+ */
+ SCI_FAILURE_INVALID_ASSOCIATION,
+
+ /**
+ * This member indicates that the operation failed, because a timeout
+ * occurred.
+ */
+ SCI_FAILURE_TIMEOUT,
+
+ /**
+ * This member indicates that the operation failed, because the user
+ * specified a value that is either invalid or not supported.
+ */
+ SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+ /**
+ * This value indicates that the operation failed, because the number
+ * of messages (MSI-X) is not supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
+
+ /**
+ * This value indicates that the method failed due to a lack of
+ * available NCQ tags.
+ */
+ SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+
+ /**
+ * This value indicates that a protocol violation has occurred on the
+ * link.
+ */
+ SCI_FAILURE_PROTOCOL_VIOLATION,
+
+ /**
+ * This value indicates a failure condition that retry may help to clear.
+ */
+ SCI_FAILURE_RETRY_REQUIRED,
+
+ /**
+ * This field indicates that the retry limit was reached when a retry was attempted.
+ */
+ SCI_FAILURE_RETRY_LIMIT_REACHED,
+
+ /**
+ * This member indicates the calling method was partly successful.
+ * Mostly, this status is used when a LUN_RESET issued to an expander attached
+ * STP device in READY NCQ substate needs to have RNC suspended/resumed
+ * before posting TC.
+ */
+ SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
+
+ /**
+ * This field indicates an illegal phy connection based on the routing
+ * attributes of the two expander phys attached to each other.
+ */
+ SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
+
+ /**
+ * This field indicates a CONFIG ROUTE INFO command has a response with the
+ * function result INDEX DOES NOT EXIST, which usually means the maximum
+ * route index was exceeded.
+ */
+ SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
+
+ /**
+ * This value indicates that an unsupported PCI device ID has been
+ * specified. This indicates that attempts to invoke
+ * sci_library_allocate_controller() will fail.
+ */
+ SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
+
+};
+
+/**
+ * enum sci_io_status - This enumeration depicts all of the possible IO
+ * completion status values. Each value in this enumeration maps directly
+ * to a value in the enum sci_status enumeration. Please refer to that
+ * enumeration for detailed comments concerning what the status represents.
+ *
+ * Add the API to retrieve the SCU status from the core. Check to see that the
+ * following statuses are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
+ * - SCI_IO_FAILURE_INVALID_IO_TAG
+ */
+enum sci_io_status {
+ SCI_IO_SUCCESS = SCI_SUCCESS,
+ SCI_IO_FAILURE = SCI_FAILURE,
+ SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+ SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY,
+ SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
+ SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+ SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+ SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
+ SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+ SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
+ SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+ SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+ SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+ SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION,
+
+ SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+ SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED,
+ SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
+ SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
+};
+
+/**
+ * enum sci_task_status - This enumeration depicts all of the possible task
+ * completion status values. Each value in this enumeration maps directly
+ * to a value in the enum sci_status enumeration. Please refer to that
+ * enumeration for detailed comments concerning what the status represents.
+ *
+ * Check to see that the following statuses are properly handled:
+ */
+enum sci_task_status {
+ SCI_TASK_SUCCESS = SCI_SUCCESS,
+ SCI_TASK_FAILURE = SCI_FAILURE,
+ SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
+ SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+ SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+ SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG,
+ SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
+ SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+ SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
+ SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+ SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+ SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
+
+};
+
+/**
+ * sci_swab32_cpy - convert between scsi and scu-hardware byte format
+ * @dest: receive the 4-byte endian swapped version of src
+ * @src: word aligned source buffer
+ *
+ * scu hardware handles SSP/SMP control, response, and unidentified
+ * frames in "big endian dword" order. Regardless of host endian this
+ * is always a swab32()-per-dword conversion of the standard definition,
+ * i.e. single byte fields swapped and multi-byte fields in little-
+ * endian
+ */
+static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
+{
+ u32 *dest = _dest, *src = _src;
+
+ while (--word_cnt >= 0)
+ dest[word_cnt] = swab32(src[word_cnt]);
+}
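+
+/* For example (a purely illustrative case): with word_cnt = 2, the source
+ * bytes 00 01 02 03 04 05 06 07 land in the destination as
+ * 03 02 01 00 07 06 05 04, i.e. each 4-byte dword is byte-reversed while
+ * the order of the dwords themselves is preserved.
+ */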
+
+extern unsigned char no_outbound_task_to;
+extern u16 ssp_max_occ_to;
+extern u16 stp_max_occ_to;
+extern u16 ssp_inactive_to;
+extern u16 stp_inactive_to;
+extern unsigned char phy_gen;
+extern unsigned char max_concurr_spinup;
+
+irqreturn_t isci_msix_isr(int vec, void *data);
+irqreturn_t isci_intx_isr(int vec, void *data);
+irqreturn_t isci_error_isr(int vec, void *data);
+
+/*
+ * Each timer is associated with a cancellation flag that is set when
+ * del_timer() is called and checked in the timer callback function. This
+ * is needed since del_timer_sync() cannot be called with sci_lock held.
+ * For deinit however, del_timer_sync() is used without holding the lock.
+ */
+struct sci_timer {
+ struct timer_list timer;
+ bool cancel;
+};
+
+static inline
+void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
+{
+ tmr->timer.function = fn;
+ tmr->timer.data = (unsigned long) tmr;
+ tmr->cancel = 0;
+ init_timer(&tmr->timer);
+}
+
+static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
+{
+ tmr->cancel = 0;
+ mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
+}
+
+static inline void sci_del_timer(struct sci_timer *tmr)
+{
+ tmr->cancel = 1;
+ del_timer(&tmr->timer);
+}
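+
+/* A minimal usage sketch of the cancellation pattern described above; the
+ * lock and the expiry helper named here are illustrative only. The expiry
+ * routine re-checks @cancel under the lock so that a racing sci_del_timer()
+ * turns a late-firing timer into a no-op:
+ *
+ *	static void example_timeout(unsigned long data)
+ *	{
+ *		struct sci_timer *tmr = (struct sci_timer *)data;
+ *		unsigned long flags;
+ *
+ *		spin_lock_irqsave(&example_lock, flags);
+ *		if (!tmr->cancel)
+ *			example_handle_expiry(tmr);
+ *		spin_unlock_irqrestore(&example_lock, flags);
+ *	}
+ */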
+
+struct sci_base_state_machine {
+ const struct sci_base_state *state_table;
+ u32 initial_state_id;
+ u32 current_state_id;
+ u32 previous_state_id;
+};
+
+typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
+
+struct sci_base_state {
+ sci_state_transition_t enter_state; /* Called on state entry */
+ sci_state_transition_t exit_state; /* Called on state exit */
+};
+
+extern void sci_init_sm(struct sci_base_state_machine *sm,
+ const struct sci_base_state *state_table,
+ u32 initial_state);
+extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
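+
+/* A minimal sketch of wiring up a state table with the helpers above; the
+ * enum values and callback named here are illustrative only:
+ *
+ *	static void example_ready_enter(struct sci_base_state_machine *sm);
+ *
+ *	static const struct sci_base_state example_state_table[] = {
+ *		[EXAMPLE_INITIAL] = { },
+ *		[EXAMPLE_READY] = { .enter_state = example_ready_enter },
+ *	};
+ *
+ *	sci_init_sm(&obj->sm, example_state_table, EXAMPLE_INITIAL);
+ *	sci_change_state(&obj->sm, EXAMPLE_READY);
+ */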
+#endif /* __ISCI_H__ */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644
index 000000000000..79313a7a2356
--- /dev/null
+++ b/drivers/scsi/isci/phy.c
@@ -0,0 +1,1312 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "host.h"
+#include "phy.h"
+#include "scu_event_codes.h"
+#include "probe_roms.h"
+
+/* Maximum arbitration wait time in micro-seconds */
+#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
+
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
+{
+ return iphy->max_negotiated_speed;
+}
+
+static enum sci_status
+sci_phy_transport_layer_initialization(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *reg)
+{
+ u32 tl_control;
+
+ iphy->transport_layer_registers = reg;
+
+ writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
+ &iphy->transport_layer_registers->stp_rni);
+
+ /*
+ * Hardware team recommends that we enable the STP prefetch for all
+ * transports
+ */
+ tl_control = readl(&iphy->transport_layer_registers->control);
+ tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
+ writel(tl_control, &iphy->transport_layer_registers->control);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+sci_phy_link_layer_initialization(struct isci_phy *iphy,
+ struct scu_link_layer_registers __iomem *reg)
+{
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ int phy_idx = iphy->phy_index;
+ struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
+ struct sci_phy_oem_params *phy_oem =
+ &ihost->oem_parameters.phys[phy_idx];
+ u32 phy_configuration;
+ struct sci_phy_cap phy_cap;
+ u32 parity_check = 0;
+ u32 parity_count = 0;
+ u32 llctl, link_rate;
+ u32 clksm_value = 0;
+
+ iphy->link_layer_registers = reg;
+
+ /* Set our IDENTIFY frame data */
+ #define SCI_END_DEVICE 0x01
+
+ writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
+ SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
+ &iphy->link_layer_registers->transmit_identification);
+
+ /* Write the device SAS Address */
+ writel(0xFEDCBA98,
+ &iphy->link_layer_registers->sas_device_name_high);
+ writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+
+ /* Write the source SAS Address */
+ writel(phy_oem->sas_address.high,
+ &iphy->link_layer_registers->source_sas_address_high);
+ writel(phy_oem->sas_address.low,
+ &iphy->link_layer_registers->source_sas_address_low);
+
+ /* Clear and Set the PHY Identifier */
+ writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
+ writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
+ &iphy->link_layer_registers->identify_frame_phy_id);
+
+ /* Change the initial state of the phy configuration register */
+ phy_configuration =
+ readl(&iphy->link_layer_registers->phy_configuration);
+
+ /* Hold OOB state machine in reset */
+ phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(phy_configuration,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Configure the SNW capabilities */
+ phy_cap.all = 0;
+ phy_cap.start = 1;
+ phy_cap.gen3_no_ssc = 1;
+ phy_cap.gen2_no_ssc = 1;
+ phy_cap.gen1_no_ssc = 1;
+ if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+ phy_cap.gen3_ssc = 1;
+ phy_cap.gen2_ssc = 1;
+ phy_cap.gen1_ssc = 1;
+ }
+
+ /*
+ * The SAS specification indicates that the phy_capabilities that
+ * are transmitted shall have an even parity. Calculate the parity. */
+ parity_check = phy_cap.all;
+ while (parity_check != 0) {
+ if (parity_check & 0x1)
+ parity_count++;
+ parity_check >>= 1;
+ }
+
+ /*
+ * If parity indicates there are an odd number of bits set, then
+ * set the parity bit to 1 in the phy capabilities. */
+ if ((parity_count % 2) != 0)
+ phy_cap.parity = 1;
+
+ writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+
+ /* Set the enable spinup period but disable the ability to send
+ * notify enable spinup
+ */
+ writel(SCU_ENSPINUP_GEN_VAL(COUNT,
+ phy_user->notify_enable_spin_up_insertion_frequency),
+ &iphy->link_layer_registers->notify_enable_spinup_control);
+
+ /* Write the ALIGN Insertion Frequency for the connected phy and
+ * independent of the connected state
+ */
+ clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
+ phy_user->in_connection_align_insertion_frequency);
+
+ clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
+ phy_user->align_insertion_frequency);
+
+ writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+
+ /* @todo Provide a way to write this register correctly */
+ writel(0x02108421,
+ &iphy->link_layer_registers->afe_lookup_table_control);
+
+ llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
+ (u8)ihost->user_parameters.no_outbound_task_timeout);
+
+ switch (phy_user->max_speed_generation) {
+ case SCIC_SDS_PARM_GEN3_SPEED:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
+ break;
+ case SCIC_SDS_PARM_GEN2_SPEED:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
+ break;
+ default:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
+ break;
+ }
+ llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
+ writel(llctl, &iphy->link_layer_registers->link_layer_control);
+
+ if (is_a2(ihost->pdev)) {
+ /* Program the max ARB time for the PHY to 700us so we inter-operate with
+ * the PMC expander which shuts down PHYs if the expander PHY generates too
+ * many breaks. This time value will guarantee that the initiator PHY will
+ * generate the break.
+ */
+ writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
+ &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+ }
+
+ /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
+ writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+
+ /* We can exit the initial state to the stopped state */
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+ return SCI_SUCCESS;
+}
+
+static void phy_sata_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
+ "timeout.\n",
+ __func__,
+ iphy);
+
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * This method returns the port currently containing this phy. If the phy is
+ * currently contained by the dummy port, then the phy is considered to not
+ * be part of a port.
+ * @iphy: This parameter specifies the phy for which to retrieve the
+ * containing port.
+ *
+ * This method returns a handle to the port that contains the supplied phy.
+ * NULL is returned if the phy is not part of a real port (i.e. it is
+ * contained in the dummy port); any other value is a pointer to the port
+ * containing the phy.
+ */
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
+{
+ struct isci_port *iport = iphy->owning_port;
+
+ if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
+ return NULL;
+
+ return iphy->owning_port;
+}
+
+/**
+ * This method will assign a port to the phy object.
+ * @iphy: This parameter specifies the phy to which the port is assigned.
+ * @iport: This parameter specifies the port to assign to the phy.
+ *
+ */
+void sci_phy_set_port(
+ struct isci_phy *iphy,
+ struct isci_port *iport)
+{
+ iphy->owning_port = iport;
+
+ if (iphy->bcn_received_while_port_unassigned) {
+ iphy->bcn_received_while_port_unassigned = false;
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
+ }
+}
+
+enum sci_status sci_phy_initialize(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *tl,
+ struct scu_link_layer_registers __iomem *ll)
+{
+ /* Perform the initialization of the TL hardware */
+ sci_phy_transport_layer_initialization(iphy, tl);
+
+ /* Perform the initialization of the PE hardware */
+ sci_phy_link_layer_initialization(iphy, ll);
+
+ /* There is nothing that needs to be done in this state just
+ * transition to the stopped state
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * This method assigns the direct attached device ID for this phy.
+ *
+ * @iphy: The phy for which the direct attached device id is to
+ * be assigned.
+ * @device_id: The direct attached device ID to assign to the phy.
+ * This will either be the RNi for the device or an invalid RNi if there
+ * is no current device assigned to the phy.
+ */
+void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
+{
+ u32 tl_control;
+
+ writel(device_id, &iphy->transport_layer_registers->stp_rni);
+
+ /*
+ * The read should guarantee that the first write gets posted
+ * before the next write
+ */
+ tl_control = readl(&iphy->transport_layer_registers->control);
+ tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
+ writel(tl_control, &iphy->transport_layer_registers->control);
+}
+
+static void sci_phy_suspend(struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+void sci_phy_resume(struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+ sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
+ sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
+}
+
+void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+ struct sas_identify_frame *iaf;
+
+ iaf = &iphy->frame_rcvd.iaf;
+ memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
+}
+
+void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
+{
+ proto->all = readl(&iphy->link_layer_registers->transmit_identification);
+}
+
+enum sci_status sci_phy_start(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ if (state != SCI_PHY_STOPPED) {
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_stop(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
+ case SCI_PHY_READY:
+ break;
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_reset(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ if (state != SCI_PHY_READY) {
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_SAS_POWER: {
+ u32 enable_spinup;
+
+ enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+ enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
+ writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
+
+ /* Change state to the final state this substate machine has run to completion */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+ return SCI_SUCCESS;
+ }
+ case SCI_PHY_SUB_AWAIT_SATA_POWER: {
+ u32 scu_sas_pcfg_value;
+
+ /* Release the spinup hold state and reset the OOB state machine */
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value &=
+ ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Now restart the OOB operation */
+ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Change state to the final state this substate machine has run to completion */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
+
+ return SCI_SUCCESS;
+ }
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
+{
+ /* continue the link training for the phy as if it were a SAS PHY
+ * instead of a SATA PHY. This is done because the completion queue had a SAS
+ * PHY DETECTED event when the state machine was expecting a SATA PHY event.
+ */
+ u32 phy_control;
+
+ phy_control = readl(&iphy->link_layer_registers->phy_configuration);
+ phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
+ writel(phy_control,
+ &iphy->link_layer_registers->phy_configuration);
+
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
+
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+}
+
+static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
+{
+ /* This method continues the link training for the phy as if it were a SATA PHY
+ * instead of a SAS PHY. This is done because the completion queue had a SATA
+ * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
+
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+}
+
+/**
+ * sci_phy_complete_link_training - perform processing common to
+ * all protocols upon completion of link training.
+ * @sci_phy: This parameter specifies the phy object for which link training
+ * has completed.
+ * @max_link_rate: This parameter specifies the maximum link rate to be
+ * associated with this phy.
+ * @next_state: This parameter specifies the next state for the phy's starting
+ * sub-state machine.
+ *
+ */
+static void sci_phy_complete_link_training(struct isci_phy *iphy,
+ enum sas_linkrate max_link_rate,
+ u32 next_state)
+{
+ iphy->max_negotiated_speed = max_link_rate;
+
+ sci_change_state(&iphy->sm, next_state);
+}
+
+enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ sci_phy_start_sas_link_training(iphy);
+ iphy->is_in_link_training = true;
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ sci_phy_start_sata_link_training(iphy);
+ iphy->is_in_link_training = true;
+ break;
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__,
+ event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /*
+ * Why is this being reported again by the controller?
+ * We would re-enter this state so just stay here */
+ break;
+ case SCU_EVENT_SAS_15:
+ case SCU_EVENT_SAS_15_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SAS_30:
+ case SCU_EVENT_SAS_30_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SAS_60:
+ case SCU_EVENT_SAS_60_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /*
+ * We were doing SAS PHY link training and received a SATA PHY event;
+ * continue OOB/SN as if this were a SATA PHY */
+ sci_phy_start_sata_link_training(iphy);
+ break;
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* Backup the state machine */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* We were doing SAS PHY link training and received a
+ * SATA PHY event; continue OOB/SN as if this were a
+ * SATA PHY
+ */
+ sci_phy_start_sata_link_training(iphy);
+ break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ case SCU_EVENT_LINK_FAILURE:
+ case SCU_EVENT_HARD_RESET_RECEIVED:
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received unexpected "
+ "event_code %x\n",
+ __func__,
+ event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* These events are received every 10ms and are
+ * expected while in this state
+ */
+ break;
+
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* There has been a change in the phy type before OOB/SN for
+ * SATA finished; start down the SAS link training path.
+ */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure; change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* These events might be received since we dont know how many may be in
+ * the completion queue while waiting for power
+ */
+ break;
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+
+ /* We have received the SATA PHY notification change state */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+ break;
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* There has been a change in the phy type before OOB/SN for
+ * SATA finished; start down the SAS link training path.
+ */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__,
+ event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ /*
+ * The hardware reports multiple SATA PHY detected events
+ * ignore the extras */
+ break;
+ case SCU_EVENT_SATA_15:
+ case SCU_EVENT_SATA_15_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_SATA_30:
+ case SCU_EVENT_SATA_30_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_SATA_60:
+ case SCU_EVENT_SATA_60_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure; change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /*
+ * There has been a change in the phy type before OOB/SN for
+ * SATA finished; start down the SAS link training path. */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+
+ return SCI_FAILURE;
+ }
+
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ /* Backup the state machine */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+ break;
+
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure; change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__,
+ event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_READY:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure; change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_BROADCAST_CHANGE:
+ /* Broadcast change received. Notify the port. */
+ if (phy_get_non_dummy_port(iphy) != NULL)
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
+ else
+ iphy->bcn_received_while_port_unassigned = true;
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%sP SCIC PHY 0x%p ready state machine received "
+ "unexpected event_code %x\n",
+ __func__, iphy, event_code);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_RESETTING:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_HARD_RESET_TRANSMITTED:
+ /* Hard reset was transmitted; change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: SCIC PHY 0x%p resetting state machine received "
+ "unexpected event_code %x\n",
+ __func__, iphy, event_code);
+
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ enum sci_status result;
+ unsigned long flags;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_IAF_UF: {
+ u32 *frame_words;
+ struct sas_identify_frame iaf;
+
+ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_words);
+
+ if (result != SCI_SUCCESS)
+ return result;
+
+ sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
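+ /* An address frame type of 0 denotes an identify address
+ * frame (IAF) per the SAS specification.
+ */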
+ if (iaf.frame_type == 0) {
+ u32 state;
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+ if (iaf.smp_tport) {
+ /* We got the IAF for an expander PHY; go to the final
+ * state since there are no power requirements for
+ * expander phys.
+ */
+ state = SCI_PHY_SUB_FINAL;
+ } else {
+ /* We got the IAF; we can now go to the await spinup
+ * semaphore state
+ */
+ state = SCI_PHY_SUB_AWAIT_SAS_POWER;
+ }
+ sci_change_state(&iphy->sm, state);
+ result = SCI_SUCCESS;
+ } else
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected frame id %x\n",
+ __func__, frame_index);
+
+ sci_controller_release_frame(ihost, frame_index);
+ return result;
+ }
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
+ struct dev_to_host_fis *frame_header;
+ u32 *fis_frame_data;
+
+ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (result != SCI_SUCCESS)
+ return result;
+
+ if ((frame_header->fis_type == FIS_REGD2H) &&
+ !(frame_header->status & ATA_BUSY)) {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&fis_frame_data);
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+ sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
+ frame_header,
+ fis_frame_data);
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ /* got the signature FIS; we can now go to the final state */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+ result = SCI_SUCCESS;
+ } else
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected frame id %x\n",
+ __func__, frame_index);
+
+ /* Regardless of the result we are done with this frame */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return result;
+ }
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+}
+
+static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* This is just a temporary state; go off to the await OSSP enable substate */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
+}
+
+static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ if (sci_port_link_detected(iphy->owning_port, iphy)) {
+
+ /*
+ * Clear the PE suspend condition so we can actually
+ * receive the SIG FIS.
+ * The hardware will not respond to the XRDY until the PE
+ * suspend condition is cleared.
+ */
+ sci_phy_resume(iphy);
+
+ sci_mod_timer(&iphy->sata_timer,
+ SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
+ } else
+ iphy->is_in_link_training = false;
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* State machine has run to completion so exit out and change
+ * the base state machine to the ready state
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_READY);
+}
+
+/**
+ * scu_link_layer_stop_protocol_engine()
+ * @iphy: This is the struct isci_phy object to stop.
+ *
+ * This method will stop the struct isci_phy object. It does not reset the
+ * protocol engine; it just suspends it and places it in a state where it
+ * will not cause the end device to power up.
+ */
+static void scu_link_layer_stop_protocol_engine(
+ struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+ u32 enable_spinup_value;
+
+ /* Suspend the protocol engine and place it in a sata spinup hold state */
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |=
+ (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
+ SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Disable the notify enable spinup primitives */
+ enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+ enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
+ writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
+}
+
+/**
+ * scu_link_layer_start_oob()
+ * @iphy: the phy on which to start the OOB/SN state machine.
+ *
+ * This method will start the OOB/SN state machine for this struct isci_phy object.
+ */
+static void scu_link_layer_start_oob(
+ struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ scu_sas_pcfg_value &=
+ ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+/**
+ * scu_link_layer_tx_hard_reset()
+ * @iphy: the phy on which to transmit the hard reset request.
+ *
+ * This method will transmit a hard reset request on the specified phy. The SCU
+ * hardware requires that we reset the OOB state machine and set the hard reset
+ * bit in the phy configuration register. We then must start OOB over with the
+ * hard reset bit set.
+ */
+static void scu_link_layer_tx_hard_reset(
+ struct isci_phy *iphy)
+{
+ u32 phy_configuration_value;
+
+ /*
+ * SAS Phys must wait for the HARD_RESET_TX event notification to transition
+ * to the starting state. */
+ phy_configuration_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ phy_configuration_value |=
+ (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
+ writel(phy_configuration_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Now take the OOB state machine out of reset */
+ phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(phy_configuration_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * @todo We need to get to the controller to place this PE in a
+ * reset state
+ */
+ sci_del_timer(&iphy->sata_timer);
+
+ scu_link_layer_stop_protocol_engine(iphy);
+
+ if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
+ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ scu_link_layer_stop_protocol_engine(iphy);
+ scu_link_layer_start_oob(iphy);
+
+ /* We don't know what kind of phy we are going to be just yet */
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->bcn_received_while_port_unassigned = false;
+
+ if (iphy->sm.previous_state_id == SCI_PHY_READY)
+ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
+}
+
+static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_phy_suspend(iphy);
+}
+
+static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* The phy is being reset, therefore deactivate it from the port. In
+ * the resetting state we don't notify the user regarding link up and
+ * link down notifications
+ */
+ sci_port_deactivate_phy(iphy->owning_port, iphy, false);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ scu_link_layer_tx_hard_reset(iphy);
+ } else {
+ /* The SCU does not need to have a discrete reset state so
+ * just go back to the starting state.
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ }
+}
+
+static const struct sci_base_state sci_phy_state_table[] = {
+ [SCI_PHY_INITIAL] = { },
+ [SCI_PHY_STOPPED] = {
+ .enter_state = sci_phy_stopped_state_enter,
+ },
+ [SCI_PHY_STARTING] = {
+ .enter_state = sci_phy_starting_state_enter,
+ },
+ [SCI_PHY_SUB_INITIAL] = {
+ .enter_state = sci_phy_starting_initial_substate_enter,
+ },
+ [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
+ [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
+ .enter_state = sci_phy_starting_await_sas_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sas_power_substate_exit,
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
+ .enter_state = sci_phy_starting_await_sata_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_power_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
+ .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_phy_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
+ .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_speed_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
+ .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
+ .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit
+ },
+ [SCI_PHY_SUB_FINAL] = {
+ .enter_state = sci_phy_starting_final_substate_enter,
+ },
+ [SCI_PHY_READY] = {
+ .enter_state = sci_phy_ready_state_enter,
+ .exit_state = sci_phy_ready_state_exit,
+ },
+ [SCI_PHY_RESETTING] = {
+ .enter_state = sci_phy_resetting_state_enter,
+ },
+ [SCI_PHY_FINAL] = { },
+};
+
+void sci_phy_construct(struct isci_phy *iphy,
+ struct isci_port *iport, u8 phy_index)
+{
+ sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
+
+ /* Copy the rest of the input data to our locals */
+ iphy->owning_port = iport;
+ iphy->phy_index = phy_index;
+ iphy->bcn_received_while_port_unassigned = false;
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->link_layer_registers = NULL;
+ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+ /* Create the SIGNATURE FIS Timeout timer for this phy */
+ sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
+}
+
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
+{
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ u64 sci_sas_addr;
+ __be64 sas_addr;
+
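+ /* Assemble the 64-bit SAS address from the OEM parameter words and
+ * store it in big-endian byte order, which is what the sas_addr[]
+ * buffer consumed by libsas expects.
+ */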
+ sci_sas_addr = oem->phys[index].sas_address.high;
+ sci_sas_addr <<= 32;
+ sci_sas_addr |= oem->phys[index].sas_address.low;
+ sas_addr = cpu_to_be64(sci_sas_addr);
+ memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
+
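+ /* Initialize the libsas view (asd_sas_phy) of this phy; the address,
+ * received-frame buffer, and lldd back-pointer below are what libsas
+ * uses during discovery.
+ */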
+ iphy->isci_port = NULL;
+ iphy->sas_phy.enabled = 0;
+ iphy->sas_phy.id = index;
+ iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
+ iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
+ iphy->sas_phy.ha = &ihost->sas_ha;
+ iphy->sas_phy.lldd_phy = iphy;
+ iphy->sas_phy.enabled = 1;
+ iphy->sas_phy.class = SAS;
+ iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
+ iphy->sas_phy.tproto = 0;
+ iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
+ iphy->sas_phy.role = PHY_ROLE_INITIATOR;
+ iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
+ iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
+ memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
+}
+
+
+/**
+ * isci_phy_control() - This function is one of the SAS Domain Template
+ * functions. This is a phy management function.
+ * @sas_phy: This parameter specifies the phy being controlled.
+ * @func: This parameter specifies the phy control function being invoked.
+ * @buf: This parameter is specific to the phy function being invoked.
+ *
+ * status, zero indicates success.
+ */
+int isci_phy_control(struct asd_sas_phy *sas_phy,
+ enum phy_func func,
+ void *buf)
+{
+ int ret = 0;
+ struct isci_phy *iphy = sas_phy->lldd_phy;
+ struct isci_port *iport = iphy->isci_port;
+ struct isci_host *ihost = sas_phy->ha->lldd_ha;
+ unsigned long flags;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
+ __func__, sas_phy, func, buf, iphy, iport);
+
+ switch (func) {
+ case PHY_FUNC_DISABLE:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ sci_phy_stop(iphy);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ break;
+
+ case PHY_FUNC_LINK_RESET:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ sci_phy_stop(iphy);
+ sci_phy_start(iphy);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ break;
+
+ case PHY_FUNC_HARD_RESET:
+ if (!iport)
+ return -ENODEV;
+
+ /* Perform the port reset. */
+ ret = isci_port_perform_hard_reset(ihost, iport, iphy);
+
+ break;
+
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: phy %p; func %d NOT IMPLEMENTED!\n",
+ __func__, sas_phy, func);
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
new file mode 100644
index 000000000000..67699c8e321c
--- /dev/null
+++ b/drivers/scsi/isci/phy.h
@@ -0,0 +1,504 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PHY_H_
+#define _ISCI_PHY_H_
+
+#include <scsi/sas.h>
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+
+/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
+ * before restarting the starting state machine. Technically, the old parallel
+ * ATA specification required up to 30 seconds for a device to issue its
+ * signature FIS as a result of a soft reset. Now we see that devices respond
+ * generally within 15 seconds, but we'll use 25 for now.
+ */
+#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT 25000
+
+/* This is the timeout for the SATA OOB/SN because the hardware does not
+ * recognize a hot plug after OOB signal but before the SN signals. We need to
+ * make sure after a hotplug timeout if we have not received the speed event
+ * notification from the hardware that we restart the hardware OOB state
+ * machine.
+ */
+#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
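+/* Both timeout values above are expressed in milliseconds; they are armed
+ * with sci_mod_timer() when the corresponding starting substate is entered.
+ */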
+
+enum sci_phy_protocol {
+ SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
+ SCIC_SDS_PHY_PROTOCOL_SAS,
+ SCIC_SDS_PHY_PROTOCOL_SATA,
+ SCIC_SDS_MAX_PHY_PROTOCOLS
+};
+
+/**
+ * isci_phy - hba local phy infrastructure
+ * @sm: state machine covering the phy's base states and starting substates
+ * @protocol: attached device protocol
+ * @phy_index: physical index relative to the controller (0-3)
+ * @bcn_received_while_port_unassigned: bcn to report after port association
+ * @sata_timer: timeout SATA signature FIS arrival
+ */
+struct isci_phy {
+ struct sci_base_state_machine sm;
+ struct isci_port *owning_port;
+ enum sas_linkrate max_negotiated_speed;
+ enum sci_phy_protocol protocol;
+ u8 phy_index;
+ bool bcn_received_while_port_unassigned;
+ bool is_in_link_training;
+ struct sci_timer sata_timer;
+ struct scu_transport_layer_registers __iomem *transport_layer_registers;
+ struct scu_link_layer_registers __iomem *link_layer_registers;
+ struct asd_sas_phy sas_phy;
+ struct isci_port *isci_port;
+ u8 sas_addr[SAS_ADDR_SIZE];
+ union {
+ struct sas_identify_frame iaf;
+ struct dev_to_host_fis fis;
+ } frame_rcvd;
+};
+
+static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
+{
+ struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
+
+ return iphy;
+}
+
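+/* this data structure reflects the phy capabilities exchanged during speed
+ * negotiation
+ */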
+struct sci_phy_cap {
+ union {
+ struct {
+ /*
+ * The SAS specification indicates the start bit shall
+ * always be set to 1. This implementation will have the
+ * start bit set to 0 if the PHY CAPABILITIES were either
+ * not received or speed negotiation failed.
+ */
+ u8 start:1;
+ u8 tx_ssc_type:1;
+ u8 res1:2;
+ u8 req_logical_linkrate:4;
+
+ u32 gen1_no_ssc:1;
+ u32 gen1_ssc:1;
+ u32 gen2_no_ssc:1;
+ u32 gen2_ssc:1;
+ u32 gen3_no_ssc:1;
+ u32 gen3_ssc:1;
+ u32 res2:17;
+ u32 parity:1;
+ };
+ u32 all;
+ };
+} __packed;
+
+/* this data structure reflects the link layer transmit identification reg */
+struct sci_phy_proto {
+ union {
+ struct {
+ u16 _r_a:1;
+ u16 smp_iport:1;
+ u16 stp_iport:1;
+ u16 ssp_iport:1;
+ u16 _r_b:4;
+ u16 _r_c:1;
+ u16 smp_tport:1;
+ u16 stp_tport:1;
+ u16 ssp_tport:1;
+ u16 _r_d:4;
+ };
+ u16 all;
+ };
+} __packed;
+
+
+/**
+ * struct sci_phy_properties - This structure defines the properties common to
+ * all phys that can be retrieved.
+ *
+ *
+ */
+struct sci_phy_properties {
+ /**
+ * This field specifies the port that currently contains the
+ * supplied phy. This field may be set to NULL
+ * if the phy is not currently contained in a port.
+ */
+ struct isci_port *iport;
+
+ /**
+ * This field specifies the link rate at which the phy is
+ * currently operating.
+ */
+ enum sas_linkrate negotiated_link_rate;
+
+ /**
+ * This field specifies the index of the phy in relation to other
+ * phys within the controller. This index is zero relative.
+ */
+ u8 index;
+};
+
+/**
+ * struct sci_sas_phy_properties - This structure defines the properties,
+ * specific to a SAS phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sas_phy_properties {
+ /**
+ * This field delineates the Identify Address Frame received
+ * from the remote end point.
+ */
+ struct sas_identify_frame rcvd_iaf;
+
+ /**
+ * This field delineates the Phy capabilities structure received
+ * from the remote end point.
+ */
+ struct sci_phy_cap rcvd_cap;
+
+};
+
+/**
+ * struct sci_sata_phy_properties - This structure defines the properties,
+ * specific to a SATA phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sata_phy_properties {
+ /**
+ * This field delineates the signature FIS received from the
+ * attached target.
+ */
+ struct dev_to_host_fis signature_fis;
+
+ /**
+ * This field specifies to the user if a port selector is connected
+ * on the specified phy.
+ */
+ bool is_port_selector_present;
+
+};
+
+/**
+ * enum sci_phy_counter_id - This enumeration depicts the various pieces of
+ * optional information that can be retrieved for a specific phy.
+ *
+ *
+ */
+enum sci_phy_counter_id {
+ /**
+ * This PHY information field tracks the number of frames received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME,
+
+ /**
+ * This PHY information field tracks the number of frames transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
+
+ /**
+ * This PHY information field tracks the number of DWORDs received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
+
+ /**
+ * This PHY information field tracks the number of DWORDs transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
+
+ /**
+ * This PHY information field tracks the number of times DWORD
+ * synchronization was lost.
+ */
+ SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
+
+ /**
+ * This PHY information field tracks the number of received DWORDs with
+ * running disparity errors.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
+
+ /**
+ * This PHY information field tracks the number of received frames with a
+ * CRC error (not including short or truncated frames).
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
+
+ /**
+ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+ * primitives received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+ * primitives transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of times the inactivity
+ * timer for connections on the phy has been utilized.
+ */
+ SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
+
+ /**
+ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+ * primitives received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+ * primitives transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of CREDIT BLOCKED
+ * primitives received.
+ * @note Depending on remote device implementation, credit blocks
+ * may occur regularly.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
+
+ /**
+ * This PHY information field contains the number of short frames
+ * received. A short frame is simply a frame smaller then what is
+ * allowed by either the SAS or SATA specification.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
+
+ /**
+ * This PHY information field contains the number of frames received after
+ * credit has been exhausted.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
+
+ /**
+ * This PHY information field contains the number of frames received after
+ * a DONE has been received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
+
+ /**
+ * This PHY information field contains the number of times the phy
+ * failed to achieve DWORD synchronization during speed negotiation.
+ */
+ SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
+};
+
+enum sci_phy_states {
+ /**
+ * Simply the initial state for the base domain state machine.
+ */
+ SCI_PHY_INITIAL,
+
+ /**
+ * This state indicates that the phy has successfully been stopped.
+ * In this state no new IO operations are permitted on this phy.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the STARTING state.
+ * This state is entered from the READY state.
+ * This state is entered from the RESETTING state.
+ */
+ SCI_PHY_STOPPED,
+
+ /**
+ * This state indicates that the phy is in the process of becoming
+ * ready. In this state no new IO operations are permitted on this phy.
+ * This state is entered from the STOPPED state.
+ * This state is entered from the READY state.
+ * This state is entered from the RESETTING state.
+ */
+ SCI_PHY_STARTING,
+
+ /**
+ * Initial state
+ */
+ SCI_PHY_SUB_INITIAL,
+
+ /**
+ * Wait state for the hardware OSSP event type notification
+ */
+ SCI_PHY_SUB_AWAIT_OSSP_EN,
+
+ /**
+ * Wait state for the PHY speed notification
+ */
+ SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
+
+ /**
+ * Wait state for the IAF Unsolicited frame notification
+ */
+ SCI_PHY_SUB_AWAIT_IAF_UF,
+
+ /**
+ * Wait state for the request to consume power
+ */
+ SCI_PHY_SUB_AWAIT_SAS_POWER,
+
+ /**
+ * Wait state for request to consume power
+ */
+ SCI_PHY_SUB_AWAIT_SATA_POWER,
+
+ /**
+ * Wait state for the SATA PHY notification
+ */
+ SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
+
+ /**
+ * Wait for the SATA PHY speed notification
+ */
+ SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
+
+ /**
+ * Wait state for the SIGNATURE FIS unsolicited frame notification
+ */
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
+
+ /**
+ * Exit state for this state machine
+ */
+ SCI_PHY_SUB_FINAL,
+
+ /**
+ * This state indicates that the phy is now ready. Thus, the user
+ * is able to perform IO operations utilizing this phy as long as it
+ * is currently part of a valid port.
+ * This state is entered from the STARTING state.
+ */
+ SCI_PHY_READY,
+
+ /**
+ * This state indicates that the phy is in the process of being reset.
+ * In this state no new IO operations are permitted on this phy.
+ * This state is entered from the READY state.
+ */
+ SCI_PHY_RESETTING,
+
+ /**
+ * Simply the final state for the base phy state machine.
+ */
+ SCI_PHY_FINAL,
+};
+
+void sci_phy_construct(
+ struct isci_phy *iphy,
+ struct isci_port *iport,
+ u8 phy_index);
+
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
+
+void sci_phy_set_port(
+ struct isci_phy *iphy,
+ struct isci_port *iport);
+
+enum sci_status sci_phy_initialize(
+ struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *transport_layer_registers,
+ struct scu_link_layer_registers __iomem *link_layer_registers);
+
+enum sci_status sci_phy_start(
+ struct isci_phy *iphy);
+
+enum sci_status sci_phy_stop(
+ struct isci_phy *iphy);
+
+enum sci_status sci_phy_reset(
+ struct isci_phy *iphy);
+
+void sci_phy_resume(
+ struct isci_phy *iphy);
+
+void sci_phy_setup_transport(
+ struct isci_phy *iphy,
+ u32 device_id);
+
+enum sci_status sci_phy_event_handler(
+ struct isci_phy *iphy,
+ u32 event_code);
+
+enum sci_status sci_phy_frame_handler(
+ struct isci_phy *iphy,
+ u32 frame_index);
+
+enum sci_status sci_phy_consume_power_handler(
+ struct isci_phy *iphy);
+
+void sci_phy_get_sas_address(
+ struct isci_phy *iphy,
+ struct sci_sas_address *sas_address);
+
+void sci_phy_get_attached_sas_address(
+ struct isci_phy *iphy,
+ struct sci_sas_address *sas_address);
+
+struct sci_phy_proto;
+void sci_phy_get_protocols(
+ struct isci_phy *iphy,
+ struct sci_phy_proto *protocols);
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
+
+struct isci_host;
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
+int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
+
+#endif /* !defined(_ISCI_PHY_H_) */
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
new file mode 100644
index 000000000000..8f6f9b77e41a
--- /dev/null
+++ b/drivers/scsi/isci/port.c
@@ -0,0 +1,1757 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "port.h"
+#include "request.h"
+
+#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
+#define SCU_DUMMY_INDEX (0xFFFF)
+
+static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
+{
+ unsigned long flags;
+
+ dev_dbg(&iport->isci_host->pdev->dev,
+ "%s: iport = %p, state = 0x%x\n",
+ __func__, iport, status);
+
+ /* XXX pointless lock */
+ spin_lock_irqsave(&iport->state_lock, flags);
+ iport->status = status;
+ spin_unlock_irqrestore(&iport->state_lock, flags);
+}
+
+static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
+{
+ u8 index;
+
+ proto->all = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct isci_phy *iphy = iport->phy_table[index];
+
+ if (!iphy)
+ continue;
+ sci_phy_get_protocols(iphy, proto);
+ }
+}
+
+static u32 sci_port_get_phys(struct isci_port *iport)
+{
+ u32 index;
+ u32 mask;
+
+ mask = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
+ mask |= (1 << index);
+
+ return mask;
+}
+
+/**
+ * sci_port_get_properties() - This method simply returns the properties
+ * regarding the port, such as: physical index, protocols, sas address, etc.
+ * @iport: this parameter specifies the port for which to retrieve the physical
+ * index.
+ * @prop: This parameter specifies the properties structure into which to
+ * copy the requested information.
+ *
+ * Indicate if the user specified a valid port. SCI_SUCCESS This value is
+ * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This
+ * value is returned if the specified port is not valid. When this value is
+ * returned, no data is copied to the properties output parameter.
+ */
+static enum sci_status sci_port_get_properties(struct isci_port *iport,
+ struct sci_port_properties *prop)
+{
+ if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
+ return SCI_FAILURE_INVALID_PORT;
+
+ prop->index = iport->logical_port_index;
+ prop->phy_mask = sci_port_get_phys(iport);
+ sci_port_get_sas_address(iport, &prop->local.sas_address);
+ sci_port_get_protocols(iport, &prop->local.protocols);
+ sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
+
+ return SCI_SUCCESS;
+}
+
+static void sci_port_bcn_enable(struct isci_port *iport)
+{
+ struct isci_phy *iphy;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+ iphy = iport->phy_table[i];
+ if (!iphy)
+ continue;
+ val = readl(&iphy->link_layer_registers->link_layer_control);
+ /* clear the bit by writing 1. */
+ writel(val, &iphy->link_layer_registers->link_layer_control);
+ }
+}
+
+/* called under sci_lock to stabilize phy:port associations */
+void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
+{
+ int i;
+
+ clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
+ wake_up(&ihost->eventq);
+
+ if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+ struct isci_phy *iphy = iport->phy_table[i];
+
+ if (!iphy)
+ continue;
+
+ ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BROADCAST_RCVD);
+ break;
+ }
+}
+
+static void isci_port_bc_change_received(struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
+ __func__, iphy, &iphy->sas_phy);
+ set_bit(IPORT_BCN_PENDING, &iport->flags);
+ atomic_inc(&iport->event);
+ wake_up(&ihost->eventq);
+ } else {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_phy = %p, sas_phy = %p\n",
+ __func__, iphy, &iphy->sas_phy);
+
+ ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BROADCAST_RCVD);
+ }
+ sci_port_bcn_enable(iport);
+}
+
+static void isci_port_link_up(struct isci_host *isci_host,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ unsigned long flags;
+ struct sci_port_properties properties;
+ unsigned long success = true;
+
+ BUG_ON(iphy->isci_port != NULL);
+
+ iphy->isci_port = iport;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n",
+ __func__, iport);
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ isci_port_change_state(iphy->isci_port, isci_starting);
+
+ sci_port_get_properties(iport, &properties);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
+ u64 attached_sas_address;
+
+ iphy->sas_phy.oob_mode = SATA_OOB_MODE;
+ iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
+
+ /*
+ * For direct-attached SATA devices, the SCI core will
+ * automagically assign a SAS address to the end device
+ * for the purpose of creating a port. This SAS address
+ * will not be the same as assigned to the PHY and needs
+ * to be obtained from struct sci_port_properties properties.
+ */
+ attached_sas_address = properties.remote.sas_address.high;
+ attached_sas_address <<= 32;
+ attached_sas_address |= properties.remote.sas_address.low;
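+ /* Byte-swap the synthesized address into the byte order
+ * libsas expects for attached_sas_addr before copying it in.
+ */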
+ swab64s(&attached_sas_address);
+
+ memcpy(&iphy->sas_phy.attached_sas_addr,
+ &attached_sas_address, sizeof(attached_sas_address));
+ } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ iphy->sas_phy.oob_mode = SAS_OOB_MODE;
+ iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
+
+ /* Copy the attached SAS address from the IAF */
+ memcpy(iphy->sas_phy.attached_sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
+ } else {
+ dev_err(&isci_host->pdev->dev, "%s: unkown target\n", __func__);
+ success = false;
+ }
+
+ iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
+
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ /* Notify libsas that we have an address frame, if indeed
+ * we've found an SSP, SMP, or STP target */
+ if (success)
+ isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BYTES_DMAED);
+}
+
+
+/**
+ * isci_port_link_down() - This function is called by the sci core when a link
+ * becomes inactive.
+ * @isci_host: This parameter specifies the isci host object.
+ * @phy: This parameter specifies the isci phy with the active link.
+ * @port: This parameter specifies the isci port with the active link.
+ *
+ */
+static void isci_port_link_down(struct isci_host *isci_host,
+ struct isci_phy *isci_phy,
+ struct isci_port *isci_port)
+{
+ struct isci_remote_device *isci_device;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+
+ if (isci_port) {
+
+ /* check to see if this is the last phy on this port. */
+ if (isci_phy->sas_phy.port &&
+ isci_phy->sas_phy.port->num_phys == 1) {
+ atomic_inc(&isci_port->event);
+ isci_port_bcn_enable(isci_host, isci_port);
+
+ /* change the state for all devices on this port. The
+ * next task sent to this device will be returned as
+ * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+ * remove the target
+ */
+ list_for_each_entry(isci_device,
+ &isci_port->remote_dev_list,
+ node) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n",
+ __func__, isci_device);
+ set_bit(IDEV_GONE, &isci_device->flags);
+ }
+ }
+ isci_port_change_state(isci_port, isci_stopping);
+ }
+
+ /* Notify libsas of the broken link; this will trigger calls to our
+ * isci_port_deformed and isci_dev_gone functions.
+ */
+ sas_phy_disconnected(&isci_phy->sas_phy);
+ isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+
+ isci_phy->isci_port = NULL;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p - Done\n", __func__, isci_port);
+}
+
+
+/**
+ * isci_port_ready() - This function is called by the sci core when a link
+ * becomes ready.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+
+ complete_all(&isci_port->start_complete);
+ isci_port_change_state(isci_port, isci_ready);
+ return;
+}
+
+/**
+ * isci_port_not_ready() - This function is called by the sci core when a link
+ * is not ready. All remote devices on this link will be removed if they are
+ * in the stopping state.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+}
+
+static void isci_port_stop_complete(struct isci_host *ihost,
+ struct isci_port *iport,
+ enum sci_status completion_status)
+{
+ dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
+}
+
+/**
+ * isci_port_hard_reset_complete() - This function is called by the sci core
+ * when the hard reset complete notification has been received.
+ * @port: This parameter specifies the sci port with the active link.
+ * @completion_status: This parameter specifies the core status for the reset
+ * process.
+ *
+ */
+static void isci_port_hard_reset_complete(struct isci_port *isci_port,
+ enum sci_status completion_status)
+{
+ dev_dbg(&isci_port->isci_host->pdev->dev,
+ "%s: isci_port = %p, completion_status=%x\n",
+ __func__, isci_port, completion_status);
+
+ /* Save the status of the hard reset from the port. */
+ isci_port->hard_reset_status = completion_status;
+
+ complete_all(&isci_port->hard_reset_complete);
+}
+
+/* This method will return a true value if the specified phy can be assigned to
+ * this port. The following is a list of phys for each port that are allowed:
+ * Port 0 - 3, 2, 1, 0; Port 1 - 1; Port 2 - 3, 2; Port 3 - 3. This method
+ * doesn't preclude all configurations. It merely ensures that a phy is part
+ * of the allowable set of phy identifiers for that port. For example, one
+ * could assign phy 3 to port 0 and no other phys. Please refer to
+ * sci_port_is_phy_mask_valid() for information regarding whether the
+ * phy_mask for a port can be supported. Returns true if this is a valid phy
+ * assignment for the port, false if it is not.
+ */
+bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ struct sci_user_parameters *user = &ihost->user_parameters;
+
+ /* Initialize to invalid value. */
+ u32 existing_phy_index = SCI_MAX_PHYS;
+ u32 index;
+
+ if ((iport->physical_port_index == 1) && (phy_index != 1))
+ return false;
+
+ if (iport->physical_port_index == 3 && phy_index != 3)
+ return false;
+
+ if (iport->physical_port_index == 2 &&
+ (phy_index == 0 || phy_index == 1))
+ return false;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index] && index != phy_index)
+ existing_phy_index = index;
+
+ /* Ensure that all of the phys in the port are capable of
+ * operating at the same maximum link rate.
+ */
+ if (existing_phy_index < SCI_MAX_PHYS &&
+ user->phys[phy_index].max_speed_generation !=
+ user->phys[existing_phy_index].max_speed_generation)
+ return false;
+
+ return true;
+}
+
+/**
+ * sci_port_is_phy_mask_valid()
+ * @iport: This is the port object for which to determine if the phy mask
+ * can be supported.
+ * @phy_mask: the phy mask to check.
+ *
+ * This method will return a true value if the port's phy mask can be supported
+ * by the SCU. The following is a list of valid PHY mask configurations for
+ * each port: Port 0 - [[3 2] 1] 0; Port 1 - [1]; Port 2 - [[3] 2];
+ * Port 3 - [3]. Returns true if the phy mask is valid for the port, false
+ * if it is not.
+ */
+static bool sci_port_is_phy_mask_valid(
+ struct isci_port *iport,
+ u32 phy_mask)
+{
+ if (iport->physical_port_index == 0) {
+ if (((phy_mask & 0x0F) == 0x0F)
+ || ((phy_mask & 0x03) == 0x03)
+ || ((phy_mask & 0x01) == 0x01)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 1) {
+ if (((phy_mask & 0x02) == 0x02)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 2) {
+ if (((phy_mask & 0x0C) == 0x0C)
+ || ((phy_mask & 0x04) == 0x04)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 3) {
+ if (((phy_mask & 0x08) == 0x08)
+ || (phy_mask == 0))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * This method retrieves a currently active (i.e. connected) phy contained in
+ * the port. Currently, the lowest order phy that is connected is returned.
+ * This method returns a pointer to an isci_phy object. NULL is returned if
+ * there are no currently active (i.e. connected to a remote end point) phys
+ * contained in the port. All other values specify an isci_phy object that is
+ * active in the port.
+ */
+static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
+{
+ u32 index;
+ struct isci_phy *iphy;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ /* Ensure that the phy is both part of the port and currently
+ * connected to the remote end-point.
+ */
+ iphy = iport->phy_table[index];
+ if (iphy && sci_port_active_phy(iport, iphy))
+ return iphy;
+ }
+
+ return NULL;
+}
+
+static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ /* Check to see if we can add this phy to a port. That
+ * means that the phy is not part of a port and that the port does
+ * not already have a phy assigned to the phy index.
+ */
+ if (!iport->phy_table[iphy->phy_index] &&
+ !phy_get_non_dummy_port(iphy) &&
+ sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ /* Phy is being added in the stopped state so we are in MPC mode
+ * make logical port index = physical port index
+ */
+ iport->logical_port_index = iport->physical_port_index;
+ iport->phy_table[iphy->phy_index] = iphy;
+ sci_phy_set_port(iphy, iport);
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE;
+}
+
+static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ /* Make sure that this phy is part of this port */
+ if (iport->phy_table[iphy->phy_index] == iphy &&
+ phy_get_non_dummy_port(iphy) == iport) {
+ struct isci_host *ihost = iport->owning_controller;
+
+ /* Yep it is assigned to this port so remove it */
+ sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
+ iport->phy_table[iphy->phy_index] = NULL;
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE;
+}
+
+void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+ u32 index;
+
+ sas->high = 0;
+ sas->low = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
+ sci_phy_get_sas_address(iport->phy_table[index], sas);
+}
+
+void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+ struct isci_phy *iphy;
+
+ /*
+ * Ensure that the phy is both part of the port and currently
+ * connected to the remote end-point.
+ */
+ iphy = sci_port_get_a_connected_phy(iport);
+ if (iphy) {
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
+ sci_phy_get_attached_sas_address(iphy, sas);
+ } else {
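+ /* A direct-attached SATA device has no SAS address of
+ * its own, so synthesize a unique one by offsetting
+ * the local phy address with the phy index.
+ */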
+ sci_phy_get_sas_address(iphy, sas);
+ sas->low += iphy->phy_index;
+ }
+ } else {
+ sas->high = 0;
+ sas->low = 0;
+ }
+}
+
+/**
+ * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
+ *
+ * @sci_port: logical port on which we need to create the remote node context
+ * @rni: remote node index for this remote node context.
+ *
+ * This routine will construct a dummy remote node context data structure
+ * This structure will be posted to the hardware to work around a scheduler
+ * error in the hardware.
+ */
+static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
+{
+ union scu_remote_node_context *rnc;
+
+ rnc = &iport->owning_controller->remote_node_context_table[rni];
+
+ memset(rnc, 0, sizeof(union scu_remote_node_context));
+
+ rnc->ssp.remote_sas_address_hi = 0;
+ rnc->ssp.remote_sas_address_lo = 0;
+
+ rnc->ssp.remote_node_index = rni;
+ rnc->ssp.remote_node_port_width = 1;
+ rnc->ssp.logical_port_index = iport->physical_port_index;
+
+ rnc->ssp.nexus_loss_timer_enable = false;
+ rnc->ssp.check_bit = false;
+ rnc->ssp.is_valid = true;
+ rnc->ssp.is_remote_node_context = true;
+ rnc->ssp.function_number = 0;
+ rnc->ssp.arbitration_wait_time = 0;
+}
+
+/*
+ * construct a dummy task context data structure. This
+ * structure will be posted to the hardware to work around a scheduler error
+ * in the hardware.
+ */
+static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ struct scu_task_context *task_context;
+
+ task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ memset(task_context, 0, sizeof(struct scu_task_context));
+
+ task_context->initiator_request = 1;
+ task_context->connection_rate = 1;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ task_context->task_index = ISCI_TAG_TCI(tag);
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+ task_context->remote_node_index = iport->reserved_rni;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->task_phase = 0x01;
+}
+
+static void sci_port_destroy_dummy_resources(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
+ isci_free_tag(ihost, iport->reserved_tag);
+
+ if (iport->reserved_rni != SCU_DUMMY_INDEX)
+ sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
+ 1, iport->reserved_rni);
+
+ iport->reserved_rni = SCU_DUMMY_INDEX;
+ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
+{
+ u8 index;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->active_phy_mask & (1 << index))
+ sci_phy_setup_transport(iport->phy_table[index], device_id);
+ }
+}
+
+static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+ sci_phy_resume(iphy);
+
+ iport->active_phy_mask |= 1 << iphy->phy_index;
+
+ sci_controller_clear_invalid_phy(ihost, iphy);
+
+ if (do_notify_user == true)
+ isci_port_link_up(ihost, iport, iphy);
+}
+
+void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ iport->active_phy_mask &= ~(1 << iphy->phy_index);
+
+ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+ /* Re-assign the phy back to the LP as if it were a narrow port */
+ writel(iphy->phy_index,
+ &iport->port_pe_configuration_register[iphy->phy_index]);
+
+ if (do_notify_user == true)
+ isci_port_link_down(ihost, iphy, iport);
+}
+
+static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * Check to see if we have already reported this link as bad and if
+ * not go ahead and tell the SCI_USER that we have discovered an
+ * invalid link.
+ */
+ if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
+ ihost->invalid_phy_mask |= 1 << iphy->phy_index;
+ dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
+ }
+}
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+ switch (state) {
+ case SCI_PORT_READY:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* flag dummy rnc handling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+ enum sci_port_states state)
+{
+ struct sci_base_state_machine *sm = &iport->sm;
+ enum sci_port_states old_state = sm->current_state_id;
+
+ if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+ iport->ready_exit = true;
+
+ sci_change_state(sm, state);
+ iport->ready_exit = false;
+}
+
+/**
+ * sci_port_general_link_up_handler - phy can be assigned to port?
+ * @sci_port: the port that has a phy that has gone link up.
+ * @sci_phy: This is the struct isci_phy object that has gone link up.
+ * @do_notify_user: This parameter specifies whether to inform the user (via
+ *    sci_port_link_up()) as to the fact that a new phy has become ready.
+ *
+ * Determine if this phy can be assigned to this port.  If the phy is not a
+ * valid PHY for this port then the function will notify the user.  A PHY can
+ * only be part of a port if its attached SAS address is the same as that of
+ * all other PHYs in the same port.
+ */
+static void sci_port_general_link_up_handler(struct isci_port *iport,
+ struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct sci_sas_address port_sas_address;
+ struct sci_sas_address phy_sas_address;
+
+ sci_port_get_attached_sas_address(iport, &port_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
+
+ /* If the SAS address of the new phy matches the SAS address of
+ * other phys in the port OR this is the first phy in the port,
+ * then activate the phy and allow it to be used for operations
+ * in this port.
+ */
+ if ((phy_sas_address.high == port_sas_address.high &&
+ phy_sas_address.low == port_sas_address.low) ||
+ iport->active_phy_mask == 0) {
+ struct sci_base_state_machine *sm = &iport->sm;
+
+ sci_port_activate_phy(iport, iphy, do_notify_user);
+ if (sm->current_state_id == SCI_PORT_RESETTING)
+ port_state_machine_change(iport, SCI_PORT_READY);
+ } else
+ sci_port_invalid_link_up(iport, iphy);
+}
+
+
+
+/**
+ * This method returns false if the port has exactly one phy object assigned
+ *    (i.e. it is a narrow port).  If there are no phys or more than one phy
+ *    assigned then the method returns true.
+ * @sci_port: The port for which the wide port condition is to be checked.
+ *
+ * bool: true is returned if this is a wide ported port; false is returned if
+ * this is a narrow port.
+ */
+static bool sci_port_is_wide(struct isci_port *iport)
+{
+ u32 index;
+ u32 phy_count = 0;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->phy_table[index] != NULL) {
+ phy_count++;
+ }
+ }
+
+ return phy_count != 1;
+}
+
+/**
+ * This method is called by the PHY object when the link is detected.  If the
+ * port wants the PHY to continue on to the link up state then the port
+ * layer must return true.  If the port object returns false the phy object
+ * must halt its attempt to go link up.
+ * @sci_port: The port associated with the phy object.
+ * @sci_phy: The phy object that is trying to go link up.
+ *
+ * Returns true if the phy object can continue to the link up (ready) state,
+ * and false if it can not.  This notification is in place for wide ports and
+ * direct attached phys.  Since there are no wide ported SATA devices this
+ * could become an invalid port configuration.
+ */
+bool sci_port_link_detected(
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
+ (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
+ sci_port_is_wide(iport)) {
+ sci_port_invalid_link_up(iport, iphy);
+
+ return false;
+ }
+
+ return true;
+}
+
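+/*
+ * Timer callback invoked when a port start, stop or hard reset operation
+ * does not complete within the requested timeout.
+ */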
+static void port_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
+ struct isci_host *ihost = iport->owning_controller;
+ unsigned long flags;
+ u32 current_state;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ current_state = iport->sm.current_state_id;
+
+ if (current_state == SCI_PORT_RESETTING) {
+ /* if the port is still in the resetting state then the timeout
+ * fired before the reset completed.
+ */
+ port_state_machine_change(iport, SCI_PORT_FAILED);
+ } else if (current_state == SCI_PORT_STOPPED) {
+ /* if the port is stopped then the start request failed. In this
+ * case stay in the stopped state.
+ */
+ dev_err(sciport_to_dev(iport),
+ "%s: SCIC Port 0x%p failed to stop before tiemout.\n",
+ __func__,
+ iport);
+ } else if (current_state == SCI_PORT_STOPPING) {
+ /* if the port is still stopping then the stop has not completed */
+ isci_port_stop_complete(iport->owning_controller,
+ iport,
+ SCI_FAILURE_TIMEOUT);
+ } else {
+ /* The port is in the ready state and we have a timer
+ * reporting a timeout; this should not happen.
+ */
+ dev_err(sciport_to_dev(iport),
+ "%s: SCIC Port 0x%p is processing a timeout operation "
+ "in state %d.\n", __func__, iport, current_state);
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* --------------------------------------------------------------------------- */
+
+/**
+ * This function updates the hardware's VIIT entry for this port.
+ *
+ *
+ */
+static void sci_port_update_viit_entry(struct isci_port *iport)
+{
+ struct sci_sas_address sas_address;
+
+ sci_port_get_sas_address(iport, &sas_address);
+
+ writel(sas_address.high,
+ &iport->viit_registers->initiator_sas_address_hi);
+ writel(sas_address.low,
+ &iport->viit_registers->initiator_sas_address_lo);
+
+ /* This value gets cleared just in case it's not already cleared */
+ writel(0, &iport->viit_registers->reserved);
+
+ /* We are required to update the status register last */
+ writel(SCU_VIIT_ENTRY_ID_VIIT |
+ SCU_VIIT_IPPT_INITIATOR |
+ ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
+ SCU_VIIT_STATUS_ALL_VALID,
+ &iport->viit_registers->status);
+}
+
+enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
+{
+ u16 index;
+ struct isci_phy *iphy;
+ enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
+
+ /*
+ * Loop through all of the phys in this port and find the phy with the
+ * lowest maximum link rate. */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ iphy = iport->phy_table[index];
+ if (iphy && sci_port_active_phy(iport, iphy) &&
+ iphy->max_negotiated_speed < max_allowed_speed)
+ max_allowed_speed = iphy->max_negotiated_speed;
+ }
+
+ return max_allowed_speed;
+}
+
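+/* set the suspend bit in the port task scheduler control register */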
+static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+/**
+ * sci_port_post_dummy_request() - post dummy/workaround request
+ * @sci_port: port to post task
+ *
+ * Prevent the hardware scheduler from posting new requests to the front
+ * of the scheduler queue causing a starvation problem for currently
+ * ongoing requests.
+ *
+ */
+static void sci_port_post_dummy_request(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u16 tag = iport->reserved_tag;
+ struct scu_task_context *tc;
+ u32 command;
+
+ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ tc->abort = 0;
+
+ command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+ ISCI_TAG_TCI(tag);
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ * This routine will abort the dummy request. This will allow the hardware to
+ * power down parts of the silicon to save power.
+ *
+ * @sci_port: The port on which the task must be aborted.
+ *
+ */
+static void sci_port_abort_dummy_request(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u16 tag = iport->reserved_tag;
+ struct scu_task_context *tc;
+ u32 command;
+
+ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ tc->abort = 1;
+
+ command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
+ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+ ISCI_TAG_TCI(tag);
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ *
+ * @sci_port: This is the struct isci_port object to resume.
+ *
+ * This method will resume the port task scheduler for this port object.
+ */
+static void
+sci_port_resume_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
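+/*
+ * READY/WAITING entry: suspend the port task scheduler and, if at least one
+ * phy is already active, move straight on to the OPERATIONAL substate.
+ */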
+static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_port_suspend_port_task_scheduler(iport);
+
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
+
+ if (iport->active_phy_mask != 0) {
+ /* At least one of the phys on the port is ready */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ }
+}
+
+static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
+{
+ u32 index;
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ isci_port_ready(ihost, iport);
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->phy_table[index]) {
+ writel(iport->physical_port_index,
+ &iport->port_pe_configuration_register[
+ iport->phy_table[index]->phy_index]);
+ }
+ }
+
+ sci_port_update_viit_entry(iport);
+
+ sci_port_resume_port_task_scheduler(iport);
+
+ /*
+ * Post the dummy task for the port so the hardware can schedule
+ * io correctly
+ */
+ sci_port_post_dummy_request(iport);
+}
+
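+/*
+ * Clear the valid bit in the dummy remote node context and post the rnc
+ * invalidate request to the hardware.
+ */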
+static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u8 phys_index = iport->physical_port_index;
+ union scu_remote_node_context *rnc;
+ u16 rni = iport->reserved_rni;
+ u32 command;
+
+ rnc = &ihost->remote_node_context_table[rni];
+
+ rnc->ssp.is_valid = false;
+
+ /* ensure the preceding tc abort request has reached the
+ * controller and give it ample time to act before posting the rnc
+ * invalidate
+ */
+ readl(&ihost->smu_registers->interrupt_status); /* flush */
+ udelay(10);
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ *
+ * @sm: the port state machine embedded in the struct isci_port object.
+ *
+ * This method will perform the actions required by the struct isci_port on
+ * exiting the SCI_PORT_SUB_OPERATIONAL state: it aborts the dummy request,
+ * reports the port not ready and, when leaving the ready states entirely,
+ * invalidates the dummy remote node context.
+ */
+static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * Kill the dummy task for this port; if it has not yet been posted
+ * the hardware will treat this as a NOP and just return abort
+ * complete.
+ */
+ sci_port_abort_dummy_request(iport);
+
+ isci_port_not_ready(ihost, iport);
+
+ if (iport->ready_exit)
+ sci_port_invalidate_dummy_remote_node(iport);
+}
+
+static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iport->active_phy_mask == 0) {
+ isci_port_not_ready(ihost, iport);
+
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+ } else if (iport->started_request_count == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+}
+
+static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_port_suspend_port_task_scheduler(iport);
+ if (iport->ready_exit)
+ sci_port_invalidate_dummy_remote_node(iport);
+}
+
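+/*
+ * Start a stopped port: allocate the dummy rnc/tag workaround resources and,
+ * if the port's phy mask is valid, transition the port to READY.
+ */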
+enum sci_status sci_port_start(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ enum sci_status status = SCI_SUCCESS;
+ enum sci_port_states state;
+ u32 phy_mask;
+
+ state = iport->sm.current_state_id;
+ if (state != SCI_PORT_STOPPED) {
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ if (iport->assigned_device_count > 0) {
+ /* TODO This is a start failure operation because
+ * there are still devices assigned to this port.
+ * There must be no devices assigned to a port on a
+ * start operation.
+ */
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ if (iport->reserved_rni == SCU_DUMMY_INDEX) {
+ u16 rni = sci_remote_node_table_allocate_remote_node(
+ &ihost->available_remote_nodes, 1);
+
+ if (rni != SCU_DUMMY_INDEX)
+ sci_port_construct_dummy_rnc(iport, rni);
+ else
+ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ iport->reserved_rni = rni;
+ }
+
+ if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+ u16 tag;
+
+ tag = isci_alloc_tag(ihost);
+ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ else
+ sci_port_construct_dummy_task(iport, tag);
+ iport->reserved_tag = tag;
+ }
+
+ if (status == SCI_SUCCESS) {
+ phy_mask = sci_port_get_phys(iport);
+
+ /*
+ * There are one or more phys assigned to this port. Make sure
+ * the port's phy mask is in fact legal and supported by the
+ * silicon.
+ */
+ if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
+ port_state_machine_change(iport,
+ SCI_PORT_READY);
+
+ return SCI_SUCCESS;
+ }
+ status = SCI_FAILURE;
+ }
+
+ if (status != SCI_SUCCESS)
+ sci_port_destroy_dummy_resources(iport);
+
+ return status;
+}
+
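+/*
+ * Request that the port stop; outstanding I/O is allowed to complete while
+ * the port is in the STOPPING state.
+ */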
+enum sci_status sci_port_stop(struct isci_port *iport)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ case SCI_PORT_RESETTING:
+ port_state_machine_change(iport,
+ SCI_PORT_STOPPING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
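+/*
+ * Issue a hard reset on the first active phy found in the port and arm the
+ * port timer to bound the reset operation.
+ */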
+static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
+{
+ enum sci_status status = SCI_FAILURE_INVALID_PHY;
+ struct isci_phy *iphy = NULL;
+ enum sci_port_states state;
+ u32 phy_index;
+
+ state = iport->sm.current_state_id;
+ if (state != SCI_PORT_SUB_OPERATIONAL) {
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ /* Select a phy on which we can send the hard reset request. */
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
+ iphy = iport->phy_table[phy_index];
+ if (iphy && !sci_port_active_phy(iport, iphy)) {
+ /*
+ * We found a phy but it is not ready; select a
+ * different phy
+ */
+ iphy = NULL;
+ }
+ }
+
+ /* If we have a phy then go ahead and start the reset procedure */
+ if (!iphy)
+ return status;
+ status = sci_phy_reset(iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_mod_timer(&iport->timer, timeout);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
+
+ port_state_machine_change(iport, SCI_PORT_RESETTING);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_port_add_phy() -
+ * @sci_port: This parameter specifies the port in which the phy will be added.
+ * @sci_phy: This parameter is the phy which is to be added to the port.
+ *
+ * This method will add a PHY to the selected port.  It returns an enum
+ * sci_status: SCI_SUCCESS indicates the phy has been added to the port; any
+ * other status indicates a failure to add the phy to the port.
+ */
+enum sci_status sci_port_add_phy(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_status status;
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED: {
+ struct sci_sas_address port_sas_address;
+
+ /* Read the port assigned SAS Address if there is one */
+ sci_port_get_sas_address(iport, &port_sas_address);
+
+ if (port_sas_address.high != 0 && port_sas_address.low != 0) {
+ struct sci_sas_address phy_sas_address;
+
+ /* Make sure that the PHY SAS Address matches the SAS Address
+ * for this port
+ */
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+
+ if (port_sas_address.high != phy_sas_address.high ||
+ port_sas_address.low != phy_sas_address.low)
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ return sci_port_set_phy(iport, iphy);
+ }
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ status = sci_port_set_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_port_general_link_up_handler(iport, iphy, true);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+ port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
+
+ return status;
+ case SCI_PORT_SUB_CONFIGURING:
+ status = sci_port_set_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_port_general_link_up_handler(iport, iphy, true);
+
+ /* Re-enter the configuring state since this may be the last phy in
+ * the port.
+ */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+/**
+ * sci_port_remove_phy() -
+ * @sci_port: This parameter specifies the port from which the phy will be removed.
+ * @sci_phy: This parameter is the phy which is to be removed from the port.
+ *
+ * This method will remove the PHY from the selected PORT.  It returns an enum
+ * sci_status: SCI_SUCCESS indicates the phy has been removed from the port;
+ * any other status indicates a failure to remove the phy from the port.
+ */
+enum sci_status sci_port_remove_phy(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_status status;
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ return sci_port_clear_phy(iport, iphy);
+ case SCI_PORT_SUB_OPERATIONAL:
+ status = sci_port_clear_phy(iport, iphy);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_port_deactivate_phy(iport, iphy, true);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_CONFIGURING:
+ status = sci_port_clear_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_port_deactivate_phy(iport, iphy, true);
+
+ /* Re-enter the configuring state since this may be the last phy in
+ * the port
+ */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_WAITING:
+ /* Since this is the first phy going link up for the port we
+ * can just enable it and continue
+ */
+ sci_port_activate_phy(iport, iphy, true);
+
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_general_link_up_handler(iport, iphy, true);
+ return SCI_SUCCESS;
+ case SCI_PORT_RESETTING:
+ /* TODO We should make sure that the phy that has gone
+ * link up is the same one on which we sent the reset. It is
+ * possible that the phy on which we sent the reset is not the
+ * one that has gone link up and we want to make sure that
+ * phy being reset comes back. Consider the case where a
+ * reset is sent but before the hardware processes the reset it
+ * gets a link up on the port because of a hot plug event.
+ * Because of the reset request this phy will go link down
+ * almost immediately.
+ */
+
+ /* In the resetting state we don't notify the user regarding
+ * link up and link down notifications.
+ */
+ sci_port_general_link_up_handler(iport, iphy, false);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_link_down(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_deactivate_phy(iport, iphy, true);
+
+ /* If there are no active phys left in the port, then
+ * transition the port to the WAITING state until such time
+ * as a phy goes link up
+ */
+ if (iport->active_phy_mask == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+ return SCI_SUCCESS;
+ case SCI_PORT_RESETTING:
+ /* In the resetting state we don't notify the user regarding
+ * link up and link down notifications. */
+ sci_port_deactivate_phy(iport, iphy, false);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_start_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_WAITING:
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_PORT_SUB_OPERATIONAL:
+ iport->started_request_count++;
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_complete_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_PORT_STOPPING:
+ sci_port_decrement_request_count(iport);
+
+ if (iport->started_request_count == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_STOPPED);
+ break;
+ case SCI_PORT_READY:
+ case SCI_PORT_RESETTING:
+ case SCI_PORT_FAILED:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_decrement_request_count(iport);
+ break;
+ case SCI_PORT_SUB_CONFIGURING:
+ sci_port_decrement_request_count(iport);
+ if (iport->started_request_count == 0) {
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ }
+ break;
+ }
+ return SCI_SUCCESS;
+}
+
+static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ /* enable the port task scheduler in a suspended state */
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value &=
+ ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
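+/* post the dummy remote node context to the hardware and then suspend it */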
+static void sci_port_post_dummy_remote_node(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u8 phys_index = iport->physical_port_index;
+ union scu_remote_node_context *rnc;
+ u16 rni = iport->reserved_rni;
+ u32 command;
+
+ rnc = &ihost->remote_node_context_table[rni];
+ rnc->ssp.is_valid = true;
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+
+ /* ensure hardware has seen the post rnc command and give it
+ * ample time to act before sending the suspend
+ */
+ readl(&ihost->smu_registers->interrupt_status); /* flush */
+ udelay(10);
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+}
+
+static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
+ /*
+ * If we enter this state because of a request to stop
+ * the port then we want to disable the hardware's port
+ * task scheduler. */
+ sci_port_disable_port_task_scheduler(iport);
+ }
+}
+
+static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ /* Enable and suspend the port task scheduler */
+ sci_port_enable_port_task_scheduler(iport);
+}
+
+static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+ u32 prev_state;
+
+ prev_state = iport->sm.previous_state_id;
+ if (prev_state == SCI_PORT_RESETTING)
+ isci_port_hard_reset_complete(iport, SCI_SUCCESS);
+ else
+ isci_port_not_ready(ihost, iport);
+
+ /* Post and suspend the dummy remote node context for this port. */
+ sci_port_post_dummy_remote_node(iport);
+
+ /* Start the ready substate machine */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+}
+
+static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_del_timer(&iport->timer);
+}
+
+static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_del_timer(&iport->timer);
+
+ sci_port_destroy_dummy_resources(iport);
+}
+
+static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
+}
+
+/* --------------------------------------------------------------------------- */
+
+static const struct sci_base_state sci_port_state_table[] = {
+ [SCI_PORT_STOPPED] = {
+ .enter_state = sci_port_stopped_state_enter,
+ .exit_state = sci_port_stopped_state_exit
+ },
+ [SCI_PORT_STOPPING] = {
+ .exit_state = sci_port_stopping_state_exit
+ },
+ [SCI_PORT_READY] = {
+ .enter_state = sci_port_ready_state_enter,
+ },
+ [SCI_PORT_SUB_WAITING] = {
+ .enter_state = sci_port_ready_substate_waiting_enter,
+ },
+ [SCI_PORT_SUB_OPERATIONAL] = {
+ .enter_state = sci_port_ready_substate_operational_enter,
+ .exit_state = sci_port_ready_substate_operational_exit
+ },
+ [SCI_PORT_SUB_CONFIGURING] = {
+ .enter_state = sci_port_ready_substate_configuring_enter,
+ .exit_state = sci_port_ready_substate_configuring_exit
+ },
+ [SCI_PORT_RESETTING] = {
+ .exit_state = sci_port_resetting_state_exit
+ },
+ [SCI_PORT_FAILED] = {
+ .enter_state = sci_port_failed_state_enter,
+ }
+};
+
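+/*
+ * One-time construction of the port object: initialize the state machine in
+ * the STOPPED state and reset all bookkeeping fields.
+ */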
+void sci_port_construct(struct isci_port *iport, u8 index,
+ struct isci_host *ihost)
+{
+ sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
+
+ iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
+ iport->physical_port_index = index;
+ iport->active_phy_mask = 0;
+ iport->ready_exit = false;
+
+ iport->owning_controller = ihost;
+
+ iport->started_request_count = 0;
+ iport->assigned_device_count = 0;
+
+ iport->reserved_rni = SCU_DUMMY_INDEX;
+ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+
+ sci_init_timer(&iport->timer, port_timeout);
+
+ iport->port_task_scheduler_registers = NULL;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ iport->phy_table[index] = NULL;
+}
+
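+/* initialize the Linux-side port bookkeeping (lists, lock, completions, state) */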
+void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
+{
+ INIT_LIST_HEAD(&iport->remote_dev_list);
+ INIT_LIST_HEAD(&iport->domain_dev_list);
+ spin_lock_init(&iport->state_lock);
+ init_completion(&iport->start_complete);
+ iport->isci_host = ihost;
+ isci_port_change_state(iport, isci_freed);
+ atomic_set(&iport->event, 0);
+}
+
+/**
+ * isci_port_get_state() - This function gets the status of the port object.
+ * @isci_port: This parameter points to the isci_port object
+ *
+ * Returns the status of the object as an isci_status enum.
+ */
+enum isci_status isci_port_get_state(
+ struct isci_port *isci_port)
+{
+ return isci_port->status;
+}
+
+void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ /* notify the user. */
+ isci_port_bc_change_received(ihost, iport, iphy);
+}
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ unsigned long flags;
+ enum sci_status status;
+ int idx, ret = TMF_RESP_FUNC_COMPLETE;
+
+ dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
+ __func__, iport);
+
+ init_completion(&iport->hard_reset_complete);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+ status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (status == SCI_SUCCESS) {
+ wait_for_completion(&iport->hard_reset_complete);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport = %p; hard reset completion\n",
+ __func__, iport);
+
+ if (iport->hard_reset_status != SCI_SUCCESS)
+ ret = TMF_RESP_FUNC_FAILED;
+ } else {
+ ret = TMF_RESP_FUNC_FAILED;
+
+ dev_err(&ihost->pdev->dev,
+ "%s: iport = %p; sci_port_hard_reset call"
+ " failed 0x%x\n",
+ __func__, iport, status);
+
+ }
+
+ /* If the hard reset for the port has failed, consider this
+ * the same as link failures on all phys in the port.
+ */
+ if (ret != TMF_RESP_FUNC_COMPLETE) {
+
+ dev_err(&ihost->pdev->dev,
+ "%s: iport = %p; hard reset failed "
+ "(0x%x) - driving explicit link fail for all phys\n",
+ __func__, iport, iport->hard_reset_status);
+
+ /* Down all phys in the port. */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
+ struct isci_phy *iphy = iport->phy_table[idx];
+
+ if (!iphy)
+ continue;
+ sci_phy_stop(iphy);
+ sci_phy_start(iphy);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+ return ret;
+}
+
+/**
+ * isci_port_deformed() - This function is called by libsas when a port becomes
+ * inactive.
+ * @phy: This parameter specifies the libsas phy with the inactive port.
+ *
+ */
+void isci_port_deformed(struct asd_sas_phy *phy)
+{
+ pr_debug("%s: sas_phy = %p\n", __func__, phy);
+}
+
+/**
+ * isci_port_formed() - This function is called by libsas when a port becomes
+ * active.
+ * @phy: This parameter specifies the libsas phy with the active port.
+ *
+ */
+void isci_port_formed(struct asd_sas_phy *phy)
+{
+ pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
+}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
new file mode 100644
index 000000000000..b50ecd4e8f9c
--- /dev/null
+++ b/drivers/scsi/isci/port.h
@@ -0,0 +1,306 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_PORT_H_
+#define _ISCI_PORT_H_
+
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+#include "phy.h"
+
+#define SCIC_SDS_DUMMY_PORT 0xFF
+
+struct isci_phy;
+struct isci_host;
+
+enum isci_status {
+ isci_freed = 0x00,
+ isci_starting = 0x01,
+ isci_ready = 0x02,
+ isci_ready_for_io = 0x03,
+ isci_stopping = 0x04,
+ isci_stopped = 0x05,
+};
+
+/**
+ * struct isci_port - isci direct attached sas port object
+ * @event: counts bcns and port stop events (for bcn filtering)
+ * @ready_exit: several states constitute 'ready'. When exiting ready we
+ * need to take extra port-teardown actions that are
+ * skipped when exiting to another 'ready' state.
+ * @logical_port_index: software port index
+ * @physical_port_index: hardware port index
+ * @active_phy_mask: identifies phy members
+ * @reserved_tag: io tag reserved for the port task scheduler workaround
+ * @reserved_rni: remote node index reserved for the port task scheduler workaround
+ * @started_request_count: reference count for outstanding commands
+ * @not_ready_reason: set during state transitions and reported to the user
+ * @timer: timeout start/stop operations
+ */
+struct isci_port {
+ enum isci_status status;
+ #define IPORT_BCN_BLOCKED 0
+ #define IPORT_BCN_PENDING 1
+ unsigned long flags;
+ atomic_t event;
+ struct isci_host *isci_host;
+ struct asd_sas_port sas_port;
+ struct list_head remote_dev_list;
+ spinlock_t state_lock;
+ struct list_head domain_dev_list;
+ struct completion start_complete;
+ struct completion hard_reset_complete;
+ enum sci_status hard_reset_status;
+ struct sci_base_state_machine sm;
+ bool ready_exit;
+ u8 logical_port_index;
+ u8 physical_port_index;
+ u8 active_phy_mask;
+ u16 reserved_rni;
+ u16 reserved_tag;
+ u32 started_request_count;
+ u32 assigned_device_count;
+ u32 not_ready_reason;
+ struct isci_phy *phy_table[SCI_MAX_PHYS];
+ struct isci_host *owning_controller;
+ struct sci_timer timer;
+ struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
+ /* XXX rework: only one register, no need to replicate per-port */
+ u32 __iomem *port_pe_configuration_register;
+ struct scu_viit_entry __iomem *viit_registers;
+};
+
+enum sci_port_not_ready_reason_code {
+ SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
+ SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
+ SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
+ SCIC_PORT_NOT_READY_RECONFIGURING,
+
+ SCIC_PORT_NOT_READY_REASON_CODE_MAX
+};
+
+struct sci_port_end_point_properties {
+ struct sci_sas_address sas_address;
+ struct sci_phy_proto protocols;
+};
+
+struct sci_port_properties {
+ u32 index;
+ struct sci_port_end_point_properties local;
+ struct sci_port_end_point_properties remote;
+ u32 phy_mask;
+};
+
+/**
+ * enum sci_port_states - This enumeration depicts all the states for the
+ * common port state machine.
+ *
+ *
+ */
+enum sci_port_states {
+ /**
+ * This state indicates that the port has successfully been stopped.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the STOPPING state.
+ */
+ SCI_PORT_STOPPED,
+
+ /**
+ * This state indicates that the port is in the process of stopping.
+ * In this state no new IO operations are permitted, but existing IO
+ * operations are allowed to complete.
+ * This state is entered from the READY state.
+ */
+ SCI_PORT_STOPPING,
+
+ /**
+ * This state indicates the port is now ready. Thus, the user is
+ * able to perform IO operations on this port.
+ * This state is entered from the STARTING state.
+ */
+ SCI_PORT_READY,
+
+ /**
+ * The substate where the port is started and ready but has no
+ * active phys.
+ */
+ SCI_PORT_SUB_WAITING,
+
+ /**
+ * The substate where the port is started and ready and there is
+ * at least one phy operational.
+ */
+ SCI_PORT_SUB_OPERATIONAL,
+
+ /**
+ * The substate where the port is started and there was an
+ * add/remove phy event. This state is only used in Automatic
+ * Port Configuration Mode (APC)
+ */
+ SCI_PORT_SUB_CONFIGURING,
+
+ /**
+ * This state indicates the port is in the process of performing a hard
+ * reset. Thus, the user is unable to perform IO operations on this
+ * port.
+ * This state is entered from the READY state.
+ */
+ SCI_PORT_RESETTING,
+
+ /**
+ * This state indicates the port has failed a reset request. This state
+ * is entered when a port reset request times out.
+ * This state is entered from the RESETTING state.
+ */
+ SCI_PORT_FAILED,
+
+};
+
+static inline void sci_port_decrement_request_count(struct isci_port *iport)
+{
+ if (WARN_ONCE(iport->started_request_count == 0,
+ "%s: tried to decrement started_request_count past 0!?",
+ __func__))
+ /* pass */;
+ else
+ iport->started_request_count--;
+}
+
+#define sci_port_active_phy(port, phy) \
+ (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
+
+void sci_port_construct(
+ struct isci_port *iport,
+ u8 port_index,
+ struct isci_host *ihost);
+
+enum sci_status sci_port_start(struct isci_port *iport);
+enum sci_status sci_port_stop(struct isci_port *iport);
+
+enum sci_status sci_port_add_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+enum sci_status sci_port_remove_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_port_setup_transports(
+ struct isci_port *iport,
+ u32 device_id);
+
+void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
+
+void sci_port_deactivate_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy,
+ bool do_notify_user);
+
+bool sci_port_link_detected(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+ struct isci_phy *iphy);
+enum sci_status sci_port_link_down(struct isci_port *iport,
+ struct isci_phy *iphy);
+
+struct isci_request;
+struct isci_remote_device;
+enum sci_status sci_port_start_io(
+ struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_port_complete_io(
+ struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sas_linkrate sci_port_get_max_allowed_speed(
+ struct isci_port *iport);
+
+void sci_port_broadcast_change_received(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+bool sci_port_is_valid_phy_assignment(
+ struct isci_port *iport,
+ u32 phy_index);
+
+void sci_port_get_sas_address(
+ struct isci_port *iport,
+ struct sci_sas_address *sas_address);
+
+void sci_port_get_attached_sas_address(
+ struct isci_port *iport,
+ struct sci_sas_address *sas_address);
+
+enum isci_status isci_port_get_state(
+ struct isci_port *isci_port);
+
+void isci_port_formed(struct asd_sas_phy *);
+void isci_port_deformed(struct asd_sas_phy *);
+
+void isci_port_init(
+ struct isci_port *port,
+ struct isci_host *host,
+ int index);
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
new file mode 100644
index 000000000000..486b113c634a
--- /dev/null
+++ b/drivers/scsi/isci/port_config.c
@@ -0,0 +1,754 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+
+#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
+#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
+
+enum SCIC_SDS_APC_ACTIVITY {
+ SCIC_SDS_APC_SKIP_PHY,
+ SCIC_SDS_APC_ADD_PHY,
+ SCIC_SDS_APC_START_TIMER,
+
+ SCIC_SDS_APC_ACTIVITY_MAX
+};
+
+/*
+ * ******************************************************************************
+ * General port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ *
+ * @address_one: A SAS Address to be compared.
+ * @address_two: A SAS Address to be compared.
+ *
+ * Compare the two SAS addresses.  A positive value is returned if address one
+ * is greater than address two, a negative value if address one is less than
+ * address two, and zero if the two addresses are identical.
+ */
+static s32 sci_sas_address_compare(
+ struct sci_sas_address address_one,
+ struct sci_sas_address address_two)
+{
+ if (address_one.high > address_two.high) {
+ return 1;
+ } else if (address_one.high < address_two.high) {
+ return -1;
+ } else if (address_one.low > address_two.low) {
+ return 1;
+ } else if (address_one.low < address_two.low) {
+ return -1;
+ }
+
+ /* The two SAS Address must be identical */
+ return 0;
+}
+
+/**
+ *
+ * @controller: The controller object used for the port search.
+ * @phy: The phy object to match.
+ *
+ * This routine will find a matching port for the phy.  This means that the
+ * port and phy both have the same broadcast sas address and same received sas
+ * address.  Returns the address of the matching port, or NULL if there is no
+ * matching port for the phy.
+ */
+static struct isci_port *sci_port_configuration_agent_find_port(
+ struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ u8 i;
+ struct sci_sas_address port_sas_address;
+ struct sci_sas_address port_attached_device_address;
+ struct sci_sas_address phy_sas_address;
+ struct sci_sas_address phy_attached_device_address;
+
+ /*
+ * Since this phy can be a member of a wide port check to see if one or
+ * more phys match the sent and received SAS address as this phy in which
+ * case it should participate in the same port.
+ */
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
+
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ sci_port_get_sas_address(iport, &port_sas_address);
+ sci_port_get_attached_sas_address(iport, &port_attached_device_address);
+
+ if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
+ sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
+ return iport;
+ }
+
+ return NULL;
+}
+
+/**
+ *
+ * @controller: This is the controller object that contains the port agent
+ * @port_agent: This is the port configuration agent for the controller.
+ *
+ * This routine will validate that the port configuration is correct for the
+ * SCU hardware.  The SCU hardware allows for port configurations as follows:
+ *    LP0 -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3)
+ *    LP1 -> (PE1)
+ *    LP2 -> (PE2), (PE2, PE3)
+ *    LP3 -> (PE3)
+ * Returns SCI_SUCCESS if the port configuration is valid for this port
+ * configuration agent and SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if it
+ * is not.
+ */
+static enum sci_status sci_port_configuration_agent_validate_ports(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ struct sci_sas_address first_address;
+ struct sci_sas_address second_address;
+
+ /*
+ * Sanity check the max ranges for all the phys the max index
+ * is always equal to the port range index */
+ if (port_agent->phy_valid_port_range[0].max_index != 0 ||
+ port_agent->phy_valid_port_range[1].max_index != 1 ||
+ port_agent->phy_valid_port_range[2].max_index != 2 ||
+ port_agent->phy_valid_port_range[3].max_index != 3)
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+
+ /*
+ * This is a request to configure a single x4 port or at least attempt
+ * to make all the phys into a single port */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 0 &&
+ port_agent->phy_valid_port_range[2].min_index == 0 &&
+ port_agent->phy_valid_port_range[3].min_index == 0)
+ return SCI_SUCCESS;
+
+ /*
+ * This is a degenerate case where phy 1 and phy 2 are assigned
+ * to the same port this is explicitly disallowed by the hardware
+ * unless they are part of the same x4 port and this condition was
+ * already checked above. */
+ if (port_agent->phy_valid_port_range[2].min_index == 1) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * PE0 and PE3 can never have the same SAS Address unless they
+ * are part of the same x4 wide port and we have already checked
+ * for this condition. */
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * PE0 and PE1 are configured into a 2x1 ports make sure that the
+ * SAS Address for PE0 and PE2 are different since they can not be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 1) {
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[2], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ /*
+ * PE2 and PE3 are configured into a 2x1 ports make sure that the
+ * SAS Address for PE1 and PE3 are different since they can not be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[2].min_index == 2 &&
+ port_agent->phy_valid_port_range[3].min_index == 3) {
+ sci_phy_get_sas_address(&ihost->phys[1], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * ******************************************************************************
+ * Manual port configuration agent routines
+ * ****************************************************************************** */
+
+/* verify all of the phys in the same port are using the same SAS address */
+static enum sci_status
+sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ u32 phy_mask;
+ u32 assigned_phy_mask;
+ struct sci_sas_address sas_address;
+ struct sci_sas_address phy_assigned_address;
+ u8 port_index;
+ u8 phy_index;
+
+ assigned_phy_mask = 0;
+ sas_address.high = 0;
+ sas_address.low = 0;
+
+ for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
+ phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
+
+ if (!phy_mask)
+ continue;
+ /*
+ * Make sure that one or more of the phys were not already assigned to
+ * a different port. */
+ if ((phy_mask & ~assigned_phy_mask) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /* Find the starting phy index for this round through the loop */
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+ if ((phy_mask & (1 << phy_index)) == 0)
+ continue;
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &sas_address);
+
+ /*
+ * The phy_index can be used as the starting point for the
+ * port range since the hardware starts all logical ports
+ * the same as the PE index. */
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ if (phy_index != port_index) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ break;
+ }
+
+ /*
+ * See how many additional phys are being added to this logical port.
+ * Note: We have not moved the current phy_index so we will actually
+ * compare the starting phy with itself.
+ * This is expected and required to add the phy to the port. */
+ while (phy_index < SCI_MAX_PHYS) {
+ if ((phy_mask & (1 << phy_index)) == 0)
+ continue;
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &phy_assigned_address);
+
+ if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
+ /*
+ * The phy mask specified that this phy is part of the same port
+ * as the starting phy and it is not, so fail this configuration */
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ sci_port_add_phy(&ihost->ports[port_index],
+ &ihost->phys[phy_index]);
+
+ assigned_phy_mask |= (1 << phy_index);
+ }
+
+ phy_index++;
+ }
+
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
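+/*
+ * Timer callback: re-run the link up handler for phys that are ready but
+ * have not yet been configured into a port.
+ */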
+static void mpc_agent_timeout(unsigned long data)
+{
+ u8 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ /* Find the mask of phys that are reported ready but as yet unconfigured into a port */
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct isci_phy *iphy = &ihost->phys[index];
+
+ if (configure_phy_mask & (1 << index)) {
+ port_agent->link_up_handler(ihost, port_agent,
+ phy_get_non_dummy_port(iphy),
+ iphy);
+ }
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
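+/*
+ * Manual port configuration link up handler: record the phy as ready and
+ * forward the event to its statically assigned port.
+ */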
+static void sci_mpc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ /* If the port is NULL then the phy was not assigned to a port.
+ * This is because the phy was not given the same SAS Address as
+ * the other PHYs in the port.
+ */
+ if (!iport)
+ return;
+
+ port_agent->phy_ready_mask |= (1 << iphy->phy_index);
+ sci_port_link_up(iport, iphy);
+ if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+}
+
+/**
+ *
+ * @controller: This is the controller object that receives the link down
+ * notification.
+ * @port: This is the port object associated with the phy.  If there is no
+ * associated port this is NULL.  The port is an invalid
+ * handle only if the phy was never part of this port.  This happens when
+ * the phy is not broadcasting the same SAS address as the other phys in the
+ * assigned port.
+ * @phy: This is the phy object which has gone link down.
+ *
+ * This function handles the manual port configuration link down notifications.
+ * Since all ports and phys are associated at initialization time we just turn
+ * around and notify the port object of the link down event.  If this PHY is
+ * not associated with a port there is no action taken.  Is it possible to get a
+ * link down notification from a phy that has no associated port?
+ */
+static void sci_mpc_agent_link_down(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if (iport != NULL) {
+ /*
+ * If we can form a new port from the remainder of the phys
+ * then we want to start the timer to allow the SCI User to
+ * cleanup old devices and rediscover the port before
+ * rebuilding the port with the phys that remain in the ready
+ * state.
+ */
+ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+
+ /*
+ * Check to see if there are more phys waiting to be
+ * configured into a port. If there are allow the SCI User
+ * to tear down this port, if necessary, and then reconstruct
+ * the port after the timeout.
+ */
+ if ((port_agent->phy_configured_mask == 0x0000) &&
+ (port_agent->phy_ready_mask != 0x0000) &&
+ !port_agent->timer_pending) {
+ port_agent->timer_pending = true;
+
+ sci_mod_timer(&port_agent->timer,
+ SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
+ }
+
+ sci_port_link_down(iport, iphy);
+ }
+}
+
+/* verify phys are assigned a valid SAS address for automatic port
+ * configuration mode.
+ */
+static enum sci_status
+sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ u8 phy_index;
+ u8 port_index;
+ struct sci_sas_address sas_address;
+ struct sci_sas_address phy_assigned_address;
+
+ phy_index = 0;
+
+ while (phy_index < SCI_MAX_PHYS) {
+ port_index = phy_index;
+
+ /* Get the assigned SAS Address for the first PHY in this potential port. */
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &sas_address);
+
+ while (++phy_index < SCI_MAX_PHYS) {
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &phy_assigned_address);
+
+ /* Verify that each PHY reports the same SAS address as the first PHY */
+ if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+ } else {
+ port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+ break;
+ }
+ }
+ }
+
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
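+
+/*
+ * Worked example for sci_apc_agent_validate_phy_configuration() above,
+ * assuming phys 0-1 report SAS address A and phys 2-3 report SAS address B:
+ *
+ *   phy_valid_port_range[0] = { .min_index = 0, .max_index = 0 } (construct default)
+ *   phy_valid_port_range[1] = { .min_index = 0, .max_index = 1 }
+ *   phy_valid_port_range[2] = { .min_index = 2, .max_index = 2 }
+ *   phy_valid_port_range[3] = { .min_index = 2, .max_index = 3 }
+ *
+ * i.e. each phy is restricted to the range of ports spanned by the group of
+ * consecutive phys that share its SAS address.
+ */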
+
+static void sci_apc_agent_configure_ports(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_phy *iphy,
+ bool start_timer)
+{
+ u8 port_index;
+ enum sci_status status;
+ struct isci_port *iport;
+ enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
+
+ iport = sci_port_configuration_agent_find_port(ihost, iphy);
+
+ if (iport) {
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ else
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ } else {
+ /*
+ * There is no matching Port for this PHY so let's search through the
+ * Ports and see if we can add the PHY to its own port or maybe start
+ * the timer and wait to see if a wider port can be made.
+ *
+ * Note the break when we reach the condition of the port id == phy id */
+ for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
+ port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
+ port_index++) {
+
+ iport = &ihost->ports[port_index];
+
+ /* First we must make sure that this PHY can be added to this Port. */
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ /*
+ * Port contains a PHY with a greater PHY ID than the current
+ * PHY that has gone link up. This phy can not be part of any
+ * port so skip it and move on. */
+ if (iport->active_phy_mask > (1 << iphy->phy_index)) {
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ break;
+ }
+
+ /*
+ * We have reached the end of our Port list and have not found
+ * any reason why we should not either add the PHY to the port
+ * or wait for more phys to become active. */
+ if (iport->physical_port_index == iphy->phy_index) {
+ /*
+ * The port either has no active PHYs or only active PHYs
+ * with a lower PHY id than this PHY; in either case this
+ * PHY can be added to the port. */
+ if (apc_activity != SCIC_SDS_APC_START_TIMER) {
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ }
+
+ break;
+ }
+
+ /*
+ * The current Port has no active PHYs and this PHY could be part
+ * of this Port. Since we don't yet know whether a wider port can be
+ * formed, set up to start the timer and see if there is a better
+ * configuration. */
+ if (iport->active_phy_mask == 0) {
+ apc_activity = SCIC_SDS_APC_START_TIMER;
+ }
+ } else if (iport->active_phy_mask != 0) {
+ /*
+ * The Port has an active phy and the current Phy can not
+ * participate in this port so skip the PHY and see if
+ * there is a better configuration. */
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ }
+ }
+ }
+
+ /*
+ * Check to see if the start timer operations should instead map to an
+ * add phy operation. This occurs because we have been waiting to
+ * add a phy to a port but could not because the automatic port
+ * configuration engine had a choice of possible ports for the phy.
+ * Since we have gone through a timeout we are going to restrict the
+ * choice to the smallest possible port. */
+ if (!start_timer && apc_activity == SCIC_SDS_APC_START_TIMER) {
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ }
+
+ switch (apc_activity) {
+ case SCIC_SDS_APC_ADD_PHY:
+ status = sci_port_add_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS) {
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+ }
+ break;
+
+ case SCIC_SDS_APC_START_TIMER:
+ /*
+ * This can occur for either a link down event, or a link
+ * up event where we cannot yet tell the port to which a
+ * phy belongs.
+ */
+ if (port_agent->timer_pending)
+ sci_del_timer(&port_agent->timer);
+
+ port_agent->timer_pending = true;
+ sci_mod_timer(&port_agent->timer,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ break;
+
+ case SCIC_SDS_APC_SKIP_PHY:
+ default:
+ /* do nothing; the PHY cannot be made part of a port at this time. */
+ break;
+ }
+}
+
+/**
+ * sci_apc_agent_link_up - handle apc link up events
+ * @ihost: This is the controller object that receives the link up
+ * notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If there is no
+ * associated port this is NULL.
+ * @iphy: This is the phy object which has gone link up.
+ *
+ * This method handles the automatic port configuration for link up
+ * notifications. Is it possible to get a link up notification from a phy
+ * that has no associated port?
+ */
+static void sci_apc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ u8 phy_index = iphy->phy_index;
+
+ if (!iport) {
+ /* the phy is not part of a port */
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ } else {
+ /* the phy is already part of the port */
+ u32 port_state = iport->sm.current_state_id;
+
+ /* if the port's state is resetting, then the link up is from
+ * a port hard reset; in this case we need to tell the port
+ * that link up has been received
+ */
+ BUG_ON(port_state != SCI_PORT_RESETTING);
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_port_link_up(iport, iphy);
+ }
+}
+
+/**
+ * sci_apc_agent_link_down - handle apc link down events
+ * @ihost: This is the controller object that receives the link down
+ * notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If there is no
+ * associated port this is NULL.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This method handles the automatic port configuration link down
+ * notifications. If the phy is not associated with a port no action is
+ * taken. Is it possible to get a link down notification from a phy that has
+ * no associated port?
+ */
+static void sci_apc_agent_link_down(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+
+ if (!iport)
+ return;
+ if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
+ enum sci_status status;
+
+ status = sci_port_remove_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS)
+ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+ }
+}
+
+/* configure the phys into ports when the timer fires */
+static void apc_agent_timeout(unsigned long data)
+{
+ u32 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ if (!configure_phy_mask)
+ goto done;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if ((configure_phy_mask & (1 << index)) == 0)
+ continue;
+
+ sci_apc_agent_configure_ports(ihost, port_agent,
+ &ihost->phys[index], false);
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/*
+ * ******************************************************************************
+ * Public port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * sci_port_configuration_agent_construct - set up a port configuration agent
+ * @port_agent: the port configuration agent to construct
+ *
+ * This method will construct the port configuration agent for operation. This
+ * call is universal for both manual port configuration and automatic port
+ * configuration modes.
+ */
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent)
+{
+ u32 index;
+
+ port_agent->phy_configured_mask = 0x00;
+ port_agent->phy_ready_mask = 0x00;
+
+ port_agent->link_up_handler = NULL;
+ port_agent->link_down_handler = NULL;
+
+ port_agent->timer_pending = false;
+
+ for (index = 0; index < SCI_MAX_PORTS; index++) {
+ port_agent->phy_valid_port_range[index].min_index = 0;
+ port_agent->phy_valid_port_range[index].max_index = 0;
+ }
+}
+
+enum sci_status sci_port_configuration_agent_initialize(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ enum sci_status status;
+ enum sci_port_configuration_mode mode;
+
+ mode = ihost->oem_parameters.controller.mode_type;
+
+ if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ status = sci_mpc_agent_validate_phy_configuration(
+ ihost, port_agent);
+
+ port_agent->link_up_handler = sci_mpc_agent_link_up;
+ port_agent->link_down_handler = sci_mpc_agent_link_down;
+
+ sci_init_timer(&port_agent->timer, mpc_agent_timeout);
+ } else {
+ status = sci_apc_agent_validate_phy_configuration(
+ ihost, port_agent);
+
+ port_agent->link_up_handler = sci_apc_agent_link_up;
+ port_agent->link_down_handler = sci_apc_agent_link_down;
+
+ sci_init_timer(&port_agent->timer, apc_agent_timeout);
+ }
+
+ return status;
+}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
new file mode 100644
index 000000000000..b5f4341de243
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.c
@@ -0,0 +1,243 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ */
+
+/* probe_roms - scan for oem parameters */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
+#include <linux/efi.h>
+#include <asm/probe_roms.h>
+
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static efi_char16_t isci_efivar_name[] = {
+ 'R', 's', 't', 'S', 'c', 'u', 'O'
+};
+
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+{
+ void __iomem *oprom = pci_map_biosrom(pdev);
+ struct isci_orom *rom = NULL;
+ size_t len, i;
+ int j;
+ char oem_sig[4];
+ struct isci_oem_hdr oem_hdr;
+ u8 *tmp, sum;
+
+ if (!oprom)
+ return NULL;
+
+ len = pci_biosrom_size(pdev);
+ rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
+ if (!rom) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate memory for orom\n");
+ return NULL;
+ }
+
+ for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
+ memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
+
+ /* we think we found the OEM table */
+ if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
+ size_t copy_len;
+
+ memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
+
+ copy_len = min(oem_hdr.len - sizeof(oem_hdr),
+ sizeof(*rom));
+
+ memcpy_fromio(rom,
+ oprom + i + sizeof(oem_hdr),
+ copy_len);
+
+ /* calculate checksum */
+ tmp = (u8 *)&oem_hdr;
+ for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
+ sum += *tmp;
+
+ tmp = (u8 *)rom;
+ for (j = 0; j < sizeof(*rom); j++, tmp++)
+ sum += *tmp;
+
+ if (sum != 0) {
+ dev_warn(&pdev->dev,
+ "OEM table checksum failed\n");
+ continue;
+ }
+
+ /* keep going if that's not the oem param table */
+ if (memcmp(rom->hdr.signature,
+ ISCI_ROM_SIG,
+ ISCI_ROM_SIG_SIZE) != 0)
+ continue;
+
+ dev_info(&pdev->dev,
+ "OEM parameter table found in OROM\n");
+ break;
+ }
+ }
+
+ if (i >= len) {
+ dev_err(&pdev->dev, "oprom parse error\n");
+ devm_kfree(&pdev->dev, rom);
+ rom = NULL;
+ }
+ pci_unmap_biosrom(oprom);
+
+ return rom;
+}
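+
+/*
+ * The OEM block scanned above is considered valid when the byte-wise sum of
+ * the header plus parameter table is zero modulo 256.  A minimal sketch of
+ * how a table generator could pick the checksum byte (hypothetical helper,
+ * not used by this driver):
+ */
+#if 0
+static u8 isci_oem_block_checksum(const u8 *blk, size_t len, size_t csum_off)
+{
+ u8 sum = 0;
+ size_t i;
+
+ /* sum every byte except the checksum slot itself */
+ for (i = 0; i < len; i++)
+ if (i != csum_off)
+ sum += blk[i];
+
+ /* pick the checksum so the total byte sum wraps to zero */
+ return (u8)(0 - sum);
+}
+#endif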
+
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+ struct isci_orom *orom, int scu_index)
+{
+ /* check for valid inputs */
+ if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
+ scu_index > orom->hdr.num_elements || !oem)
+ return -EINVAL;
+
+ *oem = orom->ctrl[scu_index];
+ return 0;
+}
+
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
+{
+ struct isci_orom *orom = NULL, *data;
+ int i, j;
+
+ if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
+ return NULL;
+
+ if (fw->size < sizeof(*orom))
+ goto out;
+
+ data = (struct isci_orom *)fw->data;
+
+ if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
+ strlen(ISCI_ROM_SIG)) != 0)
+ goto out;
+
+ orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
+ if (!orom)
+ goto out;
+
+ memcpy(orom, fw->data, fw->size);
+
+ if (is_c0(pdev))
+ goto out;
+
+ /*
+ * deprecated: override default amp_control for pre-production
+ * silicon revisions
+ */
+ for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
+ for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
+ orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
+ }
+ out:
+ release_firmware(fw);
+
+ return orom;
+}
+
+static struct efi *get_efi(void)
+{
+#ifdef CONFIG_EFI
+ return &efi;
+#else
+ return NULL;
+#endif
+}
+
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
+{
+ efi_status_t status;
+ struct isci_orom *rom;
+ struct isci_oem_hdr *oem_hdr;
+ u8 *tmp, sum;
+ int j;
+ unsigned long data_len;
+ u8 *efi_data;
+ u32 efi_attrib = 0;
+
+ data_len = 1024;
+ efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
+ if (!efi_data) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate memory for EFI data\n");
+ return NULL;
+ }
+
+ rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
+
+ if (get_efi())
+ status = get_efi()->get_variable(isci_efivar_name,
+ &ISCI_EFI_VENDOR_GUID,
+ &efi_attrib,
+ &data_len,
+ efi_data);
+ else
+ status = EFI_NOT_FOUND;
+
+ if (status != EFI_SUCCESS) {
+ dev_warn(&pdev->dev,
+ "Unable to obtain EFI var data for OEM parms\n");
+ return NULL;
+ }
+
+ oem_hdr = (struct isci_oem_hdr *)efi_data;
+
+ if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
+ dev_warn(&pdev->dev,
+ "Invalid OEM header signature\n");
+ return NULL;
+ }
+
+ /* calculate checksum */
+ tmp = (u8 *)efi_data;
+ for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
+ sum += *tmp;
+
+ if (sum != 0) {
+ dev_warn(&pdev->dev,
+ "OEM table checksum failed\n");
+ return NULL;
+ }
+
+ if (memcmp(rom->hdr.signature,
+ ISCI_ROM_SIG,
+ ISCI_ROM_SIG_SIZE) != 0) {
+ dev_warn(&pdev->dev,
+ "Invalid OEM table signature\n");
+ return NULL;
+ }
+
+ return rom;
+}
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
new file mode 100644
index 000000000000..dc007e692f4e
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.h
@@ -0,0 +1,249 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PROBE_ROMS_H_
+#define _ISCI_PROBE_ROMS_H_
+
+#ifdef __KERNEL__
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/efi.h>
+#include "isci.h"
+
+#define SCIC_SDS_PARM_NO_SPEED 0
+
+/* generation 1 (i.e. 1.5 Gb/s) */
+#define SCIC_SDS_PARM_GEN1_SPEED 1
+
+/* generation 2 (i.e. 3.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN2_SPEED 2
+
+/* generation 3 (i.e. 6.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN3_SPEED 3
+#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
+
+/* parameters that can be set by module parameters */
+struct sci_user_parameters {
+ struct sci_phy_user_params {
+ /**
+ * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
+ * insertion frequency for this phy index.
+ */
+ u32 notify_enable_spin_up_insertion_frequency;
+
+ /**
+ * This field specifies the number of transmitted DWORDs within which
+ * to transmit a single ALIGN primitive. This value applies regardless
+ * of what type of device is attached or connection state. A value of
+ * 0 indicates that no ALIGN primitives will be inserted.
+ */
+ u16 align_insertion_frequency;
+
+ /**
+ * This field specifies the number of transmitted DWORDs within which
+ * to transmit 2 ALIGN primitives. This applies for SAS connections
+ * only. A minimum value of 3 is required for this field.
+ */
+ u16 in_connection_align_insertion_frequency;
+
+ /**
+ * This field indicates the maximum speed generation to be utilized
+ * by phys in the supplied port.
+ * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
+ * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
+ * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
+ */
+ u8 max_speed_generation;
+
+ } phys[SCI_MAX_PHYS];
+
+ /**
+ * This field specifies the maximum number of direct attached devices
+ * that can have power supplied to them simultaneously.
+ */
+ u8 max_number_concurrent_device_spin_up;
+
+ /**
+ * This field specifies the number of seconds to allow a phy to consume
+ * power before yielding to another phy.
+ *
+ */
+ u8 phy_spin_up_delay_interval;
+
+ /**
+ * These timer values specify how long a link will remain open with no
+ * activity, in increments of a microsecond; the increment becomes
+ * 100 microseconds if the uppermost bit is set.
+ *
+ */
+ u16 stp_inactivity_timeout;
+ u16 ssp_inactivity_timeout;
+
+ /**
+ * These timer values specify how long a link will remain open in increments
+ * of 100 microseconds.
+ *
+ */
+ u16 stp_max_occupancy_timeout;
+ u16 ssp_max_occupancy_timeout;
+
+ /**
+ * This timer value specifies how long a link will remain open with no
+ * outbound traffic in increments of a microsecond.
+ *
+ */
+ u8 no_outbound_task_timeout;
+
+};
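+
+/*
+ * Example encoding for the *_inactivity_timeout fields above (assuming the
+ * low 15 bits carry the count and the uppermost bit selects the 100 us unit):
+ *
+ *   0x0014 -> 20 * 1 us   = 20 us
+ *   0x8014 -> 20 * 100 us = 2 ms
+ */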
+
+#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
+#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
+#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
+
+struct sci_oem_params;
+int sci_oem_parameters_validate(struct sci_oem_params *oem);
+
+struct isci_orom;
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+ struct isci_orom *orom, int scu_index);
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
+
+struct isci_oem_hdr {
+ u8 sig[4];
+ u8 rev_major;
+ u8 rev_minor;
+ u16 len;
+ u8 checksum;
+ u8 reserved1;
+ u16 reserved2;
+} __attribute__ ((packed));
+
+#else
+#define SCI_MAX_PORTS 4
+#define SCI_MAX_PHYS 4
+#define SCI_MAX_CONTROLLERS 2
+#endif
+
+#define ISCI_FW_NAME "isci/isci_firmware.bin"
+
+#define ROMSIGNATURE 0xaa55
+
+#define ISCI_OEM_SIG "$OEM"
+#define ISCI_OEM_SIG_SIZE 4
+#define ISCI_ROM_SIG "ISCUOEMB"
+#define ISCI_ROM_SIG_SIZE 8
+
+#define ISCI_EFI_VENDOR_GUID \
+ EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
+ 0x1a, 0x04, 0xc6)
+#define ISCI_EFI_VAR_NAME "RstScuO"
+
+/* Allowed PORT configuration modes:
+ *
+ * APC - Automatic PORT configuration mode is selected when the OEM
+ * configuration parameters provide no PHY_MASK for any PORT, i.e. no phys
+ * are assigned to any of the ports at start.
+ *
+ * MPC - Manual PORT configuration mode is selected when the OEM configuration
+ * parameters provide a PHY_MASK value for any PORT. Any PORT with no PHY_MASK
+ * is treated as an invalid port, and not all PHYs must be assigned. A
+ * PHY_MASK that assigns just a single PHY to a port, with no other PHYs
+ * assigned, is sufficient to declare manual PORT configuration.
+ */
+enum sci_port_configuration_mode {
+ SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
+ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
+};
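+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver): one way
+ * a consumer of the OEM parameters could infer the configuration mode from
+ * the per-port PHY_MASK values described above.  Assumes the sci_oem_params
+ * layout defined later in this header.
+ */
+#if 0
+static enum sci_port_configuration_mode
+sci_oem_infer_port_config_mode(const struct sci_oem_params *oem)
+{
+ int i;
+
+ /* any port with an explicit phy mask implies manual configuration */
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ if (oem->ports[i].phy_mask != 0)
+ return SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+
+ return SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+}
+#endif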
+
+struct sci_bios_oem_param_block_hdr {
+ uint8_t signature[ISCI_ROM_SIG_SIZE];
+ uint16_t total_block_length;
+ uint8_t hdr_length;
+ uint8_t version;
+ uint8_t preboot_source;
+ uint8_t num_elements;
+ uint16_t element_length;
+ uint8_t reserved[8];
+} __attribute__ ((packed));
+
+struct sci_oem_params {
+ struct {
+ uint8_t mode_type;
+ uint8_t max_concurrent_dev_spin_up;
+ uint8_t do_enable_ssc;
+ uint8_t reserved;
+ } controller;
+
+ struct {
+ uint8_t phy_mask;
+ } ports[SCI_MAX_PORTS];
+
+ struct sci_phy_oem_params {
+ struct {
+ uint32_t high;
+ uint32_t low;
+ } sas_address;
+
+ uint32_t afe_tx_amp_control0;
+ uint32_t afe_tx_amp_control1;
+ uint32_t afe_tx_amp_control2;
+ uint32_t afe_tx_amp_control3;
+ } phys[SCI_MAX_PHYS];
+} __attribute__ ((packed));
+
+struct isci_orom {
+ struct sci_bios_oem_param_block_hdr hdr;
+ struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
+} __attribute__ ((packed));
+
+#endif
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
new file mode 100644
index 000000000000..9b266c7428e8
--- /dev/null
+++ b/drivers/scsi/isci/registers.h
@@ -0,0 +1,1934 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_REGISTERS_H_
+#define _SCU_REGISTERS_H_
+
+/**
+ * This file contains the constants and structures for the SCU memory mapped
+ * registers.
+ *
+ *
+ */
+
+#define SCU_VIIT_ENTRY_ID_MASK (0xC0000000)
+#define SCU_VIIT_ENTRY_ID_SHIFT (30)
+
+#define SCU_VIIT_ENTRY_FUNCTION_MASK (0x0FF00000)
+#define SCU_VIIT_ENTRY_FUNCTION_SHIFT (20)
+
+#define SCU_VIIT_ENTRY_IPPTMODE_MASK (0x0001F800)
+#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT (12)
+
+#define SCU_VIIT_ENTRY_LPVIE_MASK (0x00000F00)
+#define SCU_VIIT_ENTRY_LPVIE_SHIFT (8)
+
+#define SCU_VIIT_ENTRY_STATUS_MASK (0x000000FF)
+#define SCU_VIIT_ENTRY_STATUS_SHIFT (0)
+
+#define SCU_VIIT_ENTRY_ID_INVALID (0 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIIT (1 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_IIT (2 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIRT_EXP (3 << SCU_VIIT_ENTRY_ID_SHIFT)
+
+#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_INITIATOR \
+ (\
+ SCU_VIIT_IPPT_SSP_INITIATOR \
+ | SCU_VIIT_IPPT_SMP_INITIATOR \
+ | SCU_VIIT_IPPT_STP_INITIATOR \
+ )
+
+#define SCU_VIIT_STATUS_RNC_VALID (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ADDRESS_VALID (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_RNI_VALID (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ALL_VALID \
+ (\
+ SCU_VIIT_STATUS_RNC_VALID \
+ | SCU_VIIT_STATUS_ADDRESS_VALID \
+ | SCU_VIIT_STATUS_RNI_VALID \
+ )
+
+#define SCU_VIIT_IPPT_SMP_TARGET (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+
+/**
+ * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
+ *
+ *
+ */
+struct scu_viit_entry {
+ /**
+ * This field must be encoded according to the type of initiator that is
+ * being constructed for this port.
+ */
+ u32 status;
+
+ /**
+ * Virtual initiator high SAS Address
+ */
+ u32 initiator_sas_address_hi;
+
+ /**
+ * Virtual initiator low SAS Address
+ */
+ u32 initiator_sas_address_lo;
+
+ /**
+ * This must be 0
+ */
+ u32 reserved;
+
+};
+
+
+/* IIT Status Defines */
+#define SCU_IIT_ENTRY_ID_MASK (0xC0000000)
+#define SCU_IIT_ENTRY_ID_SHIFT (30)
+
+#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK (0x20000000)
+#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT (29)
+
+#define SCU_IIT_ENTRY_LPI_MASK (0x00000F00)
+#define SCU_IIT_ENTRY_LPI_SHIFT (8)
+
+#define SCU_IIT_ENTRY_STATUS_MASK (0x000000FF)
+#define SCU_IIT_ENTRY_STATUS_SHIFT (0)
+
+/* IIT Remote Initiator Defines */
+#define SCU_IIT_ENTRY_REMOTE_TAG_MASK (0x0000FFFF)
+#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
+
+#define SCU_IIT_ENTRY_REMOTE_RNC_MASK (0x0FFF0000)
+#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
+
+#define SCU_IIT_ENTRY_ID_INVALID (0 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIIT (1 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_IIT (2 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIRT_EXP (3 << SCU_IIT_ENTRY_ID_SHIFT)
+
+/**
+ * struct scu_iit_entry - This will be implemented later when we support
+ * virtual functions
+ *
+ *
+ */
+struct scu_iit_entry {
+ u32 status;
+ u32 remote_initiator_sas_address_hi;
+ u32 remote_initiator_sas_address_lo;
+ u32 remote_initiator;
+
+};
+
+/* Generate a value for an SCU register */
+#define SCU_GEN_VALUE(name, value) \
+ (((value) << name ## _SHIFT) & (name ## _MASK))
+
+/*
+ * Generate a bit value for an SCU register
+ * Make sure that the register MASK is just a single bit */
+#define SCU_GEN_BIT(name) \
+ SCU_GEN_VALUE(name, ((u32)1))
+
+#define SCU_SET_BIT(name, reg_value) \
+ ((reg_value) | SCU_GEN_BIT(name))
+
+#define SCU_CLEAR_BIT(name, reg_value) \
+ ((reg_value) & ~(SCU_GEN_BIT(name)))
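+
+/*
+ * Example expansions (hypothetical fields: FOO_SHIFT (8) and FOO_MASK
+ * (0x0000FF00) for a multi-bit field, BAR_SHIFT (31) and BAR_MASK
+ * (0x80000000) for a single-bit field):
+ *
+ *   SCU_GEN_VALUE(FOO, 0x3)        == ((0x3 << 8) & 0x0000FF00) == 0x00000300
+ *   SCU_GEN_BIT(BAR)               == ((1 << 31) & 0x80000000)  == 0x80000000
+ *   SCU_SET_BIT(BAR, 0x0)          == 0x80000000
+ *   SCU_CLEAR_BIT(BAR, 0x80000300) == 0x00000300
+ */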
+
+/*
+ * *****************************************************************************
+ * Unions for bitfield definitions of SCU Registers
+ * SMU Post Context Port
+ * ***************************************************************************** */
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT (0)
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK (0x00000FFF)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT (12)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK (0x0000F000)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT (16)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK (0x00030000)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT (18)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK (0x00FC0000)
+#define SMU_POST_CONTEXT_PORT_RESERVED_MASK (0xFF000000)
+
+#define SMU_PCP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT (31)
+#define SMU_INTERRUPT_STATUS_COMPLETION_MASK (0x80000000)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT (1)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK (0x00000002)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT (0)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK (0x00000001)
+#define SMU_INTERRUPT_STATUS_RESERVED_MASK (0x7FFFFFFC)
+
+#define SMU_ISR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
+
+#define SMU_ISR_QUEUE_ERROR SMU_ISR_GEN_BIT(QUEUE_ERROR)
+#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_ISR_COMPLETION SMU_ISR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT (31)
+#define SMU_INTERRUPT_MASK_COMPLETION_MASK (0x80000000)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT (1)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK (0x00000002)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT (0)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK (0x00000001)
+#define SMU_INTERRUPT_MASK_RESERVED_MASK (0x7FFFFFFC)
+
+#define SMU_IMR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
+
+#define SMU_IMR_QUEUE_ERROR SMU_IMR_GEN_BIT(QUEUE_ERROR)
+#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_IMR_COMPLETION SMU_IMR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT (0)
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK (0x0000001F)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT (8)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK (0x0000FF00)
+#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK (0xFFFF00E0)
+
+#define SMU_ICC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_RANGE_START_SHIFT (0)
+#define SMU_TASK_CONTEXT_RANGE_START_MASK (0x00000FFF)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT (16)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK (0x0FFF0000)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT (31)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK (0x80000000)
+#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK (0x7000F000)
+
+#define SMU_TCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
+
+#define SMU_TCR_GEN_BIT(name, value) \
+ SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT (15)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK (0x00008000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT (26)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK (0x04000000)
+#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK (0xF8004000)
+
+#define SMU_CQPR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
+
+#define SMU_CQPR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT (15)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK (0x00008000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT (26)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK (0x04000000)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT (30)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK (0x40000000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT (31)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK (0x80000000)
+#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK (0x38004000)
+
+#define SMU_CQGR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
+
+#define SMU_CQGR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
+
+#define SMU_CQGR_CYCLE_BIT \
+ SMU_CQGR_GEN_BIT(CYCLE_BIT)
+
+#define SMU_CQGR_EVENT_CYCLE_BIT \
+ SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
+
+#define SMU_CQGR_GET_POINTER_SET(value) \
+ SMU_CQGR_GEN_VAL(POINTER, value)
+
+
+/* ***************************************************************************** */
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK (0xFC00C000)
+
+#define SMU_CQC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
+
+#define SMU_CQC_QUEUE_LIMIT_SET(value) \
+ SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
+
+#define SMU_CQC_EVENT_LIMIT_SET(value) \
+ SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
+
+
+/* ***************************************************************************** */
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT (0)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK (0x00000FFF)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT (12)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK (0x00007000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT (15)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK (0x07FF8000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT (27)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK (0x08000000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK (0xF0000000)
+
+#define SMU_DCC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
+
+#define SMU_DCC_GET_MAX_PEG(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_LP(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_TC(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_RNC(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
+ )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK (0x00000001)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT (1)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK (0x00000002)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT (16)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK (0x00010000)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT (17)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK (0x00020000)
+#define SMU_CONTROL_STATUS_RESERVED_MASK (0xFFFCFFFC)
+
+#define SMU_SMUCSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
+
+#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+ (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
+
+#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+ (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
+
+#define SCU_RAM_INIT_COMPLETED \
+ (\
+ SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+ | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+ )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT (0)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK (0x00000001)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT (1)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK (0x00000002)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT (2)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK (0x00000004)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT (3)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK (0x00000008)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT (8)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK (0x00000100)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT (9)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK (0x00000200)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT (10)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK (0x00000400)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT (11)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK (0x00000800)
+
+#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
+ ((1 << (pe)) << ((peg) * 8))
+
+#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+ (\
+ SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
+ )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
+ (\
+ SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
+ | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
+ )
+
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT (16)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK (0x00010000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT (17)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK (0x00020000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT (18)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK (0x00040000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT (19)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK (0x00080000)
+
+#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
+ ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT (20)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK (0x00100000)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT (21)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK (0x00200000)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT (22)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK (0x00400000)
+
+/*
+ * It seems to make sense that if you are going to reset the protocol
+ * engine group that you would also reset all of the protocol engines */
+#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
+ (\
+ (1 << ((peg) + 20)) \
+ | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
+ | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \
+ | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+ )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
+ (\
+ SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
+ | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
+ )
+
+#define SMU_RESET_SCU() (0xFFFFFFFF)
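+
+/*
+ * Worked expansion of the reset macros above (values follow directly from
+ * the definitions):
+ *
+ *   SMU_RESET_PEG_PROTOCOL_ENGINES(0)      == 0x0000000F
+ *   SMU_RESET_PEG_PROTOCOL_ENGINES(1)      == 0x00000F00
+ *   SMU_RESET_PROTOCOL_ENGINE_GROUP(0)     == 0x0011000F
+ *   SMU_RESET_PROTOCOL_ENGINE_GROUP(1)     == 0x00240F00
+ *   SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() == 0x00350F0F
+ */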
+
+
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT (0)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK (0x00000FFF)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT (16)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK (0x0FFF0000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT (31)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK (0x80000000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK (0x7000F000)
+
+#define SMU_TCA_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
+
+#define SMU_TCA_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK (0xFFFFF000)
+
+#define SCU_UFQC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
+
+#define SCU_UFQC_QUEUE_SIZE_SET(value) \
+ SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK (0xFFFFE000)
+
+#define SCU_UFQPP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
+
+#define SCU_UFQPP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT (31)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK (0x80000000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK (0x7FFFE000)
+
+#define SCU_UFQGP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
+
+#define SCU_UFQGP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
+
+#define SCU_UFQGP_CYCLE_BIT(value) \
+ SCU_UFQGP_GEN_BIT(CYCLE_BIT, value)
+
+#define SCU_UFQGP_GET_POINTER(value) \
+ SCU_UFQGP_GEN_VAL(POINTER, value)
+
+#define SCU_UFQGP_ENABLE(value) \
+ (SCU_UFQGP_GEN_BIT(ENABLE) | value)
+
+#define SCU_UFQGP_DISABLE(value) \
+ (~SCU_UFQGP_GEN_BIT(ENABLE) & value)
+
+#define SCU_UFQGP_VALUE(bit, value) \
+ (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
+
+/* ***************************************************************************** */
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT (0)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK (0x0000FFFF)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (16)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00010000)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT (17)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK (0x00020000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT (18)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK (0x00040000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT (19)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK (0x00080000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT (20)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK (0x00100000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT (21)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK (0x00200000)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT (22)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK (0x00400000)
+#define SCU_PDMA_CONFIGURATION_RESERVED_MASK (0xFF800000)
+
+#define SCU_PDMACR_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
+
+#define SCU_PDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
+
+#define SCU_PDMACR_BE_GEN_BIT(name) \
+ SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (8)
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00000100)
+
+#define SCU_CDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SCU Link Layer Registers
+ * ***************************************************************************** */
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT (0)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK (0x000000FF)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT (8)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK (0x0000FF00)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT (16)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK (0x00FF0000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT (24)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK (0xFF000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_REQUIRED_MASK (0x00000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_DEFAULT_MASK (0x7D00676F)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_RESERVED_MASK (0x00FF0000)
+
+#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
+
+
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_SHIFT (2)
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_MASK (0x00000004)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT (4)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK (0x00000010)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT (5)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK (0x00000020)
+#define SCU_LINK_STATUS_RESERVED_MASK (0xFFFFFFCD)
+
+#define SCU_SAS_LLSTA_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
+
+
+/* TODO: Where is the SATA_PSELTOV register? */
+
+/*
+ * *****************************************************************************
+ * * SCU SAS Maximum Arbitration Wait Time Timeout Register
+ * ***************************************************************************** */
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT (0)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK (0x00007FFF)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT (15)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK (0x00008000)
+
+#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
+
+#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
+
+
+/*
+ * TODO: Where is the SAS_LNKTOV register?
+ * TODO: Where is the SAS_PHYTOV register? */
+
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK (0x00000002)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT (2)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK (0x00000004)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT (3)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK (0x00000008)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT (8)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK (0x00000100)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT (9)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK (0x00000200)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT (10)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK (0x00000400)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT (11)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK (0x00000800)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT (16)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK (0x000F0000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT (24)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK (0x0F000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT (28)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK (0x70000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK (0x80F0F1F1)
+
+#define SCU_SAS_TIID_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
+
+#define SCU_SAS_TIID_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
+
+/* SAS Identify Frame PHY Identifier Register */
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT (16)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK (0x00010000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT (17)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK (0x00020000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT (18)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK (0x00040000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT (24)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK (0xFF000000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK (0x00F800FF)
+
+#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
+
+#define SCU_SAS_TIPID_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
+
+
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT (4)
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK (0x00000010)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT (6)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK (0x00000040)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT (7)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK (0x00000080)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT (8)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK (0x00000100)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT (9)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK (0x00000200)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT (11)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK (0x00000800)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT (12)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK (0x00001000)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT (13)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK (0x00002000)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT (14)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK (0x00004000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT (15)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK (0x00008000)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT (23)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK (0x00800000)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT (27)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK (0x08000000)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT (28)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK (0x10000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT (29)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK (0x20000000)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT (30)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK (0x40000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT (31)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK (0x80000000)
+#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK (0x0100000F)
+#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK (0x4180100F)
+#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK (0x00000000)
+
+#define SCU_SAS_PCFG_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
+
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT (0)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK (0x000007FF)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT (16)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK (0x00ff0000)
+
+#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
+
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT (0)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK (0x0003FFFF)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT (31)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK (0x80000000)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK (0x7FFC0000)
+
+#define SCU_ENSPINUP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
+
+#define SCU_ENSPINUP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT (1)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK (0x00000002)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT (4)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK (0x000000F0)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT (8)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK (0x00000100)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT (9)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK (0x00000201)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT (10)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK (0x00000401)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT (11)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK (0x00000801)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT (12)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK (0x00001001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT (13)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK (0x00002001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT (31)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK (0x80000000)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK (0x00003F01)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK (0x00000001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK (0x7FFFC00D)
+
+#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
+
+#define SCU_SAS_PHYCAP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT (0)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK (0x000000FF)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT (31)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK (0x80000000)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK (0x7FFFFF00)
+
+#define SCU_PSZGCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
+
+#define SCU_PSZGCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
+
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT (1)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK (0x00000002)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT (2)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK (0x00000004)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT (4)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK (0x00000010)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT (5)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK (0x00000020)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK (0x00030000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT (19)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK (0x00080000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK (0x00300000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT (23)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK (0x00800000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK (0x03000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT (27)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK (0x08000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK (0x30000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT (31)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK (0x80000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK (0x4444FFC9)
+
+#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
+
+#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * Port Task Scheduler registers shift and mask values
+ * ***************************************************************************** */
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT (0)
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK (0x0000FFFF)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT (16)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK (0x00FF0000)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT (24)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK (0x01000000)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT (25)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK (0x02000000)
+#define SCU_PTSG_CONTROL_DEFAULT_MASK (0x00020002)
+#define SCU_PTSG_CONTROL_REQUIRED_MASK (0x00000000)
+#define SCU_PTSG_CONTROL_RESERVED_MASK (0xFC000000)
+
+#define SCU_PTSGCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
+
+#define SCU_PTSGCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
+
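+/*
+ * Illustrative sketch (not part of the hardware definition): assuming the
+ * SCU_GEN_VALUE()/SCU_GEN_BIT() helpers defined earlier in this header, a
+ * port task scheduler control word with a hypothetical I_T nexus timeout of
+ * 1000 and the scheduler enabled might be built as:
+ *
+ *	u32 ptsgcr = SCU_PTSGCR_GEN_VAL(IT_NEXUS_TIMEOUT, 1000) |
+ *		     SCU_PTSGCR_GEN_BIT(PTSG_ENABLE);
+ */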
+
+/* ***************************************************************************** */
+#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_MASK (0x0000FFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK (0xFFFF0000)
+
+#define SCU_RTCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
+
+
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK (0x00FFFFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK (0xFF000000)
+
+#define SCU_RTCCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK (0xFFFFFFFC)
+
+#define SCU_PTSxCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT (2)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK (0x00000004)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK (0xFFFFFFF8)
+
+#define SCU_PTSxSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * SGPIO Register shift and mask values
+ * ***************************************************************************** */
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0)
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000)
+#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8)
+
+#define SCU_SGICRx_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
+
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
+
+#define SCU_SGPBRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0)
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000)
+#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSDLRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0)
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000)
+#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSDURx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_UPPER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDLRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDURx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_ ## name, value)
+
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
+
+#define SCU_SGVSCR_GEN_VAL(value) \
+ SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE, value)
+
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
+
+#define SCU_SGODSR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value)
+
+#define SCU_SGODSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name)
+
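+/*
+ * Illustrative sketch (not part of the hardware definition): an SGPIO output
+ * data select entry that routes input data 0 through, inverted, could be
+ * composed from the names above (SCU_GEN_VALUE()/SCU_GEN_BIT() are assumed
+ * to come from earlier in this header):
+ *
+ *	u32 odsr = SCU_SGODSR_GEN_VAL(INPUT_DATA0, 0) |
+ *		   SCU_SGODSR_GEN_BIT(INVERT_INPUT_DATA0);
+ */
+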
+/*
+ * *****************************************************************************
+ * * SMU Registers
+ * ***************************************************************************** */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SMU Registers
+ * These registers are based off of BAR0.
+ *
+ * To calculate the register base for other functions use
+ * BAR0 + FN# * SystemPageSize * 2
+ *
+ * The TCA is only accessible from FN#0 (the physical function) and each
+ * entry is programmed at (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)), i.e.
+ * TCA0 for FN#0 is at BAR0 + 0x0400
+ * TCA1 for FN#1 is at BAR0 + 0x0404
+ * etc. (see the illustrative sketch after the offset defines below)
+ * ----------------------------------------------------------------------------
+ * Accessible to all FN#s */
+#define SCU_SMU_PCP_OFFSET 0x0000
+#define SCU_SMU_AMR_OFFSET 0x0004
+#define SCU_SMU_ISR_OFFSET 0x0010
+#define SCU_SMU_IMR_OFFSET 0x0014
+#define SCU_SMU_ICC_OFFSET 0x0018
+#define SCU_SMU_HTTLBAR_OFFSET 0x0020
+#define SCU_SMU_HTTUBAR_OFFSET 0x0024
+#define SCU_SMU_TCR_OFFSET 0x0028
+#define SCU_SMU_CQLBAR_OFFSET 0x0030
+#define SCU_SMU_CQUBAR_OFFSET 0x0034
+#define SCU_SMU_CQPR_OFFSET 0x0040
+#define SCU_SMU_CQGR_OFFSET 0x0044
+#define SCU_SMU_CQC_OFFSET 0x0048
+/* Accessible to FN#0 only */
+#define SCU_SMU_RNCLBAR_OFFSET 0x0080
+#define SCU_SMU_RNCUBAR_OFFSET 0x0084
+#define SCU_SMU_DCC_OFFSET 0x0090
+#define SCU_SMU_DFC_OFFSET 0x0094
+#define SCU_SMU_SMUCSR_OFFSET 0x0098
+#define SCU_SMU_SCUSRCR_OFFSET 0x009C
+#define SCU_SMU_SMAW_OFFSET 0x00A0
+#define SCU_SMU_SMDW_OFFSET 0x00A4
+/* Accessible to FN#0 only */
+#define SCU_SMU_TCA_OFFSET 0x0400
+/* Accessible to all FN#s */
+#define SCU_SMU_MT_MLAR0_OFFSET 0x2000
+#define SCU_SMU_MT_MUAR0_OFFSET 0x2004
+#define SCU_SMU_MT_MDR0_OFFSET 0x2008
+#define SCU_SMU_MT_VCR0_OFFSET 0x200C
+#define SCU_SMU_MT_MLAR1_OFFSET 0x2010
+#define SCU_SMU_MT_MUAR1_OFFSET 0x2014
+#define SCU_SMU_MT_MDR1_OFFSET 0x2018
+#define SCU_SMU_MT_VCR1_OFFSET 0x201C
+#define SCU_SMU_MPBA_OFFSET 0x3000
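+
+/*
+ * Illustrative sketch (not part of the hardware definition): following the
+ * layout comment above and assuming a hypothetical 4 KiB system page size,
+ * the SMU register base for function fn and its TCA slot could be computed
+ * as:
+ *
+ *	resource_size_t smu_base = bar0 + fn * 4096 * 2;
+ *	resource_size_t tca_addr = bar0 + SCU_SMU_TCA_OFFSET + fn * 0x04;
+ *
+ * where bar0 is the BAR0 bus address of the device.
+ */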
+
+/**
+ * struct smu_registers - These are the SMU registers
+ *
+ *
+ */
+struct smu_registers {
+/* 0x0000 PCP */
+ u32 post_context_port;
+/* 0x0004 AMR */
+ u32 address_modifier;
+ u32 reserved_08;
+ u32 reserved_0C;
+/* 0x0010 ISR */
+ u32 interrupt_status;
+/* 0x0014 IMR */
+ u32 interrupt_mask;
+/* 0x0018 ICC */
+ u32 interrupt_coalesce_control;
+ u32 reserved_1C;
+/* 0x0020 HTTLBAR */
+ u32 host_task_table_lower;
+/* 0x0024 HTTUBAR */
+ u32 host_task_table_upper;
+/* 0x0028 TCR */
+ u32 task_context_range;
+ u32 reserved_2C;
+/* 0x0030 CQLBAR */
+ u32 completion_queue_lower;
+/* 0x0034 CQUBAR */
+ u32 completion_queue_upper;
+ u32 reserved_38;
+ u32 reserved_3C;
+/* 0x0040 CQPR */
+ u32 completion_queue_put;
+/* 0x0044 CQGR */
+ u32 completion_queue_get;
+/* 0x0048 CQC */
+ u32 completion_queue_control;
+ u32 reserved_4C;
+ u32 reserved_5x[4];
+ u32 reserved_6x[4];
+ u32 reserved_7x[4];
+/*
+ * Accessible to FN#0 only
+ * 0x0080 RNCLBAR */
+ u32 remote_node_context_lower;
+/* 0x0084 RNCUBAR */
+ u32 remote_node_context_upper;
+ u32 reserved_88;
+ u32 reserved_8C;
+/* 0x0090 DCC */
+ u32 device_context_capacity;
+/* 0x0094 DFC */
+ u32 device_function_capacity;
+/* 0x0098 SMUCSR */
+ u32 control_status;
+/* 0x009C SCUSRCR */
+ u32 soft_reset_control;
+/* 0x00A0 SMAW */
+ u32 mmr_address_window;
+/* 0x00A4 SMDW */
+ u32 mmr_data_window;
+ u32 reserved_A8;
+ u32 reserved_AC;
+/* A whole bunch of reserved space */
+ u32 reserved_Bx[4];
+ u32 reserved_Cx[4];
+ u32 reserved_Dx[4];
+ u32 reserved_Ex[4];
+ u32 reserved_Fx[4];
+ u32 reserved_1xx[64];
+ u32 reserved_2xx[64];
+ u32 reserved_3xx[64];
+/*
+ * Accessible to FN#0 only
+ * 0x0400 TCA */
+ u32 task_context_assignment[256];
+/* MSI-X registers not included */
+};
+
+/*
+ * *****************************************************************************
+ * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_BASE 0x6000
+#define SCU_SDMA_PUFATLHAR_OFFSET 0x0000
+#define SCU_SDMA_PUFATUHAR_OFFSET 0x0004
+#define SCU_SDMA_UFLHBAR_OFFSET 0x0008
+#define SCU_SDMA_UFUHBAR_OFFSET 0x000C
+#define SCU_SDMA_UFQC_OFFSET 0x0010
+#define SCU_SDMA_UFQPP_OFFSET 0x0014
+#define SCU_SDMA_UFQGP_OFFSET 0x0018
+#define SCU_SDMA_PDMACR_OFFSET 0x001C
+#define SCU_SDMA_CDMACR_OFFSET 0x0080
+
+/**
+ * struct scu_sdma_registers - These are the SCU SDMA Registers
+ *
+ *
+ */
+struct scu_sdma_registers {
+/* 0x0000 PUFATLHAR */
+ u32 uf_address_table_lower;
+/* 0x0004 PUFATUHAR */
+ u32 uf_address_table_upper;
+/* 0x0008 UFLHBAR */
+ u32 uf_header_base_address_lower;
+/* 0x000C UFUHBAR */
+ u32 uf_header_base_address_upper;
+/* 0x0010 UFQC */
+ u32 unsolicited_frame_queue_control;
+/* 0x0014 UFQPP */
+ u32 unsolicited_frame_put_pointer;
+/* 0x0018 UFQGP */
+ u32 unsolicited_frame_get_pointer;
+/* 0x001C PDMACR */
+ u32 pdma_configuration;
+/* Reserved until offset 0x80 */
+ u32 reserved_0020_007C[0x18];
+/* 0x0080 CDMACR */
+ u32 cdma_configuration;
+/* Remainder of the SDMA register space */
+ u32 reserved_0084_0400[0xDF];
+
+};
+
+/*
+ * *****************************************************************************
+ * * SCU Link Registers
+ * ***************************************************************************** */
+#define SCU_PEG0_OFFSET 0x0000
+#define SCU_PEG1_OFFSET 0x8000
+
+#define SCU_TL0_OFFSET 0x0000
+#define SCU_TL1_OFFSET 0x0400
+#define SCU_TL2_OFFSET 0x0800
+#define SCU_TL3_OFFSET 0x0C00
+
+#define SCU_LL_OFFSET 0x0080
+#define SCU_LL0_OFFSET (SCU_TL0_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL1_OFFSET (SCU_TL1_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL2_OFFSET (SCU_TL2_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL3_OFFSET (SCU_TL3_OFFSET + SCU_LL_OFFSET)
+
+/* Transport Layer Offsets (PEG + TL) */
+#define SCU_TLCR_OFFSET 0x0000
+#define SCU_TLADTR_OFFSET 0x0004
+#define SCU_TLTTMR_OFFSET 0x0008
+#define SCU_TLEECR0_OFFSET 0x000C
+#define SCU_STPTLDARNI_OFFSET 0x0010
+
+
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT (0)
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK (0x00000001)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK (0x00000002)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT (3)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK (0x00000008)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT (4)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK (0x00000010)
+#define SCU_TLCR_RESERVED_MASK (0xFFFFFFEB)
+
+#define SCU_TLCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_TLCR_ ## name)
+
+/**
+ * struct scu_transport_layer_registers - These are the SCU Transport Layer
+ * registers
+ *
+ *
+ */
+struct scu_transport_layer_registers {
+ /* 0x0000 TLCR */
+ u32 control;
+ /* 0x0004 TLADTR */
+ u32 arbitration_delay_timer;
+ /* 0x0008 TLTTMR */
+ u32 timer_test_mode;
+ /* 0x000C reserved */
+ u32 reserved_0C;
+ /* 0x0010 STPTLDARNI */
+ u32 stp_rni;
+ /* 0x0014 TLFEWPORCTRL */
+ u32 tlfe_wpo_read_control;
+ /* 0x0018 TLFEWPORDATA */
+ u32 tlfe_wpo_read_data;
+ /* 0x001C RXTLSSCSR1 */
+ u32 rxtl_single_step_control_status_1;
+ /* 0x0020 RXTLSSCSR2 */
+ u32 rxtl_single_step_control_status_2;
+ /* 0x0024 AWTRDDCR */
+ u32 tlfe_awt_retry_delay_debug_control;
+ /* Remainder of TL memory space */
+ u32 reserved_0028_007F[0x16];
+
+};
+
+/* Protocol Engine Group Registers */
+#define SCU_SCUVZECRx_OFFSET 0x1080
+
+/* Link Layer Offsets (PEG + TL + LL) */
+#define SCU_SAS_SPDTOV_OFFSET 0x0000
+#define SCU_SAS_LLSTA_OFFSET 0x0004
+#define SCU_SATA_PSELTOV_OFFSET 0x0008
+#define SCU_SAS_TIMETOV_OFFSET 0x0010
+#define SCU_SAS_LOSTOT_OFFSET 0x0014
+#define SCU_SAS_LNKTOV_OFFSET 0x0018
+#define SCU_SAS_PHYTOV_OFFSET 0x001C
+#define SCU_SAS_AFERCNT_OFFSET 0x0020
+#define SCU_SAS_WERCNT_OFFSET 0x0024
+#define SCU_SAS_TIID_OFFSET 0x0028
+#define SCU_SAS_TIDNH_OFFSET 0x002C
+#define SCU_SAS_TIDNL_OFFSET 0x0030
+#define SCU_SAS_TISSAH_OFFSET 0x0034
+#define SCU_SAS_TISSAL_OFFSET 0x0038
+#define SCU_SAS_TIPID_OFFSET 0x003C
+#define SCU_SAS_TIRES2_OFFSET 0x0040
+#define SCU_SAS_ADRSTA_OFFSET 0x0044
+#define SCU_SAS_MAWTTOV_OFFSET 0x0048
+#define SCU_SAS_FRPLDFIL_OFFSET 0x0054
+#define SCU_SAS_RFCNT_OFFSET 0x0060
+#define SCU_SAS_TFCNT_OFFSET 0x0064
+#define SCU_SAS_RFDCNT_OFFSET 0x0068
+#define SCU_SAS_TFDCNT_OFFSET 0x006C
+#define SCU_SAS_LERCNT_OFFSET 0x0070
+#define SCU_SAS_RDISERRCNT_OFFSET 0x0074
+#define SCU_SAS_CRERCNT_OFFSET 0x0078
+#define SCU_STPCTL_OFFSET 0x007C
+#define SCU_SAS_PCFG_OFFSET 0x0080
+#define SCU_SAS_CLKSM_OFFSET 0x0084
+#define SCU_SAS_TXCOMWAKE_OFFSET 0x0088
+#define SCU_SAS_TXCOMINIT_OFFSET 0x008C
+#define SCU_SAS_TXCOMSAS_OFFSET 0x0090
+#define SCU_SAS_COMINIT_OFFSET 0x0094
+#define SCU_SAS_COMWAKE_OFFSET 0x0098
+#define SCU_SAS_COMSAS_OFFSET 0x009C
+#define SCU_SAS_SFERCNT_OFFSET 0x00A0
+#define SCU_SAS_CDFERCNT_OFFSET 0x00A4
+#define SCU_SAS_DNFERCNT_OFFSET 0x00A8
+#define SCU_SAS_PRSTERCNT_OFFSET 0x00AC
+#define SCU_SAS_CNTCTL_OFFSET 0x00B0
+#define SCU_SAS_SSPTOV_OFFSET 0x00B4
+#define SCU_FTCTL_OFFSET 0x00B8
+#define SCU_FRCTL_OFFSET 0x00BC
+#define SCU_FTWMRK_OFFSET 0x00C0
+#define SCU_ENSPINUP_OFFSET 0x00C4
+#define SCU_SAS_TRNTOV_OFFSET 0x00C8
+#define SCU_SAS_PHYCAP_OFFSET 0x00CC
+#define SCU_SAS_PHYCTL_OFFSET 0x00D0
+#define SCU_SAS_LLCTL_OFFSET 0x00D8
+#define SCU_AFE_XCVRCR_OFFSET 0x00DC
+#define SCU_AFE_LUTCR_OFFSET 0x00E0
+
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 (1)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK (0x000003FC)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT (16)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK (0x00010000)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK (0x00020000)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT (24)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK (0xFF000000)
+#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED (0x00FCFC00)
+
+#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
+
+#define SCU_SAS_LLCTL_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
+
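+/*
+ * Illustrative sketch (not part of the hardware definition): assuming the
+ * SCU_GEN_VALUE() helper defined earlier in this header, capping a phy at
+ * gen2 in the link layer control register could be expressed as:
+ *
+ *	u32 llctl = SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE,
+ *			SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2);
+ */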
+
+/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
+#define SCU_PSZGCR_OFFSET 0x00E4
+#define SCU_SAS_RECPHYCAP_OFFSET 0x00E8
+/* #define SCU_TX_LUTSEL_OFFSET 0x00B8 */
+
+#define SCU_SAS_PTxC_OFFSET 0x00D4 /* Same offset as SAS_TCTSTM */
+
+/**
+ * struct scu_link_layer_registers - SCU Link Layer Registers
+ *
+ *
+ */
+struct scu_link_layer_registers {
+/* 0x0000 SAS_SPDTOV */
+ u32 speed_negotiation_timers;
+/* 0x0004 SAS_LLSTA */
+ u32 link_layer_status;
+/* 0x0008 SATA_PSELTOV */
+ u32 port_selector_timeout;
+ u32 reserved0C;
+/* 0x0010 SAS_TIMETOV */
+ u32 timeout_unit_value;
+/* 0x0014 SAS_RCDTOV */
+ u32 rcd_timeout;
+/* 0x0018 SAS_LNKTOV */
+ u32 link_timer_timeouts;
+/* 0x001C SAS_PHYTOV */
+ u32 sas_phy_timeouts;
+/* 0x0020 SAS_AFERCNT */
+ u32 received_address_frame_error_counter;
+/* 0x0024 SAS_WERCNT */
+ u32 invalid_dword_counter;
+/* 0x0028 SAS_TIID */
+ u32 transmit_identification;
+/* 0x002C SAS_TIDNH */
+ u32 sas_device_name_high;
+/* 0x0030 SAS_TIDNL */
+ u32 sas_device_name_low;
+/* 0x0034 SAS_TISSAH */
+ u32 source_sas_address_high;
+/* 0x0038 SAS_TISSAL */
+ u32 source_sas_address_low;
+/* 0x003C SAS_TIPID */
+ u32 identify_frame_phy_id;
+/* 0x0040 SAS_TIRES2 */
+ u32 identify_frame_reserved;
+/* 0x0044 SAS_ADRSTA */
+ u32 received_address_frame;
+/* 0x0048 SAS_MAWTTOV */
+ u32 maximum_arbitration_wait_timer_timeout;
+/* 0x004C SAS_PTxC */
+ u32 transmit_primitive;
+/* 0x0050 SAS_RORES */
+ u32 error_counter_event_notification_control;
+/* 0x0054 SAS_FRPLDFIL */
+ u32 frxq_payload_fill_threshold;
+/* 0x0058 SAS_LLHANG_TOT */
+ u32 link_layer_hang_detection_timeout;
+ u32 reserved_5C;
+/* 0x0060 SAS_RFCNT */
+ u32 received_frame_count;
+/* 0x0064 SAS_TFCNT */
+ u32 transmit_frame_count;
+/* 0x0068 SAS_RFDCNT */
+ u32 received_dword_count;
+/* 0x006C SAS_TFDCNT */
+ u32 transmit_dword_count;
+/* 0x0070 SAS_LERCNT */
+ u32 loss_of_sync_error_count;
+/* 0x0074 SAS_RDISERRCNT */
+ u32 running_disparity_error_count;
+/* 0x0078 SAS_CRERCNT */
+ u32 received_frame_crc_error_count;
+/* 0x007C STPCTL */
+ u32 stp_control;
+/* 0x0080 SAS_PCFG */
+ u32 phy_configuration;
+/* 0x0084 SAS_CLKSM */
+ u32 clock_skew_management;
+/* 0x0088 SAS_TXCOMWAKE */
+ u32 transmit_comwake_signal;
+/* 0x008C SAS_TXCOMINIT */
+ u32 transmit_cominit_signal;
+/* 0x0090 SAS_TXCOMSAS */
+ u32 transmit_comsas_signal;
+/* 0x0094 SAS_COMINIT */
+ u32 cominit_control;
+/* 0x0098 SAS_COMWAKE */
+ u32 comwake_control;
+/* 0x009C SAS_COMSAS */
+ u32 comsas_control;
+/* 0x00A0 SAS_SFERCNT */
+ u32 received_short_frame_count;
+/* 0x00A4 SAS_CDFERCNT */
+ u32 received_frame_without_credit_count;
+/* 0x00A8 SAS_DNFERCNT */
+ u32 received_frame_after_done_count;
+/* 0x00AC SAS_PRSTERCNT */
+ u32 phy_reset_problem_count;
+/* 0x00B0 SAS_CNTCTL */
+ u32 counter_control;
+/* 0x00B4 SAS_SSPTOV */
+ u32 ssp_timer_timeout_values;
+/* 0x00B8 FTCTL */
+ u32 ftx_control;
+/* 0x00BC FRCTL */
+ u32 frx_control;
+/* 0x00C0 FTWMRK */
+ u32 ftx_watermark;
+/* 0x00C4 ENSPINUP */
+ u32 notify_enable_spinup_control;
+/* 0x00C8 SAS_TRNTOV */
+ u32 sas_training_sequence_timer_values;
+/* 0x00CC SAS_PHYCAP */
+ u32 phy_capabilities;
+/* 0x00D0 SAS_PHYCTL */
+ u32 phy_control;
+ u32 reserved_d4;
+/* 0x00D8 LLCTL */
+ u32 link_layer_control;
+/* 0x00DC AFE_XCVRCR */
+ u32 afe_xcvr_control;
+/* 0x00E0 AFE_LUTCR */
+ u32 afe_lookup_table_control;
+/* 0x00E4 PSZGCR */
+ u32 phy_source_zone_group_control;
+/* 0x00E8 SAS_RECPHYCAP */
+ u32 receive_phycap;
+ u32 reserved_ec;
+/* 0x00F0 SNAFERXRSTCTL */
+ u32 speed_negotiation_afe_rx_reset_control;
+/* 0x00F4 SAS_SSIPMCTL */
+ u32 power_management_control;
+/* 0x00F8 SAS_PSPREQ_PRIM */
+ u32 sas_pm_partial_request_primitive;
+/* 0x00FC SAS_PSSREQ_PRIM */
+ u32 sas_pm_slumber_request_primitive;
+/* 0x0100 SAS_PPSACK_PRIM */
+ u32 sas_pm_ack_primitive_register;
+/* 0x0104 SAS_PSNAK_PRIM */
+ u32 sas_pm_nak_primitive_register;
+/* 0x0108 SAS_SSIPMTOV */
+ u32 sas_primitive_timeout;
+ u32 reserved_10c;
+/* 0x0110 - 0x011C PLAPRDCTRLxREG */
+ u32 pla_product_control[4];
+/* 0x0120 PLAPRDSUMREG */
+ u32 pla_product_sum;
+/* 0x0124 PLACONTROLREG */
+ u32 pla_control;
+/* Remainder of memory space 896 bytes */
+ u32 reserved_0128_037f[0x96];
+
+};
+
+/*
+ * 0x00D4 // Same offset as SAS_TCTSTM SAS_PTxC
+ * u32 primitive_transmit_control; */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SGPIO
+ * ---------------------------------------------------------------------------- */
+#define SCU_SGPIO_OFFSET 0x1400
+
+/* #define SCU_SGPIO_OFFSET 0x6000 // later moves to 0x1400 see HSD 652625 */
+#define SCU_SGPIO_SGICR_OFFSET 0x0000
+#define SCU_SGPIO_SGPBR_OFFSET 0x0004
+#define SCU_SGPIO_SGSDLR_OFFSET 0x0008
+#define SCU_SGPIO_SGSDUR_OFFSET 0x000C
+#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
+#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
+#define SCU_SGPIO_SGVSCR_OFFSET 0x0018
+/* Address from 0x0820 to 0x083C */
+#define SCU_SGPIO_SGODSR_OFFSET 0x0020
+
+/**
+ * struct scu_sgpio_registers - SCU SGPIO Registers
+ *
+ *
+ */
+struct scu_sgpio_registers {
+/* 0x0000 SGPIO_SGICR */
+ u32 interface_control;
+/* 0x0004 SGPIO_SGPBR */
+ u32 blink_rate;
+/* 0x0008 SGPIO_SGSDLR */
+ u32 start_drive_lower;
+/* 0x000C SGPIO_SGSDUR */
+ u32 start_drive_upper;
+/* 0x0010 SGPIO_SGSIDLR */
+ u32 serial_input_lower;
+/* 0x0014 SGPIO_SGSIDUR */
+ u32 serial_input_upper;
+/* 0x0018 SGPIO_SGVSCR */
+ u32 vendor_specific_code;
+/* 0x001C reserved */
+ u32 reserved_1C;
+/* 0x0020 SGPIO_SGODSR */
+ u32 ouput_data_select[8];
+/* Remainder of memory space 256 bytes */
+ u32 reserved_1444_14ff[0x30];
+
+};
+
+/*
+ * *****************************************************************************
+ * * Defines for VIIT entry offsets
+ * * Access additional entries by SCU_VIIT_BASE + index * 0x10
+ * ***************************************************************************** */
+#define SCU_VIIT_BASE 0x1c00
+
+struct scu_viit_registers {
+ u32 registers[256];
+};
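+
+/*
+ * Illustrative sketch (not part of the hardware definition): per the comment
+ * above, the BAR-relative offset of VIIT entry i would be
+ *
+ *	u32 viit_offset = SCU_VIIT_BASE + i * 0x10;
+ */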
+
+/*
+ * *****************************************************************************
+ * * SCU PORT TASK SCHEDULER REGISTERS
+ * ***************************************************************************** */
+
+#define SCU_PTSG_BASE 0x1000
+
+#define SCU_PTSG_PTSGCR_OFFSET 0x0000
+#define SCU_PTSG_RTCR_OFFSET 0x0004
+#define SCU_PTSG_RTCCR_OFFSET 0x0008
+#define SCU_PTSG_PTS0CR_OFFSET 0x0010
+#define SCU_PTSG_PTS0SR_OFFSET 0x0014
+#define SCU_PTSG_PTS1CR_OFFSET 0x0018
+#define SCU_PTSG_PTS1SR_OFFSET 0x001C
+#define SCU_PTSG_PTS2CR_OFFSET 0x0020
+#define SCU_PTSG_PTS2SR_OFFSET 0x0024
+#define SCU_PTSG_PTS3CR_OFFSET 0x0028
+#define SCU_PTSG_PTS3SR_OFFSET 0x002C
+#define SCU_PTSG_PCSPE0CR_OFFSET 0x0030
+#define SCU_PTSG_PCSPE1CR_OFFSET 0x0034
+#define SCU_PTSG_PCSPE2CR_OFFSET 0x0038
+#define SCU_PTSG_PCSPE3CR_OFFSET 0x003C
+#define SCU_PTSG_ETMTSCCR_OFFSET 0x0040
+#define SCU_PTSG_ETMRNSCCR_OFFSET 0x0044
+
+/**
+ * struct scu_port_task_scheduler_registers - These are the control/stats pairs
+ * for each Port Task Scheduler.
+ *
+ *
+ */
+struct scu_port_task_scheduler_registers {
+ u32 control;
+ u32 status;
+};
+
+/**
+ * struct scu_port_task_scheduler_group_registers - These are the PORT Task
+ * Scheduler registers
+ *
+ *
+ */
+struct scu_port_task_scheduler_group_registers {
+/* 0x0000 PTSGCR */
+ u32 control;
+/* 0x0004 RTCR */
+ u32 real_time_clock;
+/* 0x0008 RTCCR */
+ u32 real_time_clock_control;
+/* 0x000C */
+ u32 reserved_0C;
+/*
+ * 0x0010 PTS0CR
+ * 0x0014 PTS0SR
+ * 0x0018 PTS1CR
+ * 0x001C PTS1SR
+ * 0x0020 PTS2CR
+ * 0x0024 PTS2SR
+ * 0x0028 PTS3CR
+ * 0x002C PTS3SR */
+ struct scu_port_task_scheduler_registers port[4];
+/*
+ * 0x0030 PCSPE0CR
+ * 0x0034 PCSPE1CR
+ * 0x0038 PCSPE2CR
+ * 0x003C PCSPE3CR */
+ u32 protocol_engine[4];
+/* 0x0040 ETMTSCCR */
+ u32 tc_scanning_interval_control;
+/* 0x0044 ETMRNSCCR */
+ u32 rnc_scanning_interval_control;
+/* Remainder of memory space 128 bytes */
+ u32 reserved_1048_107f[0x0E];
+
+};
+
+#define SCU_PTSG_SCUVZECR_OFFSET 0x003C
+
+/*
+ * *****************************************************************************
+ * * AFE REGISTERS
+ * ***************************************************************************** */
+#define SCU_AFE_MMR_BASE 0xE000
+
+/*
+ * AFE 0 is at offset 0x0800
+ * AFE 1 is at offset 0x0900
+ * AFE 2 is at offset 0x0a00
+ * AFE 3 is at offset 0x0b00 */
+struct scu_afe_transceiver {
+ /* 0x0000 AFE_XCVR_CTRL0 */
+ u32 afe_xcvr_control0;
+ /* 0x0004 AFE_XCVR_CTRL1 */
+ u32 afe_xcvr_control1;
+ /* 0x0008 */
+ u32 reserved_0008;
+ /* 0x000c afe_dfx_rx_control0 */
+ u32 afe_dfx_rx_control0;
+ /* 0x0010 AFE_DFX_RX_CTRL1 */
+ u32 afe_dfx_rx_control1;
+ /* 0x0014 */
+ u32 reserved_0014;
+ /* 0x0018 AFE_DFX_RX_STS0 */
+ u32 afe_dfx_rx_status0;
+ /* 0x001c AFE_DFX_RX_STS1 */
+ u32 afe_dfx_rx_status1;
+ /* 0x0020 */
+ u32 reserved_0020;
+ /* 0x0024 AFE_TX_CTRL */
+ u32 afe_tx_control;
+ /* 0x0028 AFE_TX_AMP_CTRL0 */
+ u32 afe_tx_amp_control0;
+ /* 0x002c AFE_TX_AMP_CTRL1 */
+ u32 afe_tx_amp_control1;
+ /* 0x0030 AFE_TX_AMP_CTRL2 */
+ u32 afe_tx_amp_control2;
+ /* 0x0034 AFE_TX_AMP_CTRL3 */
+ u32 afe_tx_amp_control3;
+ /* 0x0038 afe_tx_ssc_control */
+ u32 afe_tx_ssc_control;
+ /* 0x003c */
+ u32 reserved_003c;
+ /* 0x0040 AFE_RX_SSC_CTRL0 */
+ u32 afe_rx_ssc_control0;
+ /* 0x0044 AFE_RX_SSC_CTRL1 */
+ u32 afe_rx_ssc_control1;
+ /* 0x0048 AFE_RX_SSC_CTRL2 */
+ u32 afe_rx_ssc_control2;
+ /* 0x004c AFE_RX_EQ_STS0 */
+ u32 afe_rx_eq_status0;
+ /* 0x0050 AFE_RX_EQ_STS1 */
+ u32 afe_rx_eq_status1;
+ /* 0x0054 AFE_RX_CDR_STS */
+ u32 afe_rx_cdr_status;
+ /* 0x0058 */
+ u32 reserved_0058;
+ /* 0x005c AFE_CHAN_CTRL */
+ u32 afe_channel_control;
+ /* 0x0060-0x006c */
+ u32 reserved_0060_006c[0x04];
+ /* 0x0070 AFE_XCVR_EC_STS0 */
+ u32 afe_xcvr_error_capture_status0;
+ /* 0x0074 AFE_XCVR_EC_STS1 */
+ u32 afe_xcvr_error_capture_status1;
+ /* 0x0078 AFE_XCVR_EC_STS2 */
+ u32 afe_xcvr_error_capture_status2;
+ /* 0x007c afe_xcvr_ec_status3 */
+ u32 afe_xcvr_error_capture_status3;
+ /* 0x0080 AFE_XCVR_EC_STS4 */
+ u32 afe_xcvr_error_capture_status4;
+ /* 0x0084 AFE_XCVR_EC_STS5 */
+ u32 afe_xcvr_error_capture_status5;
+ /* 0x0088-0x00fc */
+ u32 reserved_0088_00fc[0x1e];
+};
+
+/**
+ * struct scu_afe_registers - AFE Registers
+ *
+ *
+ */
+struct scu_afe_registers {
+ /* 0Xe000 AFE_BIAS_CTRL */
+ u32 afe_bias_control;
+ u32 reserved_0004;
+ /* 0x0008 AFE_PLL_CTRL0 */
+ u32 afe_pll_control0;
+ /* 0x000c AFE_PLL_CTRL1 */
+ u32 afe_pll_control1;
+ /* 0x0010 AFE_PLL_CTRL2 */
+ u32 afe_pll_control2;
+ /* 0x0014 AFE_CB_STS */
+ u32 afe_common_block_status;
+ /* 0x0018-0x007c */
+ u32 reserved_18_7c[0x1a];
+ /* 0x0080 AFE_PMSN_MCTRL0 */
+ u32 afe_pmsn_master_control0;
+ /* 0x0084 AFE_PMSN_MCTRL1 */
+ u32 afe_pmsn_master_control1;
+ /* 0x0088 AFE_PMSN_MCTRL2 */
+ u32 afe_pmsn_master_control2;
+ /* 0x008C-0x00fc */
+ u32 reserved_008c_00fc[0x1D];
+ /* 0x0100 AFE_DFX_MST_CTRL0 */
+ u32 afe_dfx_master_control0;
+ /* 0x0104 AFE_DFX_MST_CTRL1 */
+ u32 afe_dfx_master_control1;
+ /* 0x0108 AFE_DFX_DCL_CTRL */
+ u32 afe_dfx_dcl_control;
+ /* 0x010c AFE_DFX_DMON_CTRL */
+ u32 afe_dfx_digital_monitor_control;
+ /* 0x0110 AFE_DFX_AMONP_CTRL */
+ u32 afe_dfx_analog_p_monitor_control;
+ /* 0x0114 AFE_DFX_AMONN_CTRL */
+ u32 afe_dfx_analog_n_monitor_control;
+ /* 0x0118 AFE_DFX_NTL_STS */
+ u32 afe_dfx_ntl_status;
+ /* 0x011c AFE_DFX_FIFO_STS0 */
+ u32 afe_dfx_fifo_status0;
+ /* 0x0120 AFE_DFX_FIFO_STS1 */
+ u32 afe_dfx_fifo_status1;
+ /* 0x0124 AFE_DFX_MPAT_CTRL */
+ u32 afe_dfx_master_pattern_control;
+ /* 0x0128 AFE_DFX_P0_CTRL */
+ u32 afe_dfx_p0_control;
+ /* 0x012c-0x01a8 AFE_DFX_P0_DRx */
+ u32 afe_dfx_p0_data[32];
+ /* 0x01ac */
+ u32 reserved_01ac;
+ /* 0x01b0-0x020c AFE_DFX_P0_IRx */
+ u32 afe_dfx_p0_instruction[24];
+ /* 0x0210 */
+ u32 reserved_0210;
+ /* 0x0214 AFE_DFX_P1_CTRL */
+ u32 afe_dfx_p1_control;
+ /* 0x0218-0x0254 AFE_DFX_P1_DRx */
+ u32 afe_dfx_p1_data[16];
+ /* 0x0258-0x029c */
+ u32 reserved_0258_029c[0x12];
+ /* 0x02a0-0x02bc AFE_DFX_P1_IRx */
+ u32 afe_dfx_p1_instruction[8];
+ /* 0x02c0-0x2fc */
+ u32 reserved_02c0_02fc[0x10];
+ /* 0x0300 AFE_DFX_TX_PMSN_CTRL */
+ u32 afe_dfx_tx_pmsn_control;
+ /* 0x0304 AFE_DFX_RX_PMSN_CTRL */
+ u32 afe_dfx_rx_pmsn_control;
+ u32 reserved_0308;
+ /* 0x030c AFE_DFX_NOA_CTRL0 */
+ u32 afe_dfx_noa_control0;
+ /* 0x0310 AFE_DFX_NOA_CTRL1 */
+ u32 afe_dfx_noa_control1;
+ /* 0x0314 AFE_DFX_NOA_CTRL2 */
+ u32 afe_dfx_noa_control2;
+ /* 0x0318 AFE_DFX_NOA_CTRL3 */
+ u32 afe_dfx_noa_control3;
+ /* 0x031c AFE_DFX_NOA_CTRL4 */
+ u32 afe_dfx_noa_control4;
+ /* 0x0320 AFE_DFX_NOA_CTRL5 */
+ u32 afe_dfx_noa_control5;
+ /* 0x0324 AFE_DFX_NOA_CTRL6 */
+ u32 afe_dfx_noa_control6;
+ /* 0x0328 AFE_DFX_NOA_CTRL7 */
+ u32 afe_dfx_noa_control7;
+ /* 0x032c-0x07fc */
+ u32 reserved_032c_07fc[0x135];
+
+ /* 0x0800-0x0bfc */
+ struct scu_afe_transceiver scu_afe_xcvr[4];
+
+ /* 0x0c00-0x0ffc */
+ u32 reserved_0c00_0ffc[0x0100];
+};
+
+struct scu_protocol_engine_group_registers {
+ u32 table[0xE0];
+};
+
+
+struct scu_viit_iit {
+ u32 table[256];
+};
+
+/**
+ * Placeholder for the ZONE Partition Table information. ZONING will not be
+ * included in the 1.1 release.
+ *
+ *
+ */
+struct scu_zone_partition_table {
+ u32 table[2048];
+};
+
+/**
+ * Placeholder for the CRAM register since I am not sure if we need to
+ * read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_completion_ram {
+ u32 ram[128];
+};
+
+/**
+ * Placeholder for the FBRAM registers since I am not sure if we need to
+ * read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_frame_buffer_ram {
+ u32 ram[128];
+};
+
+#define scu_scratch_ram_SIZE_IN_DWORDS 256
+
+/**
+ * Placeholder for the scratch RAM registers.
+ *
+ *
+ */
+struct scu_scratch_ram {
+ u32 ram[scu_scratch_ram_SIZE_IN_DWORDS];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_protocol_engine_partition {
+ u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_hub_partition {
+ u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_host_interface_partition {
+ u32 reserved[64];
+};
+
+/**
+ * struct transport_link_layer_pair - The SCU hardware pairs up the TL
+ * registers with the LL registers, so we must place them adjacent to form
+ * the array of registers in the PEG.
+ *
+ *
+ */
+struct transport_link_layer_pair {
+ struct scu_transport_layer_registers tl;
+ struct scu_link_layer_registers ll;
+};
+
+/**
+ * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space.
+ * These registers are unique to each protocol engine group. There can be
+ * at most two PEGs for a single SCU part.
+ *
+ *
+ */
+struct scu_peg_registers {
+ struct transport_link_layer_pair pe[4];
+ struct scu_port_task_scheduler_group_registers ptsg;
+ struct scu_protocol_engine_group_registers peg;
+ struct scu_sgpio_registers sgpio;
+ u32 reserved_01500_1BFF[0x1C0];
+ struct scu_viit_entry viit[64];
+ struct scu_zone_partition_table zpt0;
+ struct scu_zone_partition_table zpt1;
+};
+
+/**
+ * struct scu_registers - SCU registers including both PEG registers if we turn
+ * on that compile option. All of these registers are in the memory mapped
+ * space returned from BAR1.
+ *
+ *
+ */
+struct scu_registers {
+ /* 0x0000 - PEG 0 */
+ struct scu_peg_registers peg0;
+
+ /* 0x6000 - SDMA and Miscellaneous */
+ struct scu_sdma_registers sdma;
+ struct scu_completion_ram cram;
+ struct scu_frame_buffer_ram fbram;
+ u32 reserved_6800_69FF[0x80];
+ struct noa_protocol_engine_partition noa_pe;
+ struct noa_hub_partition noa_hub;
+ struct noa_host_interface_partition noa_if;
+ u32 reserved_6d00_7fff[0x4c0];
+
+ /* 0x8000 - PEG 1 */
+ struct scu_peg_registers peg1;
+
+ /* 0xE000 - AFE Registers */
+ struct scu_afe_registers afe;
+
+ /* 0xF000 - reserved */
+ u32 reserved_f000_211fff[0x80c00];
+
+ /* 0x212000 - scratch RAM */
+ struct scu_scratch_ram scratch_ram;
+};
+
+#endif /* _SCU_REGISTERS_HEADER_ */
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
new file mode 100644
index 000000000000..b6e6368c2665
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.c
@@ -0,0 +1,1501 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas.h>
+#include "isci.h"
+#include "port.h"
+#include "remote_device.h"
+#include "request.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "task.h"
+
+/**
+ * isci_remote_device_not_ready() - This function is called by the ihost when
+ * the remote device is not ready. We mark the isci device as not ready for
+ * I/O and signal any waiting process.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
+ * @reason: This parameter specifies why the device is not ready.
+ *
+ * sci_lock is held on entrance to this function.
+ */
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+ struct isci_remote_device *idev, u32 reason)
+{
+ struct isci_request *ireq;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ switch (reason) {
+ case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
+ set_bit(IDEV_GONE, &idev->flags);
+ break;
+ case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+ set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+ /* Kill all outstanding requests for the device. */
+ list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p request = %p\n",
+ __func__, idev, ireq);
+
+ sci_controller_terminate_request(ihost,
+ idev,
+ ireq);
+ }
+ /* Fall through into the default case... */
+ default:
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ break;
+ }
+}
+
+/**
+ * isci_remote_device_ready() - This function is called by the ihost when the
+ * remote device is ready. We mark the isci device as ready and signal the
+ * waiting process.
+ * @ihost: our valid isci_host
+ * @idev: remote device
+ *
+ */
+static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n", __func__, idev);
+
+ clear_bit(IDEV_IO_NCQERROR, &idev->flags);
+ set_bit(IDEV_IO_READY, &idev->flags);
+ if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
+ wake_up(&ihost->eventq);
+}
+
+/* called once the remote node context is ready to be freed.
+ * The remote device can now report that its stop operation is complete.
+ */
+static void rnc_destruct_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ BUG_ON(idev->started_request_count != 0);
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
+{
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ enum sci_status status = SCI_SUCCESS;
+ u32 i;
+
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq = ihost->reqs[i];
+ enum sci_status s;
+
+ if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+ ireq->target_device != idev)
+ continue;
+
+ s = sci_controller_terminate_request(ihost, idev, ireq);
+ if (s != SCI_SUCCESS)
+ status = s;
+ }
+
+ return status;
+}
+
+enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
+ u32 timeout)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_STOPPED:
+ return SCI_SUCCESS;
+ case SCI_DEV_STARTING:
+ /* device not started so there had better be no requests */
+ BUG_ON(idev->started_request_count != 0);
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done, idev);
+ /* Transition to the stopping state and wait for the
+ * remote node to complete being posted and invalidated.
+ */
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ return SCI_SUCCESS;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ if (idev->started_request_count == 0) {
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done, idev);
+ return SCI_SUCCESS;
+ } else
+ return sci_remote_device_terminate_requests(idev);
+ break;
+ case SCI_DEV_STOPPING:
+ /* All requests should have been terminated, but if there is an
+ * attempt to stop a device already in the stopping state, then
+ * try again to terminate.
+ */
+ return sci_remote_device_terminate_requests(idev);
+ case SCI_DEV_RESETTING:
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ return SCI_SUCCESS;
+ }
+}
+
+enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ sci_change_state(sm, SCI_DEV_RESETTING);
+ return SCI_SUCCESS;
+ }
+}
+
+enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ if (state != SCI_DEV_RESETTING) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(sm, SCI_DEV_READY);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+ u32 suspend_type)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ if (state != SCI_STP_DEV_CMD) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ return sci_remote_node_context_suspend(&idev->rnc,
+ suspend_type, NULL, NULL);
+}
+
+enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
+ u32 frame_index)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ /* Return the frame back to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING: {
+ struct isci_request *ireq;
+ struct ssp_frame_hdr hdr;
+ void *frame_header;
+ ssize_t word_cnt;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ word_cnt = sizeof(hdr) / sizeof(u32);
+ sci_swab32_cpy(&hdr, frame_header, word_cnt);
+
+ ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
+ if (ireq && ireq->target_device == idev) {
+ /* The IO request is now in charge of releasing the frame */
+ status = sci_io_request_frame_handler(ireq, frame_index);
+ } else {
+ /* We could not map this tag to a valid IO
+ * request. Just toss the frame and continue.
+ */
+ sci_controller_release_frame(ihost, frame_index);
+ }
+ break;
+ }
+ case SCI_STP_DEV_NCQ: {
+ struct dev_to_host_fis *hdr;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&hdr);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (hdr->fis_type == FIS_SETDEVBITS &&
+ (hdr->status & ATA_ERR)) {
+ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+
+ /* TODO Check sactive and complete associated IO if any. */
+ sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
+ } else if (hdr->fis_type == FIS_REGD2H &&
+ (hdr->status & ATA_ERR)) {
+ /*
+ * Some devices return D2H FIS when an NCQ error is detected.
+ * Treat this like an SDB error FIS and use the same not-ready reason.
+ */
+ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+ sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
+ } else
+ status = SCI_FAILURE;
+
+ sci_controller_release_frame(ihost, frame_index);
+ break;
+ }
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
+ /* The device does not process any UF received from the hardware while
+ * in this state. All unsolicited frames are forwarded to the io request
+ * object.
+ */
+ status = sci_io_request_frame_handler(idev->working_request, frame_index);
+ break;
+ }
+
+ return status;
+}
+
+static bool is_remote_device_ready(struct isci_remote_device *idev)
+{
+
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
+ u32 event_code)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ enum sci_status status;
+
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_OPS_MISC:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
+ break;
+ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+ if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
+ status = SCI_SUCCESS;
+
+ /* Suspend the associated RNC */
+ sci_remote_node_context_suspend(&idev->rnc,
+ SCI_SOFTWARE_SUSPENSION,
+ NULL, NULL);
+
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: device: %p event code: %x: %s\n",
+ __func__, idev, event_code,
+ is_remote_device_ready(idev)
+ ? "I_T_Nexus_Timeout event"
+ : "I_T_Nexus_Timeout event in wrong state");
+
+ break;
+ }
+ /* Else, fall through and treat as unhandled... */
+ default:
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: device: %p event code: %x: %s\n",
+ __func__, idev, event_code,
+ is_remote_device_ready(idev)
+ ? "unexpected event"
+ : "unexpected event in wrong state");
+ status = SCI_FAILURE_INVALID_STATE;
+ break;
+ }
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (state == SCI_STP_DEV_IDLE) {
+
+ /* Suspension events are handled specially in this state:
+ * we resume the RNC right away.
+ */
+ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+ scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
+ status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+ }
+
+ return status;
+}
+
+static void sci_remote_device_start_request(struct isci_remote_device *idev,
+ struct isci_request *ireq,
+ enum sci_status status)
+{
+ struct isci_port *iport = idev->owning_port;
+
+ /* cleanup requests that failed after starting on the port */
+ if (status != SCI_SUCCESS)
+ sci_port_complete_io(iport, idev, ireq);
+ else {
+ kref_get(&idev->kref);
+ idev->started_request_count++;
+ }
+}
+
+enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ /* Attempt to start an io request for this device object. The remote
+ * device object will issue the start request for the io and, if
+ * successful, it will start the request for the port object and then
+ * increment its own request count.
+ */
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ case SCI_STP_DEV_IDLE: {
+ /* Handle the start io operation for a sata device that is in
+ * the command idle state:
+ * - Evaluate the type of IO request to be started.
+ * - If it is an NCQ request, change to the NCQ substate.
+ * - If it is any other command, change to the CMD substate.
+ *
+ * If this is a softreset we may want to have a different
+ * substate.
+ */
+ enum sci_remote_device_states new_state;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (task->ata_task.use_ncq)
+ new_state = SCI_STP_DEV_NCQ;
+ else {
+ idev->working_request = ireq;
+ new_state = SCI_STP_DEV_CMD;
+ }
+ sci_change_state(sm, new_state);
+ break;
+ }
+ case SCI_STP_DEV_NCQ: {
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ if (task->ata_task.use_ncq) {
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ } else
+ return SCI_FAILURE_INVALID_STATE;
+ break;
+ }
+ case SCI_STP_DEV_AWAIT_RESET:
+ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ case SCI_SMP_DEV_IDLE:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ idev->working_request = ireq;
+ sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
+ break;
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
+ /* The device is already handling a command; it cannot accept new
+ * commands until this one is complete.
+ */
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_remote_device_start_request(idev, ireq, status);
+ return status;
+}
+
+static enum sci_status common_complete_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ status = sci_request_complete(ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_port_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_remote_device_decrement_request_count(idev);
+ return status;
+}
+
+enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_RESETTING:
+ status = common_complete_io(iport, idev, ireq);
+ break;
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ /* This request caused a hardware error and the device needs a LUN
+ * reset. Force the state machine into the AWAIT_RESET state so the
+ * remaining IOs reach the RNC state handler and are completed with a
+ * status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
+ */
+ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
+ } else if (idev->started_request_count == 0)
+ sci_change_state(sm, SCI_STP_DEV_IDLE);
+ break;
+ case SCI_SMP_DEV_CMD:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+ sci_change_state(sm, SCI_SMP_DEV_IDLE);
+ break;
+ case SCI_DEV_STOPPING:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (idev->started_request_count == 0)
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done,
+ idev);
+ break;
+ }
+
+ if (status != SCI_SUCCESS)
+ dev_err(scirdev_to_dev(idev),
+ "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
+ "could not complete\n", __func__, iport,
+ idev, ireq, status);
+ else
+ isci_put_device(idev);
+
+ return status;
+}
+
+static void sci_remote_device_continue_request(void *dev)
+{
+ struct isci_remote_device *idev = dev;
+
+ /* we need to check if this request is still valid to continue. */
+ if (idev->working_request)
+ sci_controller_continue_io(idev->working_request);
+}
+
+enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ goto out;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ goto out;
+
+ /* Note: If the remote device state is not IDLE this will
+ * replace the request that probably resulted in the task
+ * management request.
+ */
+ idev->working_request = ireq;
+ sci_change_state(sm, SCI_STP_DEV_CMD);
+
+ /* The remote node context must cleanup the TCi to NCQ mapping
+ * table. The only way to do this correctly is to either write
+ * to the TLCR register or to invalidate and repost the RNC. In
+ * either case the remote node context state machine will take
+ * the correct action when the remote node context is suspended
+ * and later resumed.
+ */
+ sci_remote_node_context_suspend(&idev->rnc,
+ SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+ sci_remote_node_context_resume(&idev->rnc,
+ sci_remote_device_continue_request,
+ idev);
+
+ out:
+ sci_remote_device_start_request(idev, ireq, status);
+ /* We need to let the controller start request handler know that
+ * it can't post TC yet. We will provide a callback function to
+ * post TC when RNC gets resumed.
+ */
+ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
+ case SCI_DEV_READY:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ }
+ sci_remote_device_start_request(idev, ireq, status);
+
+ return status;
+}
+
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
+{
+ struct isci_port *iport = idev->owning_port;
+ u32 context;
+
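+ /* pack the request type with the protocol engine group, logical
+ * port index, and remote node index to form the post context
+ */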
+ context = request |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ idev->rnc.remote_node_index;
+
+ sci_controller_post_request(iport->owning_controller, context);
+}
+
+/* called once the remote node context has transitioned to a
+ * ready state. This is the indication that the remote device object can also
+ * transition to ready.
+ */
+static void remote_device_resume_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ if (is_remote_device_ready(idev))
+ return;
+
+ /* go 'ready' if we are not already in a ready state */
+ sci_change_state(&idev->sm, SCI_DEV_READY);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ /* For NCQ operation we do not issue a isci_remote_device_not_ready().
+ * As a result, avoid sending the ready notification.
+ */
+ if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ /* Initial state is a transitional state to the stopped state */
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+/**
+ * sci_remote_device_destruct() - free remote node context and destruct
+ * @idev: This parameter specifies the remote device to be destructed.
+ *
+ * Remote device objects are a limited resource. As such, they must be
+ * protected. Thus calls to construct and destruct are mutually exclusive and
+ * non-reentrant. Returns SCI_SUCCESS if the device was successfully
+ * destructed, or SCI_FAILURE_INVALID_REMOTE_DEVICE if the supplied device
+ * isn't valid (e.g. it has already been destroyed, the handle isn't
+ * valid, etc.).
+ */
+static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_host *ihost;
+
+ if (state != SCI_DEV_STOPPED) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ ihost = idev->owning_port->owning_controller;
+ sci_controller_free_remote_node_context(ihost, idev,
+ idev->rnc.remote_node_index);
+ idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+ sci_change_state(sm, SCI_DEV_FINAL);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device to be freed.
+ *
+ */
+static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ /* There should not be any outstanding io's. All paths to
+ * here should go through isci_remote_device_nuke_requests.
+ * If we hit this condition, we will need a way to complete
+ * io requests in process */
+ BUG_ON(!list_empty(&idev->reqs_in_process));
+
+ sci_remote_device_destruct(idev);
+ list_del_init(&idev->node);
+ isci_put_device(idev);
+}
+
+static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ u32 prev_state;
+
+ /* If we are entering from the stopping state let the SCI User know that
+ * the stop operation has completed.
+ */
+ prev_state = idev->sm.previous_state_id;
+ if (prev_state == SCI_DEV_STOPPING)
+ isci_remote_device_deconstruct(ihost, idev);
+
+ sci_controller_remote_device_stopped(ihost, idev);
+}
+
+static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
+}
+
+static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ struct domain_device *dev = idev->domain_dev;
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+ sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
+ } else if (dev_is_expander(dev)) {
+ sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
+ } else
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct domain_device *dev = idev->domain_dev;
+
+ if (dev->dev_type == SAS_END_DEV) {
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
+ }
+}
+
+static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ sci_remote_node_context_suspend(
+ &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+}
+
+static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ idev->working_request = NULL;
+ if (sci_remote_node_context_is_ready(&idev->rnc)) {
+ /*
+ * Since the RNC is ready, it's alright to finish completion
+ * processing (e.g. signal the remote device is ready). */
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
+ } else {
+ sci_remote_node_context_resume(&idev->rnc,
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
+ idev);
+ }
+}
+
+static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ BUG_ON(idev->working_request == NULL);
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
+}
+
+static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
+ isci_remote_device_not_ready(ihost, idev,
+ idev->not_ready_reason);
+}
+
+static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ BUG_ON(idev->working_request == NULL);
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ idev->working_request = NULL;
+}
+
+static const struct sci_base_state sci_remote_device_state_table[] = {
+ [SCI_DEV_INITIAL] = {
+ .enter_state = sci_remote_device_initial_state_enter,
+ },
+ [SCI_DEV_STOPPED] = {
+ .enter_state = sci_remote_device_stopped_state_enter,
+ },
+ [SCI_DEV_STARTING] = {
+ .enter_state = sci_remote_device_starting_state_enter,
+ },
+ [SCI_DEV_READY] = {
+ .enter_state = sci_remote_device_ready_state_enter,
+ .exit_state = sci_remote_device_ready_state_exit
+ },
+ [SCI_STP_DEV_IDLE] = {
+ .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
+ },
+ [SCI_STP_DEV_CMD] = {
+ .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
+ },
+ [SCI_STP_DEV_NCQ] = { },
+ [SCI_STP_DEV_NCQ_ERROR] = {
+ .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
+ },
+ [SCI_STP_DEV_AWAIT_RESET] = { },
+ [SCI_SMP_DEV_IDLE] = {
+ .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
+ },
+ [SCI_SMP_DEV_CMD] = {
+ .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
+ .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
+ },
+ [SCI_DEV_STOPPING] = { },
+ [SCI_DEV_FAILED] = { },
+ [SCI_DEV_RESETTING] = {
+ .enter_state = sci_remote_device_resetting_state_enter,
+ .exit_state = sci_remote_device_resetting_state_exit
+ },
+ [SCI_DEV_FINAL] = { },
+};
+
+/**
+ * sci_remote_device_construct() - common construction
+ * @iport: SAS/SATA port through which this device is accessed.
+ * @idev: remote device to construct
+ *
+ * This routine just performs benign initialization and does not
+ * allocate the remote_node_context which is left to
+ * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
+ * frees the remote_node_context(s) for the device.
+ */
+static void sci_remote_device_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ idev->owning_port = iport;
+ idev->started_request_count = 0;
+
+ sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
+
+ sci_remote_node_context_construct(&idev->rnc,
+ SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+/**
+ * sci_remote_device_da_construct() - construct direct attached device.
+ *
+ * The information (e.g. IAF, Signature FIS, etc.) necessary to build
+ * the device is known to the SCI Core since it is contained in the
+ * sci_phy object. Remote node context(s) is/are a global resource
+ * allocated by this routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ enum sci_status status;
+ struct domain_device *dev = idev->domain_dev;
+
+ sci_remote_device_construct(iport, idev);
+
+ /*
+ * This information is used to determine how many remote node context
+ * entries will be needed to store the remote node.
+ */
+ idev->is_direct_attached = true;
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+ idev,
+ &idev->rnc.remote_node_index);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+ (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ idev->connection_rate = sci_port_get_max_allowed_speed(iport);
+
+ /* @todo Should I assign the port width by reading all of the phys on the port? */
+ idev->device_port_width = 1;
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_ea_construct() - construct expander attached device
+ *
+ * Remote node context(s) is/are a global resource allocated by this
+ * routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status;
+
+ sci_remote_device_construct(iport, idev);
+
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+ idev,
+ &idev->rnc.remote_node_index);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+ (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ /*
+ * For SAS-2 the physical link rate is actually a logical link
+ * rate that incorporates multiplexing. The SCU doesn't
+ * incorporate multiplexing and for the purposes of the
+ * connection the logical link rate is the same as the
+ * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
+ * one another, so this code works for both situations. */
+ idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
+ dev->linkrate);
+
+ /* @todo Should I assign the port width by reading all of the phys on the port? */
+ idev->device_port_width = 1;
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_start() - This method will start the supplied remote
+ * device. This method enables normal IO requests to flow through to the
+ * remote device.
+ * @remote_device: This parameter specifies the device to be started.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * start operation should complete.
+ *
+ * An indication of whether the device was successfully started. SCI_SUCCESS
+ * This value is returned if the device was successfully started.
+ * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
+ * the device when there have been no phys added to it.
+ */
+static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
+ u32 timeout)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ enum sci_status status;
+
+ if (state != SCI_DEV_STOPPED) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_node_context_resume(&idev->rnc,
+ remote_device_resume_done,
+ idev);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_change_state(sm, SCI_DEV_STARTING);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status isci_remote_device_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ struct isci_host *ihost = iport->isci_host;
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status;
+
+ if (dev->parent && dev_is_expander(dev->parent))
+ status = sci_remote_device_ea_construct(iport, idev);
+ else
+ status = sci_remote_device_da_construct(iport, idev);
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
+ __func__, status);
+
+ return status;
+ }
+
+ /* start the device. */
+ status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
+
+ if (status != SCI_SUCCESS)
+ dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
+ status);
+
+ return status;
+}
+
+void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n", __func__, idev);
+
+ /* Cleanup all requests pending for this device. */
+ isci_terminate_pending_requests(ihost, idev);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p, done\n", __func__, idev);
+}
+
+/**
+ * This function builds the isci_remote_device when a libsas dev_found message
+ * is received.
+ * @ihost: This parameter specifies the isci host object.
+ * @iport: This parameter specifies the isci_port connected to this device.
+ *
+ * pointer to new isci_remote_device.
+ */
+static struct isci_remote_device *
+isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
+{
+ struct isci_remote_device *idev;
+ int i;
+
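+ /* claim the first free slot in the host's fixed device pool;
+ * IDEV_ALLOCATED stays set until isci_remote_device_release()
+ */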
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ idev = &ihost->devices[i];
+ if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
+ break;
+ }
+
+ if (i >= SCI_MAX_REMOTE_DEVICES) {
+ dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
+ return NULL;
+ }
+
+ if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
+ return NULL;
+
+ if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
+ return NULL;
+
+ return idev;
+}
+
+void isci_remote_device_release(struct kref *kref)
+{
+ struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+ struct isci_host *ihost = idev->isci_port->isci_host;
+
+ idev->domain_dev = NULL;
+ idev->isci_port = NULL;
+ clear_bit(IDEV_START_PENDING, &idev->flags);
+ clear_bit(IDEV_STOP_PENDING, &idev->flags);
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ clear_bit(IDEV_GONE, &idev->flags);
+ clear_bit(IDEV_EH, &idev->flags);
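+ /* make the flag clears above visible before the slot is marked
+ * free for reallocation
+ */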
+ smp_mb__before_clear_bit();
+ clear_bit(IDEV_ALLOCATED, &idev->flags);
+ wake_up(&ihost->eventq);
+}
+
+/**
+ * isci_remote_device_stop() - This function is called internally to stop the
+ * remote device.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_device: This parameter specifies the remote device.
+ *
+ * The status of the ihost request to stop.
+ */
+enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ enum sci_status status;
+ unsigned long flags;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+ set_bit(IDEV_GONE, &idev->flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Kill all outstanding requests. */
+ isci_remote_device_nuke_requests(ihost, idev);
+
+ set_bit(IDEV_STOP_PENDING, &idev->flags);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_stop(idev, 50);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Wait for the stop complete callback. */
+ if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+ /* nothing to wait for */;
+ else
+ wait_for_device_stop(ihost, idev);
+
+ return status;
+}
+
+/**
+ * isci_remote_device_gone() - This function is called by libsas when a domain
+ * device is removed.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ */
+void isci_remote_device_gone(struct domain_device *dev)
+{
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev = dev->lldd_dev;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
+ __func__, dev, idev, idev->isci_port);
+
+ isci_remote_device_stop(ihost, idev);
+}
+
+
+/**
+ * isci_remote_device_found() - This function is called by libsas when a remote
+ * device is discovered. A remote device object is created and started. The
+ * function then sleeps until the sci core device started message is
+ * received.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ * status, zero indicates success.
+ */
+int isci_remote_device_found(struct domain_device *domain_dev)
+{
+ struct isci_host *isci_host = dev_to_ihost(domain_dev);
+ struct isci_port *isci_port;
+ struct isci_phy *isci_phy;
+ struct asd_sas_port *sas_port;
+ struct asd_sas_phy *sas_phy;
+ struct isci_remote_device *isci_device;
+ enum sci_status status;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: domain_device = %p\n", __func__, domain_dev);
+
+ wait_for_start(isci_host);
+
+ sas_port = domain_dev->port;
+ sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
+ port_phy_el);
+ isci_phy = to_iphy(sas_phy);
+ isci_port = isci_phy->isci_port;
+
+ /* we are being called for a device on this port,
+ * so it has to come up eventually
+ */
+ wait_for_completion(&isci_port->start_complete);
+
+ if ((isci_stopping == isci_port_get_state(isci_port)) ||
+ (isci_stopped == isci_port_get_state(isci_port)))
+ return -ENODEV;
+
+ isci_device = isci_remote_device_alloc(isci_host, isci_port);
+ if (!isci_device)
+ return -ENODEV;
+
+ kref_init(&isci_device->kref);
+ INIT_LIST_HEAD(&isci_device->node);
+
+ spin_lock_irq(&isci_host->scic_lock);
+ isci_device->domain_dev = domain_dev;
+ isci_device->isci_port = isci_port;
+ list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
+
+ set_bit(IDEV_START_PENDING, &isci_device->flags);
+ status = isci_remote_device_construct(isci_port, isci_device);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n",
+ __func__, isci_device);
+
+ if (status == SCI_SUCCESS) {
+ /* device came up, advertise it to the world */
+ domain_dev->lldd_dev = isci_device;
+ } else
+ isci_put_device(isci_device);
+ spin_unlock_irq(&isci_host->scic_lock);
+
+ /* wait for the device ready callback. */
+ wait_for_device_start(isci_host, isci_device);
+
+ return status == SCI_SUCCESS ? 0 : -ENODEV;
+}
+/**
+ * isci_device_is_reset_pending() - This function will check if there is any
+ * pending reset condition on the device.
+ * @isci_host: This parameter is the isci host object.
+ * @isci_device: This parameter is the isci_device object.
+ *
+ * true if there is a reset pending for the device.
+ */
+bool isci_device_is_reset_pending(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device)
+{
+ struct isci_request *isci_request;
+ struct isci_request *tmp_req;
+ bool reset_is_pending = false;
+ unsigned long flags;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n", __func__, isci_device);
+
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+ /* Check for reset on all pending requests. */
+ list_for_each_entry_safe(isci_request, tmp_req,
+ &isci_device->reqs_in_process, dev_node) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p request = %p\n",
+ __func__, isci_device, isci_request);
+
+ if (isci_request->ttype == io_task) {
+ struct sas_task *task = isci_request_access_task(
+ isci_request);
+
+ spin_lock(&task->task_state_lock);
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+ reset_is_pending = true;
+ spin_unlock(&task->task_state_lock);
+ }
+ }
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p reset_is_pending = %d\n",
+ __func__, isci_device, reset_is_pending);
+
+ return reset_is_pending;
+}
+
+/**
+ * isci_device_clear_reset_pending() - This function clears any pending reset
+ * condition flags on the device.
+ * @ihost: This parameter is the isci host object.
+ * @idev: This parameter is the isci_device object.
+ */
+void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ struct isci_request *isci_request;
+ struct isci_request *tmp_req;
+ unsigned long flags = 0;
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
+ __func__, idev, ihost);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ /* Clear reset pending on all pending requests. */
+ list_for_each_entry_safe(isci_request, tmp_req,
+ &idev->reqs_in_process, dev_node) {
+ dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
+ __func__, idev, isci_request);
+
+ if (isci_request->ttype == io_task) {
+
+ unsigned long flags2;
+ struct sas_task *task = isci_request_access_task(
+ isci_request);
+
+ spin_lock_irqsave(&task->task_state_lock, flags2);
+ task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags2);
+ }
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
new file mode 100644
index 000000000000..57ccfc3d6ad3
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.h
@@ -0,0 +1,352 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REMOTE_DEVICE_H_
+#define _ISCI_REMOTE_DEVICE_H_
+#include <scsi/libsas.h>
+#include <linux/kref.h>
+#include "scu_remote_node_context.h"
+#include "remote_node_context.h"
+#include "port.h"
+
+enum sci_remote_device_not_ready_reason_code {
+ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
+};
+
+/**
+ * isci_remote_device - isci representation of a sas expander / end point
+ * @device_port_width: hw setting for number of simultaneous connections
+ * @connection_rate: per-taskcontext connection rate for this device
+ * @working_request: SATA requests have no tag, so for unaccelerated
+ * protocols we need a method to associate unsolicited
+ * frames with a pending request
+ */
+struct isci_remote_device {
+ #define IDEV_START_PENDING 0
+ #define IDEV_STOP_PENDING 1
+ #define IDEV_ALLOCATED 2
+ #define IDEV_EH 3
+ #define IDEV_GONE 4
+ #define IDEV_IO_READY 5
+ #define IDEV_IO_NCQERROR 6
+ unsigned long flags;
+ struct kref kref;
+ struct isci_port *isci_port;
+ struct domain_device *domain_dev;
+ struct list_head node;
+ struct list_head reqs_in_process;
+ struct sci_base_state_machine sm;
+ u32 device_port_width;
+ enum sas_linkrate connection_rate;
+ bool is_direct_attached;
+ struct isci_port *owning_port;
+ struct sci_remote_node_context rnc;
+ /* XXX unify with device reference counting and delete */
+ u32 started_request_count;
+ struct isci_request *working_request;
+ u32 not_ready_reason;
+};
+
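+/* device start timeout passed to sci_remote_device_start(), in milliseconds */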
+#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
+
+/* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
+{
+ struct isci_remote_device *idev = dev->lldd_dev;
+
+ if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
+ kref_get(&idev->kref);
+ return idev;
+ }
+
+ return NULL;
+}
+
+void isci_remote_device_release(struct kref *kref);
+static inline void isci_put_device(struct isci_remote_device *idev)
+{
+ if (idev)
+ kref_put(&idev->kref, isci_remote_device_release);
+}
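+
+/*
+ * Minimal usage sketch (assuming the caller has a struct sas_task whose
+ * ->dev points at a discovered domain device): take the reference under
+ * ihost->scic_lock and drop it when done.
+ *
+ *	spin_lock_irqsave(&ihost->scic_lock, flags);
+ *	idev = isci_lookup_device(task->dev);
+ *	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ *	if (idev) {
+ *		... issue or track I/O against idev ...
+ *		isci_put_device(idev);
+ *	}
+ */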
+
+enum sci_status isci_remote_device_stop(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_remote_device_nuke_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_remote_device_gone(struct domain_device *domain_dev);
+int isci_remote_device_found(struct domain_device *domain_dev);
+bool isci_device_is_reset_pending(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_device_clear_reset_pending(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+/**
+ * sci_remote_device_stop() - This method will stop both transmission and
+ * reception of link activity for the supplied remote device. This method
+ * disables normal IO requests from flowing through to the remote device.
+ * @remote_device: This parameter specifies the device to be stopped.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * stop operation should complete.
+ *
+ * An indication of whether the device was successfully stopped. SCI_SUCCESS
+ * This value is returned if the transmission and reception for the device was
+ * successfully stopped.
+ */
+enum sci_status sci_remote_device_stop(
+ struct isci_remote_device *idev,
+ u32 timeout);
+
+/**
+ * sci_remote_device_reset() - This method will reset the device making it
+ * ready for operation. This method must be called anytime the device is
+ * reset either through a SMP phy control or a port hard reset request.
+ * @remote_device: This parameter specifies the device to be reset.
+ *
+ * This method does not actually cause the device hardware to be reset. This
+ * method resets the software object so that it will be operational after a
+ * device hardware reset completes. An indication of whether the device reset
+ * was accepted. SCI_SUCCESS This value is returned if the device reset is
+ * started.
+ */
+enum sci_status sci_remote_device_reset(
+ struct isci_remote_device *idev);
+
+/**
+ * sci_remote_device_reset_complete() - This method informs the device object
+ * that the reset operation is complete and the device can resume operation
+ * again.
+ * @remote_device: This parameter specifies the device which is to be informed
+ * of the reset complete operation.
+ *
+ * An indication that the device is resuming operation. SCI_SUCCESS the device
+ * is resuming operation.
+ */
+enum sci_status sci_remote_device_reset_complete(
+ struct isci_remote_device *idev);
+
+/**
+ * enum sci_remote_device_states - This enumeration depicts all the states
+ * for the common remote device state machine.
+ *
+ *
+ */
+enum sci_remote_device_states {
+ /**
+ * Simply the initial state for the base remote device state machine.
+ */
+ SCI_DEV_INITIAL,
+
+ /**
+ * This state indicates that the remote device has successfully been
+ * stopped. In this state no new IO operations are permitted.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the STOPPING state.
+ */
+ SCI_DEV_STOPPED,
+
+ /**
+ * This state indicates that the remote device is in the process of
+ * becoming ready (i.e. starting). In this state no new IO operations
+ * are permitted.
+ * This state is entered from the STOPPED state.
+ */
+ SCI_DEV_STARTING,
+
+ /**
+ * This state indicates the remote device is now ready. Thus, the user
+ * is able to perform IO operations on the remote device.
+ * This state is entered from the STARTING state.
+ */
+ SCI_DEV_READY,
+
+ /**
+ * This is the idle substate for the stp remote device. When there are no
+ * active IOs for the device it is in this state.
+ */
+ SCI_STP_DEV_IDLE,
+
+ /**
+ * This is the command state for the STP remote device. This state is
+ * entered when the device is processing a non-NCQ command. The device object
+ * will fail any new start IO requests until this command is complete.
+ */
+ SCI_STP_DEV_CMD,
+
+ /**
+ * This is the NCQ state for the STP remote device. This state is entered
+ * when the device is processing an NCQ request. It will remain in this state
+ * as long as there are one or more NCQ requests being processed.
+ */
+ SCI_STP_DEV_NCQ,
+
+ /**
+ * This is the NCQ error state for the STP remote device. This state is
+ * entered when an SDB error FIS is received by the device object while in the
+ * NCQ state. The device object will only accept a READ LOG command while in
+ * this state.
+ */
+ SCI_STP_DEV_NCQ_ERROR,
+
+ /**
+ * This READY substate indicates the device is waiting for a RESET task
+ * in order to recover from a hardware specific error.
+ */
+ SCI_STP_DEV_AWAIT_RESET,
+
+ /**
+ * This is the ready operational substate for the remote device. This is the
+ * normal operational state for a remote device.
+ */
+ SCI_SMP_DEV_IDLE,
+
+ /**
+ * This is the command substate for the SMP remote device. The device
+ * object will fail any new start IO requests until the current SMP
+ * request is complete.
+ */
+ SCI_SMP_DEV_CMD,
+
+ /**
+ * This state indicates that the remote device is in the process of
+ * stopping. In this state no new IO operations are permitted, but
+ * existing IO operations are allowed to complete.
+ * This state is entered from the READY state.
+ * This state is entered from the FAILED state.
+ */
+ SCI_DEV_STOPPING,
+
+ /**
+ * This state indicates that the remote device has failed.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ * This state is entered from the READY state.
+ */
+ SCI_DEV_FAILED,
+
+ /**
+ * This state indicates the device is being reset.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the READY state.
+ */
+ SCI_DEV_RESETTING,
+
+ /**
+ * Simply the final state for the base remote device state machine.
+ */
+ SCI_DEV_FINAL,
+};
+
+static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
+{
+ struct isci_remote_device *idev;
+
+ idev = container_of(rnc, typeof(*idev), rnc);
+
+ return idev;
+}
+
+static inline bool dev_is_expander(struct domain_device *dev)
+{
+ return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+}
+
+static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
+{
+ /* XXX delete this voodoo when converting to the top-level device
+ * reference count
+ */
+ if (WARN_ONCE(idev->started_request_count == 0,
+ "%s: tried to decrement started_request_count past 0!?",
+ __func__))
+ /* pass */;
+ else
+ idev->started_request_count--;
+}
+
+enum sci_status sci_remote_device_frame_handler(
+ struct isci_remote_device *idev,
+ u32 frame_index);
+
+enum sci_status sci_remote_device_event_handler(
+ struct isci_remote_device *idev,
+ u32 event_code);
+
+enum sci_status sci_remote_device_start_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_start_task(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_complete_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_suspend(
+ struct isci_remote_device *idev,
+ u32 suspend_type);
+
+void sci_remote_device_post_request(
+ struct isci_remote_device *idev,
+ u32 request);
+
+#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
new file mode 100644
index 000000000000..748e8339d1ec
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -0,0 +1,627 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "isci.h"
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "scu_task_context.h"
+
+
+/**
+ *
+ * @sci_rnc: The RNC for which the posted request is being made.
+ *
+ * This method will return true if the RNC is not in the initial state; in all
+ * other states the RNC is considered active. The destroy request of the state
+ * machine drives the RNC back to the initial state. If the state machine
+ * changes then this routine will also have to be changed. Returns true if the
+ * state machine is not in the initial state, false if it is.
+ */
+
+/**
+ *
+ * @sci_rnc: The state of the remote node context object to check.
+ *
+ * This method will return true if the remote node context is in the READY
+ * state and false otherwise.
+ */
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc)
+{
+ u32 current_state = sci_rnc->sm.current_state_id;
+
+ if (current_state == SCI_RNC_READY) {
+ return true;
+ }
+
+ return false;
+}
+
+static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
+{
+ if (id < ihost->remote_node_entries &&
+ ihost->device_table[id])
+ return &ihost->remote_node_context_table[id];
+
+ return NULL;
+}
+
+static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+ int rni = sci_rnc->remote_node_index;
+ union scu_remote_node_context *rnc;
+ struct isci_host *ihost;
+ __le64 sas_addr;
+
+ ihost = idev->owning_port->owning_controller;
+ rnc = sci_rnc_by_id(ihost, rni);
+
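+ /* a device may own several contiguous RNC entries; clear all of
+ * them before programming the first entry below
+ */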
+ memset(rnc, 0, sizeof(union scu_remote_node_context)
+ * sci_remote_device_node_count(idev));
+
+ rnc->ssp.remote_node_index = rni;
+ rnc->ssp.remote_node_port_width = idev->device_port_width;
+ rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
+
+ /* sas address is __be64, context ram format is __le64 */
+ sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
+ rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
+ rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
+
+ rnc->ssp.nexus_loss_timer_enable = true;
+ rnc->ssp.check_bit = false;
+ rnc->ssp.is_valid = false;
+ rnc->ssp.is_remote_node_context = true;
+ rnc->ssp.function_number = 0;
+
+ rnc->ssp.arbitration_wait_time = 0;
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ rnc->ssp.connection_occupancy_timeout =
+ ihost->user_parameters.stp_max_occupancy_timeout;
+ rnc->ssp.connection_inactivity_timeout =
+ ihost->user_parameters.stp_inactivity_timeout;
+ } else {
+ rnc->ssp.connection_occupancy_timeout =
+ ihost->user_parameters.ssp_max_occupancy_timeout;
+ rnc->ssp.connection_inactivity_timeout =
+ ihost->user_parameters.ssp_inactivity_timeout;
+ }
+
+ rnc->ssp.initial_arbitration_wait_time = 0;
+
+ /* Open Address Frame Parameters */
+ rnc->ssp.oaf_connection_rate = idev->connection_rate;
+ rnc->ssp.oaf_features = 0;
+ rnc->ssp.oaf_source_zone_group = 0;
+ rnc->ssp.oaf_more_compatibility_features = 0;
+}
+
+/**
+ *
+ * @sci_rnc:
+ * @callback:
+ * @callback_parameter:
+ *
+ * This method will setup the remote node context object so it will transition
+ * to its ready state. If the remote node context is already setup to
+ * transition to its final state then this function does nothing.
+ */
+static void sci_remote_node_context_setup_to_resume(
+ struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter)
+{
+ if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
+ sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+ }
+}
+
+static void sci_remote_node_context_setup_to_destory(
+ struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter)
+{
+ sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+}
+
+/**
+ *
+ *
+ * This method just calls the user callback function and then resets the
+ * callback.
+ */
+static void sci_remote_node_context_notify_user(
+ struct sci_remote_node_context *rnc)
+{
+ if (rnc->user_callback != NULL) {
+ (*rnc->user_callback)(rnc->user_cookie);
+
+ rnc->user_callback = NULL;
+ rnc->user_cookie = NULL;
+ }
+}
+
+static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
+{
+ if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+ sci_remote_node_context_resume(rnc, rnc->user_callback,
+ rnc->user_cookie);
+}
+
+static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ union scu_remote_node_context *rnc_buffer;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+ rnc_buffer->ssp.is_valid = true;
+
+ if (!idev->is_direct_attached &&
+ (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
+ } else {
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
+
+ if (idev->is_direct_attached)
+ sci_port_setup_transports(idev->owning_port,
+ sci_rnc->remote_node_index);
+ }
+}
+
+static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ union scu_remote_node_context *rnc_buffer;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+ rnc_buffer->ssp.is_valid = false;
+
+ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+ SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
+}
+
+static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ /* Check to see if we have gotten back to the initial state because
+ * someone requested to destroy the remote node context object.
+ */
+ if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
+ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+ sci_remote_node_context_notify_user(rnc);
+ }
+}
+
+static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
+
+ sci_remote_node_context_validate_context_buffer(sci_rnc);
+}
+
+static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_invalidate_context_buffer(rnc);
+}
+
+static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev;
+ struct domain_device *dev;
+
+ idev = rnc_to_dev(rnc);
+ dev = idev->domain_dev;
+
+ /*
+ * For direct attached SATA devices we need to clear the TLCR
+ * NCQ to TCi tag mapping on the phy and in cases where we
+ * resume because of a target reset we also need to update
+ * the STPTLDARNI register with the RNi of the device
+ */
+ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+ idev->is_direct_attached)
+ sci_port_setup_transports(idev->owning_port,
+ rnc->remote_node_index);
+
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
+}
+
+static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+ if (rnc->user_callback)
+ sci_remote_node_context_notify_user(rnc);
+}
+
+static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static const struct sci_base_state sci_remote_node_context_state_table[] = {
+ [SCI_RNC_INITIAL] = {
+ .enter_state = sci_remote_node_context_initial_state_enter,
+ },
+ [SCI_RNC_POSTING] = {
+ .enter_state = sci_remote_node_context_posting_state_enter,
+ },
+ [SCI_RNC_INVALIDATING] = {
+ .enter_state = sci_remote_node_context_invalidating_state_enter,
+ },
+ [SCI_RNC_RESUMING] = {
+ .enter_state = sci_remote_node_context_resuming_state_enter,
+ },
+ [SCI_RNC_READY] = {
+ .enter_state = sci_remote_node_context_ready_state_enter,
+ },
+ [SCI_RNC_TX_SUSPENDED] = {
+ .enter_state = sci_remote_node_context_tx_suspended_state_enter,
+ },
+ [SCI_RNC_TX_RX_SUSPENDED] = {
+ .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
+ },
+ [SCI_RNC_AWAIT_SUSPENSION] = { },
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+ u16 remote_node_index)
+{
+ memset(rnc, 0, sizeof(struct sci_remote_node_context));
+
+ rnc->remote_node_index = remote_node_index;
+ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+ sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
+}
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+ u32 event_code)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_POSTING:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_POST_RNC_COMPLETE:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case SCI_RNC_INVALIDATING:
+ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
+ if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
+ state = SCI_RNC_INITIAL;
+ else
+ state = SCI_RNC_POSTING;
+ sci_change_state(&sci_rnc->sm, state);
+ } else {
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ /* We really don't care if the hardware is going to suspend
+ * the device since it's being invalidated anyway */
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: SCIC Remote Node Context 0x%p was "
+ "suspeneded by hardware while being "
+ "invalidated.\n", __func__, sci_rnc);
+ break;
+ default:
+ goto out;
+ }
+ }
+ break;
+ case SCI_RNC_RESUMING:
+ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+ } else {
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ /* We really don't care if the hardware is going to suspend
+ * the device since it's being resumed anyway */
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: SCIC Remote Node Context 0x%p was "
+ "suspeneded by hardware while being resumed.\n",
+ __func__, sci_rnc);
+ break;
+ default:
+ goto out;
+ }
+ }
+ break;
+ case SCI_RNC_READY:
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TL_RNC_SUSPEND_TX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TL_RNC_SUSPEND_TX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+
+ out:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: code: %#x state: %d\n", __func__, event_code, state);
+ return SCI_FAILURE;
+
+}
+
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_INVALIDATING:
+ sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
+ case SCI_RNC_POSTING:
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+ return SCI_SUCCESS;
+ case SCI_RNC_INITIAL:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ /* We have decided that the destruct request on the remote node context
+ * can not fail since it is either in the initial/destroyed state or can
+ * otherwise be destroyed.
+ */
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+ u32 suspend_type,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ if (state != SCI_RNC_READY) {
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_rnc->user_callback = cb_fn;
+ sci_rnc->user_cookie = cb_p;
+ sci_rnc->suspension_code = suspend_type;
+
+ if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
+ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+ SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
+ }
+
+ sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_INITIAL:
+ if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ return SCI_FAILURE_INVALID_STATE;
+
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_construct_buffer(sci_rnc);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+ return SCI_SUCCESS;
+ case SCI_RNC_POSTING:
+ case SCI_RNC_INVALIDATING:
+ case SCI_RNC_RESUMING:
+ if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+ return SCI_FAILURE_INVALID_STATE;
+
+ sci_rnc->user_callback = cb_fn;
+ sci_rnc->user_cookie = cb_p;
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED: {
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+
+ /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
+ if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+ else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ if (idev->is_direct_attached) {
+ /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+ } else {
+ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+ }
+ } else
+ return SCI_FAILURE;
+ return SCI_SUCCESS;
+ }
+ case SCI_RNC_TX_RX_SUSPENDED:
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+
+ switch (state) {
+ case SCI_RNC_READY:
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ default:
+ break;
+ }
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: requested to start IO while still resuming, %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ sci_remote_node_context_resume(sci_rnc, NULL, NULL);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
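
A minimal usage sketch for the interfaces above (illustrative only, not part of the patch): the helper and callback names, and the choice of the device pointer as the callback cookie, are assumptions made for the example.

	static void example_resume_done(void *cookie)
	{
		struct isci_remote_device *idev = cookie;	/* hypothetical cookie choice */

		dev_dbg(scirdev_to_dev(idev), "%s: RNC resume complete\n", __func__);
	}

	static enum sci_status example_resume(struct isci_remote_device *idev)
	{
		/* Drives the RNC toward SCI_RNC_READY; the callback recorded
		 * here is invoked from the ready-state entry handler once the
		 * hardware posts the resume completion. */
		return sci_remote_node_context_resume(&idev->rnc,
						      example_resume_done, idev);
	}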
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
new file mode 100644
index 000000000000..41580ad12520
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -0,0 +1,224 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+
+/**
+ * This file contains the structures, constants, and prototypes associated
+ * with the remote node context. It exists to model and manage the remote
+ * node context in the silicon.
+ *
+ *
+ */
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * This constant represents an invalid remote device id. It is used to program
+ * the STPDARNI register so the driver knows when it has received a SIGNATURE
+ * FIS from the SCU.
+ */
+#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
+
+#define SCU_HARDWARE_SUSPENSION (0)
+#define SCI_SOFTWARE_SUSPENSION (1)
+
+struct isci_request;
+struct isci_remote_device;
+struct sci_remote_node_context;
+
+typedef void (*scics_sds_remote_node_context_callback)(void *);
+
+/**
+ * This is the enumeration of the remote node context states.
+ */
+enum scis_sds_remote_node_context_states {
+ /**
+ * This state is the initial state for a remote node context. On a resume
+ * request the remote node context will transition to the posting state.
+ */
+ SCI_RNC_INITIAL,
+
+ /**
+ * This is a transition state that posts the RNi to the hardware. Once the RNC
+ * is posted the remote node context will be made ready.
+ */
+ SCI_RNC_POSTING,
+
+ /**
+ * This is a transition state that will post an RNC invalidate to the
+ * hardware. Once the invalidate is complete the remote node context will
+ * transition to the posting state.
+ */
+ SCI_RNC_INVALIDATING,
+
+ /**
+ * This is a transition state that will post an RNC resume to the hardware.
+ * Once the event notification of resume complete is received the remote node
+ * context will transition to the ready state.
+ */
+ SCI_RNC_RESUMING,
+
+ /**
+ * This is the state that the remote node context must be in to accept io
+ * request operations.
+ */
+ SCI_RNC_READY,
+
+ /**
+ * This is the state that the remote node context transitions to when it gets
+ * a TX suspend notification from the hardware.
+ */
+ SCI_RNC_TX_SUSPENDED,
+
+ /**
+ * This is the state that the remote node context transitions to when it gets
+ * a TX RX suspend notification from the hardware.
+ */
+ SCI_RNC_TX_RX_SUSPENDED,
+
+ /**
+ * This state is a wait state for the remote node context that waits for a
+ * suspend notification from the hardware. This state is entered when either
+ * there is a request to suspend the remote node context or when there is a TC
+ * completion where the remote node will be suspended by the hardware.
+ */
+ SCI_RNC_AWAIT_SUSPENSION
+};
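+
+/*
+ * Reading aid: the typical transitions, as implemented by the event and
+ * request handlers in remote_node_context.c, are
+ *
+ *	INITIAL --resume--> POSTING --post complete--> READY
+ *	READY --suspension event--> TX_SUSPENDED / TX_RX_SUSPENDED
+ *	TX(_RX)_SUSPENDED --resume--> RESUMING --release event--> READY
+ *	READY --suspend request--> AWAIT_SUSPENSION --suspension event-->
+ *		TX_SUSPENDED / TX_RX_SUSPENDED
+ *	any active state --destruct--> INVALIDATING --invalidate complete-->
+ *		INITIAL (final destination) or POSTING
+ */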
+
+/**
+ *
+ *
+ * This enumeration is used to define the end destination state for the remote
+ * node context.
+ */
+enum sci_remote_node_context_destination_state {
+ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
+ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
+ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
+};
+
+/**
+ * struct sci_remote_node_context - This structure contains the data
+ * associated with the remote node context object. The remote node context
+ * (RNC) object models the remote device information necessary to manage
+ * the silicon RNC.
+ */
+struct sci_remote_node_context {
+ /**
+ * This field indicates the remote node index (RNI) associated with
+ * this RNC.
+ */
+ u16 remote_node_index;
+
+ /**
+ * This field is the recorded suspension code or the reason for the remote node
+ * context suspension.
+ */
+ u32 suspension_code;
+
+ /**
+ * This field records the destination state for the remote node context,
+ * i.e. the state the context should end up in once the current transition
+ * completes. When the destination is the ready state this can cause an
+ * automatic resume on receiving a suspension notification.
+ */
+ enum sci_remote_node_context_destination_state destination_state;
+
+ /**
+ * This field contains the callback function that the user requested to be
+ * called when the requested state transition is complete.
+ */
+ scics_sds_remote_node_context_callback user_callback;
+
+ /**
+ * This field contains the parameter that is called when the user requested
+ * state transition is completed.
+ */
+ void *user_cookie;
+
+ /**
+ * This field contains the data for the object's state machine.
+ */
+ struct sci_base_state_machine sm;
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+ u16 remote_node_index);
+
+
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc);
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+ u32 event_code);
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter);
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+ u32 suspend_type,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq);
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
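
A brief sketch of the suspend interface declared above (illustrative only; the wrapper name, callback, and cookie are placeholders). Per the implementation in remote_node_context.c, a software suspension explicitly posts the suspend request to the hardware while a hardware suspension simply waits for the hardware-initiated event; in either case the RNC enters SCI_RNC_AWAIT_SUSPENSION on success.

	static enum sci_status example_suspend(struct sci_remote_node_context *rnc,
					       scics_sds_remote_node_context_callback cb,
					       void *cookie)
	{
		/* Only valid while the RNC is in SCI_RNC_READY; any other
		 * state returns SCI_FAILURE_INVALID_STATE. */
		return sci_remote_node_context_suspend(rnc, SCI_SOFTWARE_SUSPENSION,
						       cb, cookie);
	}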
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
new file mode 100644
index 000000000000..301b3141945e
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.c
@@ -0,0 +1,598 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
+ * public, protected, and private methods.
+ *
+ *
+ */
+#include "remote_node_table.h"
+#include "remote_node_context.h"
+
+/**
+ *
+ * @remote_node_table: This is the remote node index table from which the
+ * selection will be made.
+ * @group_table_index: This is the index to the group table from which to
+ * search for an available selection.
+ *
+ * This routine searches the selected group table and returns, in absolute
+ * bit terms, the position of the first available (set) group bit, or
+ * SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if no group is available.
+ */
+static u32 sci_remote_node_table_get_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u32 dword_index;
+ u32 *group_table;
+ u32 bit_index;
+
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
+ if (group_table[dword_index] != 0) {
+ for (bit_index = 0; bit_index < 32; bit_index++) {
+ if ((group_table[dword_index] & (1 << bit_index)) != 0) {
+ return (dword_index * 32) + bit_index;
+ }
+ }
+ }
+ }
+
+ return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table in which to clear the
+ * selector.
+ * @group_table_index: This is the remote node selector in which the change
+ * will be made.
+ * @group_index: This is the bit index in the table to be modified.
+ *
+ * This method will clear the group index entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_clear_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index,
+ u32 group_index)
+{
+ u32 dword_index;
+ u32 bit_index;
+ u32 *group_table;
+
+ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+ dword_index = group_index / 32;
+ bit_index = group_index % 32;
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table in which to set the
+ * selector.
+ * @group_table_index: This is the remote node selector in which the change
+ * will be made.
+ * @group_index: This is the bit position in the table to be modified.
+ *
+ * This method will set the group index bit entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_set_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index,
+ u32 group_index)
+{
+ u32 dword_index;
+ u32 bit_index;
+ u32 *group_table;
+
+ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+ dword_index = group_index / 32;
+ bit_index = group_index % 32;
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table in which to modify
+ * the remote node availability.
+ * @remote_node_index: This is the remote node index that is being returned to
+ * the table.
+ *
+ * This method will mark the remote node as available in the remote node
+ * allocation table.
+ */
+static void sci_remote_node_table_set_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 slot_normalized;
+ u32 slot_position;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+ remote_node_table->available_remote_nodes[dword_location] |=
+ 1 << (slot_normalized + slot_position);
+}
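+
+/*
+ * Worked example (illustrative) of the index math above, assuming
+ * sizeof(u32) == 4 so that SCIC_SDS_REMOTE_NODES_PER_DWORD == 24. For
+ * remote_node_index = 10:
+ *	dword_location  = 10 / 24 = 0
+ *	dword_remainder = 10 % 24 = 10
+ *	slot_normalized = (10 / 3) * 4 = 12
+ *	slot_position   = 10 % 3 = 1
+ * so bit 13 of available_remote_nodes[0] is set -- the second bit of the
+ * nibble covering RNi 9..11, leaving bit 3 of every nibble unused (the
+ * 0111 0111 pattern described in remote_node_table.h).
+ */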
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which to clear
+ * the available remote node bit.
+ * @remote_node_index: This is the remote node index which is to be cleared
+ * from the table.
+ *
+ * This method clears the remote node index from the table of available remote
+ * nodes.
+ */
+static void sci_remote_node_table_clear_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 slot_position;
+ u32 slot_normalized;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+ remote_node_table->available_remote_nodes[dword_location] &=
+ ~(1 << (slot_normalized + slot_position));
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table from which the slot will be
+ * cleared.
+ * @group_index: The index for the slot that is to be cleared.
+ *
+ * This method clears the entire table slot at the specified slot index.
+ */
+static void sci_remote_node_table_clear_group(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table in which the group is to be set.
+ * @group_index: The index of the group that is to be fully set.
+ *
+ * This method sets an entire remote node group in the remote node table.
+ */
+static void sci_remote_node_table_set_group(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table for which the group
+ * value is to be returned.
+ * @group_index: This is the group index to use to find the group value.
+ *
+ * This method returns the group value, i.e. the bit values of the nibble at
+ * the specified remote node group index.
+ */
+static u8 sci_remote_node_table_get_group_value(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ dword_value = dword_value >> (dword_remainder * 4);
+
+ return (u8)dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table which is to be initialized.
+ * @remote_node_entries: The number of entries to put in the table.
+ *
+ * This method will initialize the remote node table for use.
+ */
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_entries)
+{
+ u32 index;
+
+ /*
+ * Initialize the raw data. We could improve the speed by only initializing
+ * those entries that are actually going to be used. */
+ memset(
+ remote_node_table->available_remote_nodes,
+ 0x00,
+ sizeof(remote_node_table->available_remote_nodes)
+ );
+
+ memset(
+ remote_node_table->remote_node_groups,
+ 0x00,
+ sizeof(remote_node_table->remote_node_groups)
+ );
+
+ /* Initialize the available remote node sets */
+ remote_node_table->available_nodes_array_size = (u16)
+ (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+ + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
+
+
+ /* Initialize each full DWORD to a FULL SET of remote nodes */
+ for (index = 0; index < remote_node_entries; index++) {
+ sci_remote_node_table_set_node_index(remote_node_table, index);
+ }
+
+ remote_node_table->group_array_size = (u16)
+ (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
+ + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
+
+ for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
+ /*
+ * These are all guaranteed to be full slot values so fill them in the
+ * available sets of 3 remote nodes */
+ sci_remote_node_table_set_group_index(remote_node_table, 2, index);
+ }
+
+ /* Now fill in any remainders that we may find */
+ if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
+ sci_remote_node_table_set_group_index(remote_node_table, 1, index);
+ } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
+ sci_remote_node_table_set_group_index(remote_node_table, 0, index);
+ }
+}
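+
+/*
+ * Worked example (illustrative, for a hypothetical remote_node_entries of
+ * 64) of the sizing performed above:
+ *	available_nodes_array_size = 64 / 24 + 1 = 3 DWORDs
+ *	group_array_size           = 64 / 96 + 1 = 1 DWORD
+ *	groups 0..20 (RNi 0..62) are marked in group table 2 (triples)
+ *	group 21 (RNi 63 only) is marked in group table 0 (singles)
+ */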
+
+/**
+ *
+ * @remote_node_table: The remote node table from which to allocate a
+ * remote node.
+ * @group_table_index: The group index that is to be used for the search.
+ *
+ * This method will allocate a single RNi from the remote node table. The
+ * table index will determine from which remote node group table to search.
+ * This search may fail and another group node table can be specified. The
+ * function is designed to allow a search of the available single remote node
+ * group up to the triple remote node group. If an entry is found in the
+ * specified table the remote node is removed and the remote node groups are
+ * updated. Returns the RNi value, or an invalid remote node context index if
+ * an RNi can not be found.
+ */
+static u16 sci_remote_node_table_allocate_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u8 index;
+ u8 group_value;
+ u32 group_index;
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ group_index = sci_remote_node_table_get_group_index(
+ remote_node_table, group_table_index);
+
+ /* Proceed only if an available group was found in the selected table */
+ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+ group_value = sci_remote_node_table_get_group_value(
+ remote_node_table, group_index);
+
+ for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
+ if (((1 << index) & group_value) != 0) {
+ /* We have selected a bit now clear it */
+ remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
+ + index);
+
+ sci_remote_node_table_clear_group_index(
+ remote_node_table, group_table_index, group_index
+ );
+
+ sci_remote_node_table_clear_node_index(
+ remote_node_table, remote_node_index
+ );
+
+ if (group_table_index > 0) {
+ sci_remote_node_table_set_group_index(
+ remote_node_table, group_table_index - 1, group_index
+ );
+ }
+
+ break;
+ }
+ }
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which to allocate the
+ * remote node entries.
+ * @group_table_index: This is the group table index which must equal two (2)
+ * for this operation.
+ *
+ * This method will allocate three consecutive remote node context entries. If
+ * there are no remaining triple entries the function will return a failure.
+ * Returns the remote node index that represents three consecutive remote
+ * node entries, or an invalid remote node context index if none can be found.
+ */
+static u16 sci_remote_node_table_allocate_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u32 group_index;
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ group_index = sci_remote_node_table_get_group_index(
+ remote_node_table, group_table_index);
+
+ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+ remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
+
+ sci_remote_node_table_clear_group_index(
+ remote_node_table, group_table_index, group_index
+ );
+
+ sci_remote_node_table_clear_group(
+ remote_node_table, group_index
+ );
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which the remote node
+ * allocation is to take place.
+ * @remote_node_count: This is the remote node count which is one of
+ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
+ *
+ * This method will allocate a remote node that matches the remote node count
+ * specified by the caller. Valid values for the remote node count are
+ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). Returns the
+ * allocated remote node index, or an invalid remote node context index on
+ * failure.
+ */
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count)
+{
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 0);
+
+ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 1);
+ }
+
+ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 2);
+ }
+ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+ remote_node_index =
+ sci_remote_node_table_allocate_triple_remote_node(
+ remote_node_table, 2);
+ }
+
+ return remote_node_index;
+}
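+
+/*
+ * Usage sketch (illustrative): an STP device consumes a triple-aligned set
+ * of indices, an SSP device a single index, e.g.
+ *
+ *	rni = sci_remote_node_table_allocate_remote_node(rnt,
+ *				SCU_STP_REMOTE_NODE_COUNT);
+ *	if (rni != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ *		... program RNC entries rni, rni + 1 and rni + 2 ...
+ *		sci_remote_node_table_release_remote_node_index(rnt,
+ *				SCU_STP_REMOTE_NODE_COUNT, rni);
+ *	}
+ */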
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index
+ * is to be freed.
+ * @remote_node_index: The remote node index that is to be freed.
+ *
+ * This method will free a single remote node index back to the remote node
+ * table and update the remote node groups accordingly.
+ */
+static void sci_remote_node_table_release_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u16 remote_node_index)
+{
+ u32 group_index;
+ u8 group_value;
+
+ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+ group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
+
+ /*
+ * Assert that we are not trying to add an entry to a slot that is already
+ * full. */
+ BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
+
+ if (group_value == 0x00) {
+ /*
+ * There are no entries in this slot so it must be added to the single
+ * slot table. */
+ sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
+ } else if ((group_value & (group_value - 1)) == 0) {
+ /*
+ * There is only one entry in this slot so it must be moved from the
+ * single slot table to the dual slot table */
+ sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
+ } else {
+ /*
+ * There are two entries in the slot so it must be moved from the dual
+ * slot table to the triple slot table. */
+ sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
+ }
+
+ sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table to which the remote node
+ * index is to be freed.
+ * @remote_node_index: The remote node index of the group of three consecutive
+ * remote nodes to be released.
+ *
+ * This method will release a group of three consecutive remote nodes back to
+ * the pool of free remote nodes.
+ */
+static void sci_remote_node_table_release_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u16 remote_node_index)
+{
+ u32 group_index;
+
+ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+ sci_remote_node_table_set_group_index(
+ remote_node_table, 2, group_index
+ );
+
+ sci_remote_node_table_set_group(remote_node_table, group_index);
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index is
+ * to be freed.
+ * @remote_node_count: This is the count of consecutive remote nodes that are
+ * to be freed.
+ * @remote_node_index: The remote node index to be released.
+ *
+ * This method will release the remote node index back into the remote node
+ * table free pool.
+ */
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count,
+ u16 remote_node_index)
+{
+ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+ sci_remote_node_table_release_single_remote_node(
+ remote_node_table, remote_node_index);
+ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+ sci_remote_node_table_release_triple_remote_node(
+ remote_node_table, remote_node_index);
+ }
+}
+
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
new file mode 100644
index 000000000000..721ab982d2ac
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.h
@@ -0,0 +1,188 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
+#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * Remote node sets are sets of remote node indices in the remote node table.
+ * The SCU hardware requires that STP remote node entries take three
+ * consecutive remote node indices so the table is arranged in sets of three.
+ * used as 0111 0111 to make a byte and the bits define the set of three remote
+ * nodes to use as a sequence.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
+
+/**
+ *
+ *
+ * Since the remote node table is organized as DWORDs, take the remote node
+ * sets in bytes and represent them in DWORDs. The lowest ordered bits are the
+ * ones used when a full DWORD is not in use, i.e. 0000 0000 0000 0000 0111
+ * 0111 0111 0111 if only a single WORD of the DWORD is in use.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
+ (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * byte
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_BYTE \
+ (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * DWORD
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_DWORD \
+ (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
+
+/**
+ *
+ *
+ * This is the number of bits in a remote node group
+ */
+#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP 4
+
+#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX (0xFFFFFFFF)
+#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07)
+#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE (0x00)
+
+/**
+ *
+ *
+ * Expander attached sata remote node count
+ */
+#define SCU_STP_REMOTE_NODE_COUNT 3
+
+/**
+ *
+ *
+ * Expander or direct attached ssp remote node count
+ */
+#define SCU_SSP_REMOTE_NODE_COUNT 1
+
+/**
+ *
+ *
+ * Direct attached STP remote node count
+ */
+#define SCU_SATA_REMOTE_NODE_COUNT 1
+
+/**
+ * struct sci_remote_node_table - This structure tracks the available remote
+ * node indices and the group selectors used to allocate and release them.
+ */
+struct sci_remote_node_table {
+ /**
+ * This field contains the size, in DWORDs, of the available remote
+ * nodes array.
+ */
+ u16 available_nodes_array_size;
+
+ /**
+ * This field contains the size, in DWORDs, of each remote node group
+ * selector array.
+ */
+ u16 group_array_size;
+
+ /**
+ * This field is the array of available remote node entries in bits.
+ * Because of the way STP remote node data is allocated on the SCU hardware
+ * the remote nodes must occupy three consecutive remote node context
+ * entries. For ease of allocation and de-allocation we have broken the
+ * sets of three into a single nibble. When the STP RNi is allocated all
+ * of the bits in the nibble are cleared. This math results in a table size
+ * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte.
+ */
+ u32 available_remote_nodes[
+ (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+ + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
+
+ /**
+ * This field is the nibble selector for the above table. There are three
+ * possible selectors each for fast lookup when trying to find one, two or
+ * three remote node entries.
+ */
+ u32 remote_node_groups[
+ SCU_STP_REMOTE_NODE_COUNT][
+ (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
+ + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
+
+};
+
+/* --------------------------------------------------------------------------- */
+
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_entries);
+
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count);
+
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count,
+ u16 remote_node_index);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
new file mode 100644
index 000000000000..a46e07ac789f
--- /dev/null
+++ b/drivers/scsi/isci/request.c
@@ -0,0 +1,3391 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "task.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "sas.h"
+
+static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
+ int idx)
+{
+ if (idx == 0)
+ return &ireq->tc->sgl_pair_ab;
+ else if (idx == 1)
+ return &ireq->tc->sgl_pair_cd;
+ else if (idx < 0)
+ return NULL;
+ else
+ return &ireq->sg_table[idx - 2];
+}
+
+static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
+ struct isci_request *ireq, u32 idx)
+{
+ u32 offset;
+
+ if (idx == 0) {
+ offset = (void *) &ireq->tc->sgl_pair_ab -
+ (void *) &ihost->task_context_table[0];
+ return ihost->task_context_dma + offset;
+ } else if (idx == 1) {
+ offset = (void *) &ireq->tc->sgl_pair_cd -
+ (void *) &ihost->task_context_table[0];
+ return ihost->task_context_dma + offset;
+ }
+
+ return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
+}
+
+static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
+{
+ e->length = sg_dma_len(sg);
+ e->address_upper = upper_32_bits(sg_dma_address(sg));
+ e->address_lower = lower_32_bits(sg_dma_address(sg));
+ e->address_modifier = 0;
+}
+
+static void sci_request_build_sgl(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->isci_host;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct scatterlist *sg = NULL;
+ dma_addr_t dma_addr;
+ u32 sg_idx = 0;
+ struct scu_sgl_element_pair *scu_sg = NULL;
+ struct scu_sgl_element_pair *prev_sg = NULL;
+
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
+
+ while (sg) {
+ scu_sg = to_sgl_element_pair(ireq, sg_idx);
+ init_sgl_element(&scu_sg->A, sg);
+ sg = sg_next(sg);
+ if (sg) {
+ init_sgl_element(&scu_sg->B, sg);
+ sg = sg_next(sg);
+ } else
+ memset(&scu_sg->B, 0, sizeof(scu_sg->B));
+
+ if (prev_sg) {
+ dma_addr = to_sgl_element_pair_dma(ihost,
+ ireq,
+ sg_idx);
+
+ prev_sg->next_pair_upper =
+ upper_32_bits(dma_addr);
+ prev_sg->next_pair_lower =
+ lower_32_bits(dma_addr);
+ }
+
+ prev_sg = scu_sg;
+ sg_idx++;
+ }
+ } else { /* handle when no sg */
+ scu_sg = to_sgl_element_pair(ireq, sg_idx);
+
+ dma_addr = dma_map_single(&ihost->pdev->dev,
+ task->scatter,
+ task->total_xfer_len,
+ task->data_dir);
+
+ ireq->zero_scatter_daddr = dma_addr;
+
+ scu_sg->A.length = task->total_xfer_len;
+ scu_sg->A.address_upper = upper_32_bits(dma_addr);
+ scu_sg->A.address_lower = lower_32_bits(dma_addr);
+ }
+
+ if (scu_sg) {
+ scu_sg->next_pair_upper = 0;
+ scu_sg->next_pair_lower = 0;
+ }
+}
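+
+/*
+ * Reading aid (illustrative): the chain built above links SGL element
+ * pairs as
+ *
+ *	tc->sgl_pair_ab --> tc->sgl_pair_cd --> ireq->sg_table[0] --> ...
+ *	    (idx 0)             (idx 1)             (idx 2)
+ *
+ * Each pair carries two scatterlist entries (A and B); the final pair has
+ * next_pair_upper/next_pair_lower cleared to terminate the chain.
+ */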
+
+static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
+{
+ struct ssp_cmd_iu *cmd_iu;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ cmd_iu = &ireq->ssp.cmd;
+
+ memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
+ cmd_iu->add_cdb_len = 0;
+ cmd_iu->_r_a = 0;
+ cmd_iu->_r_b = 0;
+ cmd_iu->en_fburst = 0; /* unsupported */
+ cmd_iu->task_prio = task->ssp_task.task_prio;
+ cmd_iu->task_attr = task->ssp_task.task_attr;
+ cmd_iu->_r_c = 0;
+
+ sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
+ sizeof(task->ssp_task.cdb) / sizeof(u32));
+}
+
+static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
+{
+ struct ssp_task_iu *task_iu;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+ task_iu = &ireq->ssp.tmf;
+
+ memset(task_iu, 0, sizeof(struct ssp_task_iu));
+
+ memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
+
+ task_iu->task_func = isci_tmf->tmf_code;
+ task_iu->task_tag =
+ (ireq->ttype == tmf_task) ?
+ isci_tmf->io_tag :
+ SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SSP request.
+ * @sci_req:
+ * @task_context:
+ *
+ */
+static void scu_ssp_reqeust_construct_task_context(
+ struct isci_request *ireq,
+ struct scu_task_context *task_context)
+{
+ dma_addr_t dma_addr;
+ struct isci_remote_device *idev;
+ struct isci_port *iport;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /* Fill in the TC with its required data */
+ task_context->abort = 0;
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+
+ /* task_context->type.ssp.tag = ireq->io_tag; */
+ task_context->task_phase = 0x01;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+
+ /*
+ * Copy the physical address for the command buffer to the
+ * SCU Task Context
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
+
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /*
+ * Copy the physical address for the response buffer to the
+ * SCU Task Context
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+
+ task_context->response_iu_upper = upper_32_bits(dma_addr);
+ task_context->response_iu_lower = lower_32_bits(dma_addr);
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP IO request.
+ * @sci_req:
+ *
+ */
+static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
+ enum dma_data_direction dir,
+ u32 len)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_cmd_iu) / sizeof(u32);
+ task_context->type.ssp.frame_type = SSP_COMMAND;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ case DMA_NONE:
+ default:
+ task_context->task_type = SCU_TASK_TYPE_IOREAD;
+ break;
+ case DMA_TO_DEVICE:
+ task_context->task_type = SCU_TASK_TYPE_IOWRITE;
+ break;
+ }
+
+ task_context->transfer_length_bytes = len;
+
+ if (task_context->transfer_length_bytes > 0)
+ sci_request_build_sgl(ireq);
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP Task request. The
+ * following important settings are utilized: -# priority ==
+ * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
+ * ahead of other tasks destined for the same Remote Node. -# task_type ==
+ * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
+ * (i.e. non-raw frame) is being utilized to perform task management. -#
+ * control_frame == 1. This ensures that the proper endianness is set so
+ * that the bytes are transmitted in the right order for a task frame.
+ * @sci_req: This parameter specifies the task request object being
+ * constructed.
+ *
+ */
+static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->control_frame = 1;
+ task_context->priority = SCU_TASK_PRIORITY_HIGH;
+ task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
+ task_context->transfer_length_bytes = 0;
+ task_context->type.ssp.frame_type = SSP_TASK;
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_task_iu) / sizeof(u32);
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SATA
+ * request. This is called from the various SATA constructors.
+ * @sci_req: The general IO request object which is to be used in
+ * constructing the SCU task context.
+ * @task_context: The buffer pointer for the SCU task context which is being
+ * constructed.
+ *
+ * The general io request construction is complete. The buffer assignment for
+ * the command buffer is complete. Revisit task context construction to
+ * determine what is common for SSP/SMP/STP task context structures.
+ */
+static void scu_sata_reqeust_construct_task_context(
+ struct isci_request *ireq,
+ struct scu_task_context *task_context)
+{
+ dma_addr_t dma_addr;
+ struct isci_remote_device *idev;
+ struct isci_port *iport;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /* Fill in the TC with its required data */
+ task_context->abort = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+ task_context->task_phase = 0x01;
+
+ task_context->ssp_command_iu_length =
+ (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
+
+ /* Set the first word of the H2D REG FIS */
+ task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+ /*
+ * Copy the physical address for the command buffer to the SCU Task
+ * Context. We must offset the command buffer by 4 bytes because the
+ * first 4 bytes are transferred in the body of the TC.
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq,
+ ((char *) &ireq->stp.cmd) +
+ sizeof(u32));
+
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /* SATA Requests do not have a response buffer */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
+}
+
+static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->control_frame = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
+ task_context->type.stp.fis_type = FIS_REGH2D;
+ task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
+}
+
+static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
+ bool copy_rx_frame)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+
+ scu_stp_raw_request_construct_task_context(ireq);
+
+ stp_req->status = 0;
+ stp_req->sgl.offset = 0;
+ stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
+
+ if (copy_rx_frame) {
+ sci_request_build_sgl(ireq);
+ stp_req->sgl.index = 0;
+ } else {
+ /* The user does not want the data copied to the SGL buffer location */
+ stp_req->sgl.index = -1;
+ }
+
+ return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @sci_req: This parameter specifies the request to be constructed as an
+ * optimized request.
+ * @optimized_task_type: This parameter specifies whether the request is to be
+ * a UDMA request or an NCQ request. A value of 0 indicates UDMA; a
+ * value of 1 indicates NCQ.
+ *
+ * This method performs request construction common to all types of STP
+ * requests that are optimized by the silicon (i.e. UDMA, NCQ).
+ */
+static void sci_stp_optimized_request_construct(struct isci_request *ireq,
+ u8 optimized_task_type,
+ u32 len,
+ enum dma_data_direction dir)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ /* Build the STP task context structure */
+ scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+ /* Copy over the SGL elements */
+ sci_request_build_sgl(ireq);
+
+ /* Copy over the number of bytes to be transfered */
+ task_context->transfer_length_bytes = len;
+
+ if (dir == DMA_TO_DEVICE) {
+ /*
+ * The difference between the DMA IN and DMA OUT request task type
+ * values are consistent with the difference between FPDMA READ
+ * and FPDMA WRITE values. Add the supplied task type parameter
+ * to this difference to set the task type properly for this
+ * DATA OUT (WRITE) case. */
+ task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
+ - SCU_TASK_TYPE_DMA_IN);
+ } else {
+ /*
+ * For the DATA IN (READ) case, simply save the supplied
+ * optimized task type. */
+ task_context->task_type = optimized_task_type;
+ }
+}
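+
+/* Illustrative note on the task-type arithmetic above (assumes the SCU task
+ * type enum keeps the read/write pairs at a constant distance, as the comment
+ * in the DMA_TO_DEVICE branch states):
+ *
+ *   dir == DMA_FROM_DEVICE, type == SCU_TASK_TYPE_FPDMAQ_READ
+ *       -> task_type = SCU_TASK_TYPE_FPDMAQ_READ
+ *   dir == DMA_TO_DEVICE,   type == SCU_TASK_TYPE_FPDMAQ_READ
+ *       -> task_type = SCU_TASK_TYPE_FPDMAQ_READ
+ *                      + (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN),
+ *          i.e. the corresponding FPDMA write type.
+ */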
+
+
+
+static enum sci_status
+sci_io_request_construct_sata(struct isci_request *ireq,
+ u32 len,
+ enum dma_data_direction dir,
+ bool copy)
+{
+ enum sci_status status = SCI_SUCCESS;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ /* check for management protocols */
+ if (ireq->ttype == tmf_task) {
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+ tmf->tmf_code == isci_tmf_sata_srst_low) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ return SCI_SUCCESS;
+ } else {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Request 0x%p received un-handled SAT "
+ "management protocol 0x%x.\n",
+ __func__, ireq, tmf->tmf_code);
+
+ return SCI_FAILURE;
+ }
+ }
+
+ if (!sas_protocol_ata(task->task_proto)) {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Non-ATA protocol in SATA path: 0x%x\n",
+ __func__,
+ task->task_proto);
+ return SCI_FAILURE;
+
+ }
+
+ /* non data */
+ if (task->data_dir == DMA_NONE) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ return SCI_SUCCESS;
+ }
+
+ /* NCQ */
+ if (task->ata_task.use_ncq) {
+ sci_stp_optimized_request_construct(ireq,
+ SCU_TASK_TYPE_FPDMAQ_READ,
+ len, dir);
+ return SCI_SUCCESS;
+ }
+
+ /* DMA */
+ if (task->ata_task.dma_xfer) {
+ sci_stp_optimized_request_construct(ireq,
+ SCU_TASK_TYPE_DMA_IN,
+ len, dir);
+ return SCI_SUCCESS;
+ } else /* PIO */
+ return sci_stp_pio_request_construct(ireq, copy);
+
+ return status;
+}
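+
+/* Summary of the SATA construction dispatch above (a reading aid only):
+ *
+ *   tmf_task + SRST high/low -> scu_stp_raw_request_construct_task_context()
+ *   data_dir == DMA_NONE     -> scu_stp_raw_request_construct_task_context()
+ *   ata_task.use_ncq         -> sci_stp_optimized_request_construct(FPDMAQ_READ)
+ *   ata_task.dma_xfer        -> sci_stp_optimized_request_construct(DMA_IN)
+ *   otherwise (PIO)          -> sci_stp_pio_request_construct()
+ */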
+
+static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ ireq->protocol = SCIC_SSP_PROTOCOL;
+
+ scu_ssp_io_request_construct_task_context(ireq,
+ task->data_dir,
+ task->total_xfer_len);
+
+ sci_io_request_build_ssp_command_iu(ireq);
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_task_request_construct_ssp(
+ struct isci_request *ireq)
+{
+ /* Construct the SSP Task SCU Task Context */
+ scu_ssp_task_request_construct_task_context(ireq);
+
+ /* Fill in the SSP Task IU */
+ sci_task_request_build_ssp_task_iu(ireq);
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
+{
+ enum sci_status status;
+ bool copy = false;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ ireq->protocol = SCIC_STP_PROTOCOL;
+
+ copy = (task->data_dir == DMA_NONE) ? false : true;
+
+ status = sci_io_request_construct_sata(ireq,
+ task->total_xfer_len,
+ task->data_dir,
+ copy);
+
+ if (status == SCI_SUCCESS)
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return status;
+}
+
+enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ /* check for management protocols */
+ if (ireq->ttype == tmf_task) {
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+ tmf->tmf_code == isci_tmf_sata_srst_low) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ } else {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Request 0x%p received un-handled SAT "
+ "Protocol 0x%x.\n",
+ __func__, ireq, tmf->tmf_code);
+
+ return SCI_FAILURE;
+ }
+ }
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return status;
+}
+
+/**
+ * sci_req_tx_bytes - bytes transferred when reply underruns request
+ * @sci_req: request that was terminated early
+ */
+#define SCU_TASK_CONTEXT_SRAM 0x200000
+static u32 sci_req_tx_bytes(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ u32 ret_val = 0;
+
+ if (readl(&ihost->smu_registers->address_modifier) == 0) {
+ void __iomem *scu_reg_base = ihost->scu_registers;
+
+ /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
+ * BAR1 is the scu_registers
+ * 0x20002C = 0x200000 + 0x2c
+ * = start of task context SRAM + offset of (type.ssp.data_offset)
+ * TCi is the io_tag of struct sci_request
+ */
+ ret_val = readl(scu_reg_base +
+ (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
+ ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
+ }
+
+ return ret_val;
+}
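+
+/* Worked example for the SRAM read above (illustrative; assumes
+ * sizeof(struct scu_task_context) == 256 as the "256*TCi" comment implies):
+ *
+ *   TCi = 2:  address = scu_reg_base + 0x200000 + 0x2c + 2 * 256
+ *                     = scu_reg_base + 0x20022c
+ *
+ * i.e. the per-TC copy of type.ssp.data_offset inside task context SRAM.
+ */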
+
+enum sci_status sci_request_start(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+ struct scu_task_context *tc = ireq->tc;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+ if (state != SCI_REQ_CONSTRUCTED) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request requested to start while in wrong "
+ "state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
+
+ switch (tc->protocol_type) {
+ case SCU_TASK_CONTEXT_PROTOCOL_SMP:
+ case SCU_TASK_CONTEXT_PROTOCOL_SSP:
+ /* SSP/SMP Frame */
+ tc->type.ssp.tag = ireq->io_tag;
+ tc->type.ssp.target_port_transfer_tag = 0xFFFF;
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_STP:
+ /* STP/SATA Frame
+ * tc->type.stp.ncq_tag = ireq->ncq_tag;
+ */
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_NONE:
+ /* @todo When do we set no protocol type? */
+ break;
+
+ default:
+ /* This should never happen since we build the IO
+ * requests */
+ break;
+ }
+
+ /* Add to the post_context the io tag value */
+ ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
+
+ /* Everything is good go ahead and change state */
+ sci_change_state(&ireq->sm, SCI_REQ_STARTED);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_terminate(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+
+ state = ireq->sm.current_state_id;
+
+ switch (state) {
+ case SCI_REQ_CONSTRUCTED:
+ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_STARTED:
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ case SCI_REQ_SMP_WAIT_RESP:
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ case SCI_REQ_STP_PIO_WAIT_FRAME:
+ case SCI_REQ_STP_PIO_DATA_IN:
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
+ sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+ return SCI_SUCCESS;
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_ABORTING:
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_COMPLETED:
+ default:
+ dev_warn(&ireq->owning_controller->pdev->dev,
+ "%s: SCIC IO Request requested to abort while in wrong "
+ "state %d\n",
+ __func__,
+ ireq->sm.current_state_id);
+ break;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_request_complete(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+ if (WARN_ONCE(state != SCI_REQ_COMPLETED,
+ "isci: request completion from wrong state (%d)\n", state))
+ return SCI_FAILURE_INVALID_STATE;
+
+ if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
+ sci_controller_release_frame(ihost,
+ ireq->saved_rx_frame_index);
+
+ /* XXX can we just stop the machine and remove the 'final' state? */
+ sci_change_state(&ireq->sm, SCI_REQ_FINAL);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
+ u32 event_code)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+
+ if (state != SCI_REQ_STP_PIO_DATA_IN) {
+ dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
+ __func__, event_code, state);
+
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ switch (scu_get_event_specifier(event_code)) {
+ case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
+ /* We are waiting for data and the SCU has R_ERR the data frame.
+ * Go back to waiting for the D2H Register FIS
+ */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ return SCI_SUCCESS;
+ default:
+ dev_err(&ihost->pdev->dev,
+ "%s: pio request unexpected event %#x\n",
+ __func__, event_code);
+
+ /* TODO Should we fail the PIO request when we get an
+ * unexpected event?
+ */
+ return SCI_FAILURE;
+ }
+}
+
+/*
+ * This function copies response data for requests returning response data
+ * instead of sense data.
+ * @sci_req: This parameter specifies the request object for which to copy
+ * the response data.
+ */
+static void sci_io_request_copy_response(struct isci_request *ireq)
+{
+ void *resp_buf;
+ u32 len;
+ struct ssp_response_iu *ssp_response;
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+ ssp_response = &ireq->ssp.rsp;
+
+ resp_buf = &isci_tmf->resp.resp_iu;
+
+ len = min_t(u32,
+ SSP_RESP_IU_MAX_SIZE,
+ be32_to_cpu(ssp_response->response_data_len));
+
+ memcpy(resp_buf, ssp_response->resp_data, len);
+}
+
+static enum sci_status
+request_started_state_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ struct ssp_response_iu *resp_iu;
+ u8 datapres;
+
+ /* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
+ * to determine SDMA status
+ */
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
+ /* There are times when the SCU hardware will return an early
+ * response because the io request specified more data than is
+ * returned by the target device (mode pages, inquiry data,
+ * etc.). We must check the response status to see if this is
+ * truly a failed request or a good request that just got
+ * completed early.
+ */
+ struct ssp_response_iu *resp = &ireq->ssp.rsp;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&ireq->ssp.rsp,
+ &ireq->ssp.rsp,
+ word_cnt);
+
+ if (resp->status == 0) {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ }
+ break;
+ }
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&ireq->ssp.rsp,
+ &ireq->ssp.rsp,
+ word_cnt);
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+ }
+
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
+ /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
+ * guaranteed to be received before this completion status is
+ * posted?
+ */
+ resp_iu = &ireq->ssp.rsp;
+ datapres = resp_iu->datapres;
+
+ if (datapres == 1 || datapres == 2) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+ break;
+ /* only stp device gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
+ if (ireq->protocol == SCIC_STP_PROTOCOL) {
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ } else {
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ }
+ break;
+
+ /* both stp/ssp device gets suspended */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ break;
+
+ /* neither ssp nor stp gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
+ default:
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ break;
+ }
+
+ /*
+ * TODO: This is probably wrong for ACK/NAK timeout conditions
+ */
+
+ /* In all cases we will treat this as the completion of the IO req. */
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+request_aborting_state_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+ case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
+ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* Unless we get some strange error wait for the task abort to complete
+ * TODO: Should there be a state change for this completion?
+ */
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ /* Currently, the decision is to simply allow the task request
+ * to time out if the task IU wasn't received successfully.
+ * There is a potential for receiving multiple task responses if
+ * we decide to send the task IU again.
+ */
+ dev_warn(&ireq->owning_controller->pdev->dev,
+ "%s: TaskRequest:0x%p CompletionCode:%x - "
+ "ACK/NAK timeout\n", __func__, ireq,
+ completion_code);
+
+ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_response_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* In the AWAIT RESPONSE state, any TC completion is
+ * unexpected. However, if the TC has success status, we
+ * complete the IO anyway.
+ */
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+ /* These statuses have been seen in a specific LSI
+ * expander, which sometimes is not able to send an smp
+ * response within 2 ms. This causes our hardware to break
+ * the connection and set the TC completion with one of
+ * these SMP_XXX_XX_ERR statuses. For this type of error,
+ * we ask the ihost user to retry the request.
+ */
+ ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
+ ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ default:
+ /* All other completion status cause the IO to be complete. If a NAK
+ * was received, then it is up to the user to retry the request
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
+{
+ struct scu_sgl_element *sgl;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct isci_request *ireq = to_ireq(stp_req);
+ struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
+
+ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+ if (!sgl_pair)
+ sgl = NULL;
+ else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
+ if (sgl_pair->B.address_lower == 0 &&
+ sgl_pair->B.address_upper == 0) {
+ sgl = NULL;
+ } else {
+ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
+ sgl = &sgl_pair->B;
+ }
+ } else {
+ if (sgl_pair->next_pair_lower == 0 &&
+ sgl_pair->next_pair_upper == 0) {
+ sgl = NULL;
+ } else {
+ pio_sgl->index++;
+ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
+ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+ sgl = &sgl_pair->A;
+ }
+ }
+
+ return sgl;
+}
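+
+/* Illustrative walk for pio_sgl_next() (reading aid): starting from pair 0,
+ * element A, successive calls return
+ *
+ *   pair 0, element B   (if B has a non-zero address)
+ *   pair 1, element A   (if pair 0 links to a next pair)
+ *   pair 1, element B   ...
+ *
+ * and NULL once the current element or next-pair address is zero.
+ */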
+
+static enum sci_status
+stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
+
+/* Transmit a DATA_FIS from (current sgl + offset) for the input parameter
+ * length. The current sgl and offset are already stored in the IO request.
+ */
+static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
+ struct isci_request *ireq,
+ u32 length)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ struct scu_task_context *task_context = ireq->tc;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct scu_sgl_element *current_sgl;
+
+ /* Recycle the TC and reconstruct it for sending out a DATA FIS
+ * containing the data from current_sgl+offset for the input length
+ */
+ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
+ current_sgl = &sgl_pair->A;
+ else
+ current_sgl = &sgl_pair->B;
+
+ /* update the TC */
+ task_context->command_iu_upper = current_sgl->address_upper;
+ task_context->command_iu_lower = current_sgl->address_lower;
+ task_context->transfer_length_bytes = length;
+ task_context->type.stp.fis_type = FIS_DATA;
+
+ /* send the new TC out. */
+ return sci_controller_continue_io(ireq);
+}
+
+static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct scu_sgl_element *sgl;
+ enum sci_status status;
+ u32 offset;
+ u32 len = 0;
+
+ offset = stp_req->sgl.offset;
+ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+ if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
+ return SCI_FAILURE;
+
+ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
+ sgl = &sgl_pair->A;
+ len = sgl_pair->A.length - offset;
+ } else {
+ sgl = &sgl_pair->B;
+ len = sgl_pair->B.length - offset;
+ }
+
+ if (stp_req->pio_len == 0)
+ return SCI_SUCCESS;
+
+ if (stp_req->pio_len >= len) {
+ status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
+ if (status != SCI_SUCCESS)
+ return status;
+ stp_req->pio_len -= len;
+
+ /* update the current sgl, offset and save for future */
+ sgl = pio_sgl_next(stp_req);
+ offset = 0;
+ } else if (stp_req->pio_len < len) {
+ status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
+
+ /* Sgl offset will be adjusted and saved for future */
+ offset += stp_req->pio_len;
+ sgl->address_lower += stp_req->pio_len;
+ stp_req->pio_len = 0;
+ }
+
+ stp_req->sgl.offset = offset;
+
+ return status;
+}
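+
+/* Illustrative numbers for the transmit routine above: with an SGL element
+ * holding 8K (0x2000) at offset 0,
+ *
+ *   pio_len = 0x3000 (12K) -> send 8K, pio_len becomes 4K, advance to the
+ *                             next SGL element via pio_sgl_next(), offset 0
+ *   pio_len = 0x0800 (2K)  -> send 2K, pio_len becomes 0, stay on the same
+ *                             element with offset (and address_lower) += 2K
+ */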
+
+/**
+ *
+ * @stp_request: The request that is used for the SGL processing.
+ * @data_buffer: The buffer of data to be copied.
+ * @length: The length of the data transfer.
+ *
+ * Copy the data from the buffer for the length specified to the IO request
+ * SGL specified data region.
+ */
+static enum sci_status
+sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
+ u8 *data_buf, u32 len)
+{
+ struct isci_request *ireq;
+ u8 *src_addr;
+ int copy_len;
+ struct sas_task *task;
+ struct scatterlist *sg;
+ void *kaddr;
+ int total_len = len;
+
+ ireq = to_ireq(stp_req);
+ task = isci_request_access_task(ireq);
+ src_addr = data_buf;
+
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
+
+ while (total_len > 0) {
+ struct page *page = sg_page(sg);
+
+ copy_len = min_t(int, total_len, sg_dma_len(sg));
+ kaddr = kmap_atomic(page, KM_IRQ0);
+ memcpy(kaddr + sg->offset, src_addr, copy_len);
+ kunmap_atomic(kaddr, KM_IRQ0);
+ total_len -= copy_len;
+ src_addr += copy_len;
+ sg = sg_next(sg);
+ }
+ } else {
+ BUG_ON(task->total_xfer_len < total_len);
+ memcpy(task->scatter, src_addr, total_len);
+ }
+
+ return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @sci_req: The PIO DATA IN request that is to receive the data.
+ * @data_buffer: The buffer to copy from.
+ *
+ * Copy the data buffer to the io request data region.
+ */
+static enum sci_status sci_stp_request_pio_data_in_copy_data(
+ struct isci_stp_request *stp_req,
+ u8 *data_buffer)
+{
+ enum sci_status status;
+
+ /*
+ * If there is less than 1K remaining in the transfer request,
+ * copy just the data for the transfer */
+ if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
+ stp_req, data_buffer, stp_req->pio_len);
+
+ if (status == SCI_SUCCESS)
+ stp_req->pio_len = 0;
+ } else {
+ /* We are transferring the whole frame so copy */
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
+ stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
+
+ if (status == SCI_SUCCESS)
+ stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
+ }
+
+ return status;
+}
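+
+/* Illustrative frame accounting for the PIO data-in copy above
+ * (SCU_MAX_FRAME_BUFFER_SIZE is 1K): a 2.5K (0xa00) PIO transfer arrives as
+ *
+ *   frame 1: 1K copied, pio_len 0xa00 -> 0x600
+ *   frame 2: 1K copied, pio_len 0x600 -> 0x200
+ *   frame 3: only the remaining 0x200 bytes are copied and pio_len drops to
+ *            0, which lets the SCI_REQ_STP_PIO_DATA_IN frame handler move
+ *            toward completion.
+ */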
+
+static enum sci_status
+stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status
+pio_data_out_tx_done_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+ bool all_frames_transferred = false;
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* Transmit data */
+ if (stp_req->pio_len != 0) {
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
+ if (status == SCI_SUCCESS) {
+ if (stp_req->pio_len == 0)
+ all_frames_transferred = true;
+ }
+ } else if (stp_req->pio_len == 0) {
+ /*
+ * this will happen if all the data is written the
+ * first time after the PIO Setup FIS is received
+ */
+ all_frames_transferred = true;
+ }
+
+ /* all data transferred. */
+ if (all_frames_transferred) {
+ /*
+ * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
+ * and wait for a PIO_SETUP FIS or a D2H Register FIS. */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ break;
+
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ struct dev_to_host_fis *frame_header;
+ enum sci_status status;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if ((status == SCI_SUCCESS) &&
+ (frame_header->fis_type == FIS_REGD2H)) {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+ }
+
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+}
+
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ enum sci_base_request_states state;
+ enum sci_status status;
+ ssize_t word_cnt;
+
+ state = ireq->sm.current_state_id;
+ switch (state) {
+ case SCI_REQ_STARTED: {
+ struct ssp_frame_hdr ssp_hdr;
+ void *frame_header;
+
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+
+ word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
+ sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
+
+ if (ssp_hdr.frame_type == SSP_RESPONSE) {
+ struct ssp_response_iu *resp_iu;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&resp_iu);
+
+ sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
+
+ resp_iu = &ireq->ssp.rsp;
+
+ if (resp_iu->datapres == 0x01 ||
+ resp_iu->datapres == 0x02) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+ } else {
+ /* not a response frame, why did it get forwarded? */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n", __func__, ireq,
+ frame_index, ssp_hdr.frame_type);
+ }
+
+ /*
+ * In any case we are done with this frame buffer return it to
+ * the controller
+ */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ sci_io_request_copy_response(ireq);
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_SUCCESS;
+
+ case SCI_REQ_SMP_WAIT_RESP: {
+ struct smp_resp *rsp_hdr = &ireq->smp.rsp;
+ void *frame_header;
+
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+
+ /* byte swap the header. */
+ word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
+ sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
+
+ if (rsp_hdr->frame_type == SMP_RESPONSE) {
+ void *smp_resp;
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ &smp_resp);
+
+ word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
+ sizeof(u32);
+
+ sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
+ smp_resp, word_cnt);
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
+ } else {
+ /*
+ * This was not a response frame, so why did it get
+ * forwarded?
+ */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC SMP Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n",
+ __func__,
+ ireq,
+ frame_index,
+ rsp_hdr->frame_type);
+
+ ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ }
+
+ sci_controller_release_frame(ihost, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return sci_stp_request_udma_general_frame_handler(ireq,
+ frame_index);
+
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+ /* Use the general frame handler to copy the response data */
+ status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_REGD2H:
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ /* The command has completed with error */
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: IO Request:0x%p Frame Id:%d protocol "
+ "violation occurred\n", __func__, stp_req,
+ frame_index);
+
+ ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+ ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+ break;
+ }
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame has been decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_WAIT_FRAME: {
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__, stp_req, frame_index, status);
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_PIO_SETUP:
+ /* Get from the frame buffer the PIO Setup Data */
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ /* Get the data from the PIO Setup. The SCU hardware
+ * returns the first word in the frame_header and the rest
+ * of the data is in the frame buffer, so we need to
+ * back up one dword
+ */
+
+ /* transfer_count: first 16 bits in the 4th dword */
+ stp_req->pio_len = frame_buffer[3] & 0xffff;
+
+ /* status: 4th byte in the 3rd dword */
+ stp_req->status = (frame_buffer[2] >> 24) & 0xff;
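+ /* Example (illustrative arithmetic only): frame_buffer[3] =
+ * 0x00002000 yields pio_len = 0x2000 (8K), and frame_buffer[2] =
+ * 0x58000000 yields status = 0x58.
+ */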
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ ireq->stp.rsp.status = stp_req->status;
+
+ /* The next state is dependent on whether the
+ * request was PIO Data-in or Data out
+ */
+ if (task->data_dir == DMA_FROM_DEVICE) {
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
+ } else if (task->data_dir == DMA_TO_DEVICE) {
+ /* Transmit data */
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
+ }
+ break;
+
+ case FIS_SETDEVBITS:
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ case FIS_REGD2H:
+ if (frame_header->status & ATA_BUSY) {
+ /*
+ * Now why is the drive sending a D2H Register
+ * FIS when it is still busy? Do nothing since
+ * we are still in the right state.
+ */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC PIO Request 0x%p received "
+ "D2H Register FIS with BSY status "
+ "0x%x\n",
+ __func__,
+ stp_req,
+ frame_header->status);
+ break;
+ }
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.req,
+ frame_header,
+ frame_buffer);
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* FIXME: what do we do here? */
+ break;
+ }
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_DATA_IN: {
+ struct dev_to_host_fis *frame_header;
+ struct sata_fis_data *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+ return status;
+ }
+
+ if (frame_header->fis_type != FIS_DATA) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC PIO Request 0x%p received frame %d "
+ "with fis type 0x%02x when expecting a data "
+ "fis.\n",
+ __func__,
+ stp_req,
+ frame_index,
+ frame_header->fis_type);
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ return status;
+ }
+
+ if (stp_req->sgl.index < 0) {
+ ireq->saved_rx_frame_index = frame_index;
+ stp_req->pio_len = 0;
+ } else {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ status = sci_stp_request_pio_data_in_copy_data(stp_req,
+ (u8 *)frame_buffer);
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ }
+
+ /* Check for the end of the transfer, are there more
+ * bytes remaining for this data transfer
+ */
+ if (status != SCI_SUCCESS || stp_req->pio_len != 0)
+ return status;
+
+ if ((stp_req->status & ATA_BUSY) == 0) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ } else {
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ return status;
+ }
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_REGD2H:
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ /* The command has completed with error */
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: IO Request:0x%p Frame Id:%d protocol "
+ "violation occurred\n",
+ __func__,
+ stp_req,
+ frame_index);
+
+ ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+ ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+ break;
+ }
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame has been decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+ case SCI_REQ_ABORTING:
+ /*
+ * TODO: Is it even possible to get an unsolicited frame in the
+ * aborting state?
+ */
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_SUCCESS;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request given unexpected frame %x while "
+ "in state %d\n",
+ __func__,
+ frame_index,
+ state);
+
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+ /* We must check the response buffer to see if the D2H
+ * Register FIS was received before we got the TC
+ * completion.
+ */
+ if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
+ sci_remote_device_suspend(ireq->target_device,
+ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ } else {
+ /* If we have an error completion status for the
+ * TC then we can expect a D2H register FIS from
+ * the device so we must change state to wait
+ * for it
+ */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
+ }
+ break;
+
+ /* TODO Check to see if any of these completion status need to
+ * wait for the device to host register fis.
+ */
+ /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
+ * - this comes only for B0
+ */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
+ sci_remote_device_suspend(ireq->target_device,
+ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+ /* Fall through to the default case */
+ default:
+ /* All other completion status cause the IO to be complete. */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
+ break;
+
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be complete. If
+ * a NAK was received, then it is up to the user to retry the
+ * request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+
+ switch (state) {
+ case SCI_REQ_STARTED:
+ return request_started_state_tc_event(ireq, completion_code);
+
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ return ssp_task_request_await_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_RESP:
+ return smp_request_await_response_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ return smp_request_await_tc_event(ireq, completion_code);
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return stp_request_udma_await_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ return stp_request_non_data_await_h2d_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ return stp_request_pio_await_h2d_completion_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ return pio_data_out_tx_done_tc_event(ireq, completion_code);
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_ABORTING:
+ return request_aborting_state_tc_event(ireq,
+ completion_code);
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request given task completion "
+ "notification %x while in wrong state %d\n",
+ __func__,
+ completion_code,
+ state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
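+
+/* Illustrative PIO data-out flow through the dispatcher above (derived from
+ * the state changes in the handlers earlier in this file):
+ *
+ *   SCI_REQ_STP_PIO_WAIT_H2D   -- H2D register FIS TC completes -->
+ *   SCI_REQ_STP_PIO_WAIT_FRAME -- PIO Setup FIS handled by the frame
+ *                                 handler, data transmit started -->
+ *   SCI_REQ_STP_PIO_DATA_OUT   -- each DATA FIS TC completion lands in
+ *                                 pio_data_out_tx_done_tc_event() until
+ *                                 pio_len reaches zero.
+ */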
+
+/**
+ * isci_request_process_response_iu() - This function sets the status and
+ * response iu, in the task struct, from the request object for the upper
+ * layer driver.
+ * @sas_task: This parameter is the task struct from the upper layer driver.
+ * @resp_iu: This parameter points to the response iu of the completed request.
+ * @dev: This parameter specifies the linux device struct.
+ *
+ * none.
+ */
+static void isci_request_process_response_iu(
+ struct sas_task *task,
+ struct ssp_response_iu *resp_iu,
+ struct device *dev)
+{
+ dev_dbg(dev,
+ "%s: resp_iu = %p "
+ "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
+ "resp_iu->response_data_len = %x, "
+ "resp_iu->sense_data_len = %x\nrepsonse data: ",
+ __func__,
+ resp_iu,
+ resp_iu->status,
+ resp_iu->datapres,
+ resp_iu->response_data_len,
+ resp_iu->sense_data_len);
+
+ task->task_status.stat = resp_iu->status;
+
+ /* libsas updates the task status fields based on the response iu. */
+ sas_ssp_task_response(dev, task, resp_iu);
+}
+
+/**
+ * isci_request_set_open_reject_status() - This function prepares the I/O
+ * completion for OPEN_REJECT conditions.
+ * @request: This parameter is the completed isci_request object.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ * the LLDD with respect to completing this request or forcing an abort
+ * condition on the I/O.
+ * @open_rej_reason: This parameter specifies the encoded reason for the
+ * abandon-class reject.
+ *
+ * none.
+ */
+static void isci_request_set_open_reject_status(
+ struct isci_request *request,
+ struct sas_task *task,
+ enum service_response *response_ptr,
+ enum exec_status *status_ptr,
+ enum isci_completion_selection *complete_to_host_ptr,
+ enum sas_open_rej_reason open_rej_reason)
+{
+ /* Task in the target is done. */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ *response_ptr = SAS_TASK_UNDELIVERED;
+ *status_ptr = SAS_OPEN_REJECT;
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ task->task_status.open_rej_reason = open_rej_reason;
+}
+
+/**
+ * isci_request_handle_controller_specific_errors() - This function decodes
+ * controller-specific I/O completion error conditions.
+ * @request: This parameter is the completed isci_request object.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ * the LLDD with respect to completing this request or forcing an abort
+ * condition on the I/O.
+ *
+ * none.
+ */
+static void isci_request_handle_controller_specific_errors(
+ struct isci_remote_device *idev,
+ struct isci_request *request,
+ struct sas_task *task,
+ enum service_response *response_ptr,
+ enum exec_status *status_ptr,
+ enum isci_completion_selection *complete_to_host_ptr)
+{
+ unsigned int cstatus;
+
+ cstatus = request->scu_status;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
+ "- controller status = 0x%x\n",
+ __func__, request, cstatus);
+
+ /* Decode the controller-specific errors; most
+ * important is to recognize those conditions in which
+ * the target may still have a task outstanding that
+ * must be aborted.
+ *
+ * Note that there are SCU completion codes being
+ * named in the decode below for which SCIC has already
+ * done work to handle them in a way other than as
+ * a controller-specific completion code; these are left
+ * in the decode below for completeness sake.
+ */
+ switch (cstatus) {
+ case SCU_TASK_DONE_DMASETUP_DIRERR:
+ /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
+ case SCU_TASK_DONE_XFERCNT_ERR:
+ /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
+ *response_ptr = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAS_ABORTED_TASK;
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr =
+ isci_perform_normal_io_completion;
+ } else {
+ /* Task in the target is not done. */
+ *response_ptr = SAS_TASK_UNDELIVERED;
+
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAM_STAT_TASK_ABORTED;
+
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr =
+ isci_perform_error_io_completion;
+ }
+
+ break;
+
+ case SCU_TASK_DONE_CRC_ERR:
+ case SCU_TASK_DONE_NAK_CMD_ERR:
+ case SCU_TASK_DONE_EXCESS_DATA:
+ case SCU_TASK_DONE_UNEXP_FIS:
+ /* Also SCU_TASK_DONE_UNEXP_RESP: */
+ case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
+ case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
+ case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
+ /* These are conditions in which the target
+ * has completed the task, so that no cleanup
+ * is necessary.
+ */
+ *response_ptr = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAS_ABORTED_TASK;
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ break;
+
+
+ /* Note that the only open reject completion codes seen here will be
+ * abandon-class codes; all others are automatically retried in the SCU.
+ */
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+
+ /* Note - the return of AB0 will change when
+ * libsas implements detection of zone violations.
+ */
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB0);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB1);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB2);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB3);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_BAD_DEST);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_STP_NORES);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_EPROTO);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_CONN_RATE);
+ break;
+
+ case SCU_TASK_DONE_LL_R_ERR:
+ /* Also SCU_TASK_DONE_ACK_NAK_TO: */
+ case SCU_TASK_DONE_LL_PERR:
+ case SCU_TASK_DONE_LL_SY_TERM:
+ /* Also SCU_TASK_DONE_NAK_ERR:*/
+ case SCU_TASK_DONE_LL_LF_TERM:
+ /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
+ case SCU_TASK_DONE_LL_ABORT_ERR:
+ case SCU_TASK_DONE_SEQ_INV_TYPE:
+ /* Also SCU_TASK_DONE_UNEXP_XR: */
+ case SCU_TASK_DONE_XR_IU_LEN_ERR:
+ case SCU_TASK_DONE_INV_FIS_LEN:
+ /* Also SCU_TASK_DONE_XR_WD_LEN: */
+ case SCU_TASK_DONE_SDMA_ERR:
+ case SCU_TASK_DONE_OFFSET_ERR:
+ case SCU_TASK_DONE_MAX_PLD_ERR:
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
+ case SCU_TASK_DONE_SMP_LL_RX_ERR:
+ case SCU_TASK_DONE_UNEXP_DATA:
+ case SCU_TASK_DONE_UNEXP_SDBFIS:
+ case SCU_TASK_DONE_REG_ERR:
+ case SCU_TASK_DONE_SDB_ERR:
+ case SCU_TASK_DONE_TASK_ABORT:
+ default:
+ /* Task in the target is not done. */
+ *response_ptr = SAS_TASK_UNDELIVERED;
+ *status_ptr = SAM_STAT_TASK_ABORTED;
+
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ } else {
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr = isci_perform_error_io_completion;
+ }
+ break;
+ }
+}
+
+/**
+ * isci_task_save_for_upper_layer_completion() - This function saves the
+ * request for later completion to the upper layer driver.
+ * @host: This parameter is a pointer to the host on which the request
+ * should be queued (either as an error or success).
+ * @request: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ *
+ * none.
+ */
+static void isci_task_save_for_upper_layer_completion(
+ struct isci_host *host,
+ struct isci_request *request,
+ enum service_response response,
+ enum exec_status status,
+ enum isci_completion_selection task_notification_selection)
+{
+ struct sas_task *task = isci_request_access_task(request);
+
+ task_notification_selection
+ = isci_task_set_completion_status(task, response, status,
+ task_notification_selection);
+
+ /* Tasks aborted specifically by a call to the lldd_abort_task
+ * function should not be completed to the host in the regular path.
+ */
+ switch (task_notification_selection) {
+
+ case isci_perform_normal_io_completion:
+
+ /* Normal notification (task_done) */
+ dev_dbg(&host->pdev->dev,
+ "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+ /* Add to the completed list. */
+ list_add(&request->completed_node,
+ &host->requests_to_complete);
+
+ /* Take the request off the device's pending request list. */
+ list_del_init(&request->dev_node);
+ break;
+
+ case isci_perform_aborted_io_completion:
+ /* No notification to libsas because this request is
+ * already in the abort path.
+ */
+ dev_dbg(&host->pdev->dev,
+ "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+
+ /* Wake up whatever process was waiting for this
+ * request to complete.
+ */
+ WARN_ON(request->io_request_completion == NULL);
+
+ if (request->io_request_completion != NULL) {
+
+ /* Signal whoever is waiting that this
+ * request is complete.
+ */
+ complete(request->io_request_completion);
+ }
+ break;
+
+ case isci_perform_error_io_completion:
+ /* Use sas_task_abort */
+ dev_dbg(&host->pdev->dev,
+ "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+ /* Add to the aborted list. */
+ list_add(&request->completed_node,
+ &host->requests_to_errorback);
+ break;
+
+ default:
+ dev_dbg(&host->pdev->dev,
+ "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+
+ /* Add to the list of requests to error back to libsas. */
+ list_add(&request->completed_node,
+ &host->requests_to_errorback);
+ break;
+ }
+}
+
+static void isci_request_process_stp_response(struct sas_task *task,
+ void *response_buffer)
+{
+ struct dev_to_host_fis *d2h_reg_fis = response_buffer;
+ struct task_status_struct *ts = &task->task_status;
+ struct ata_task_resp *resp = (void *)&ts->buf[0];
+
+ resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
+ memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
+ ts->buf_valid_size = sizeof(*resp);
+
+ /*
+ * If the device fault bit is set in the status register, then
+ * report a protocol response so the FIS copied above is used to
+ * determine the error; otherwise report good status.
+ */
+ if (d2h_reg_fis->status & ATA_DF)
+ ts->stat = SAS_PROTO_RESPONSE;
+ else
+ ts->stat = SAM_STAT_GOOD;
+
+ ts->resp = SAS_TASK_COMPLETE;
+}
+
+static void isci_request_io_request_complete(struct isci_host *ihost,
+ struct isci_request *request,
+ enum sci_io_status completion_status)
+{
+ struct sas_task *task = isci_request_access_task(request);
+ struct ssp_response_iu *resp_iu;
+ void *resp_buf;
+ unsigned long task_flags;
+ struct isci_remote_device *idev = isci_lookup_device(task->dev);
+ enum service_response response = SAS_TASK_UNDELIVERED;
+ enum exec_status status = SAS_ABORTED_TASK;
+ enum isci_request_status request_status;
+ enum isci_completion_selection complete_to_host
+ = isci_perform_normal_io_completion;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request = %p, task = %p,\n"
+ "task->data_dir = %d completion_status = 0x%x\n",
+ __func__,
+ request,
+ task,
+ task->data_dir,
+ completion_status);
+
+ spin_lock(&request->state_lock);
+ request_status = request->status;
+
+ /* Decode the request status. Note that if the request has been
+ * aborted by a task management function, we don't care
+ * what the status is.
+ */
+ switch (request_status) {
+
+ case aborted:
+ /* "aborted" indicates that the request was aborted by a task
+ * management function, since once a task management request is
+ * performed by the device, the request only completes because
+ * of the subsequent driver terminate.
+ *
+ * Aborted also means an external thread is explicitly managing
+ * this request, so that we do not complete it up the stack.
+ *
+ * The target is still there (since the TMF was successful).
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_aborted_io_completion;
+ /* This was an aborted request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case aborting:
+ /* aborting means that the task management function tried and
+ * failed to abort the request. We need to note the request
+ * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
+ * target as down.
+ *
+ * Aborting also means an external thread is explicitly managing
+ * this request, so that we do not complete it up the stack.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ if (!idev)
+ /* The device has been /is being stopped. Note that
+ * we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_PHY_DOWN;
+
+ complete_to_host = isci_perform_aborted_io_completion;
+
+ /* This was an aborted request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case terminating:
+
+ /* This was a terminated request. This happens when
+ * the I/O is being terminated because of an action on
+ * the device (reset, tear down, etc.), and the I/O needs
+ * to be completed up the stack.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_aborted_io_completion;
+
+ /* This was a terminated request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case dead:
+ /* This was a terminated request that timed out during the
+ * termination process. There is no task to complete to
+ * libsas.
+ */
+ complete_to_host = isci_perform_normal_io_completion;
+ spin_unlock(&request->state_lock);
+ break;
+
+ default:
+
+ /* The request is done from an SCU HW perspective. */
+ request->status = completed;
+
+ spin_unlock(&request->state_lock);
+
+ /* This is an active request being completed from the core. */
+ switch (completion_status) {
+
+ case SCI_IO_FAILURE_RESPONSE_VALID:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+ __func__,
+ request,
+ task);
+
+ if (sas_protocol_ata(task->task_proto)) {
+ resp_buf = &request->stp.rsp;
+ isci_request_process_stp_response(task,
+ resp_buf);
+ } else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+ /* crack the iu response buffer. */
+ resp_iu = &request->ssp.rsp;
+ isci_request_process_response_iu(task, resp_iu,
+ &ihost->pdev->dev);
+
+ } else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+ dev_err(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+ "SAS_PROTOCOL_SMP protocol\n",
+ __func__);
+
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
+
+ /* use the task status set in the task struct by the
+ * isci_request_process_response_iu call.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = task->task_status.resp;
+ status = task->task_status.stat;
+ break;
+
+ case SCI_IO_SUCCESS:
+ case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
+ response = SAS_TASK_COMPLETE;
+ status = SAM_STAT_GOOD;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ void *rsp = &request->smp.rsp;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SMP protocol completion\n",
+ __func__);
+
+ sg_copy_from_buffer(
+ &task->smp_task.smp_resp, 1,
+ rsp, sizeof(struct smp_resp));
+ } else if (completion_status
+ == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+
+ /* This was an SSP / STP / SATA transfer.
+ * There is a possibility that less data than
+ * the maximum was transferred.
+ */
+ u32 transferred_length = sci_req_tx_bytes(request);
+
+ task->task_status.residual
+ = task->total_xfer_len - transferred_length;
+
+ /* If there were residual bytes, call this an
+ * underrun.
+ */
+ if (task->task_status.residual != 0)
+ status = SAS_DATA_UNDERRUN;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+ __func__,
+ status);
+
+ } else
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_SUCCESS\n",
+ __func__);
+
+ break;
+
+ case SCI_IO_FAILURE_TERMINATED:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+ __func__,
+ request,
+ task);
+
+ /* The request was terminated explicitly. No handling
+ * is needed in the SCSI error handler path.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_normal_io_completion;
+ break;
+
+ case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
+
+ isci_request_handle_controller_specific_errors(
+ idev, request, task, &response, &status,
+ &complete_to_host);
+
+ break;
+
+ case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+ /* This is a special case, in that the I/O completion
+ * is telling us that the device needs a reset.
+ * In order for the device reset condition to be
+ * noticed, the I/O has to be handled in the error
+ * handler. Set the reset flag and cause the
+ * SCSI error thread to be scheduled.
+ */
+ spin_lock_irqsave(&task->task_state_lock, task_flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+ /* Fail the I/O. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
+
+ complete_to_host = isci_perform_error_io_completion;
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+
+ case SCI_FAILURE_RETRY_REQUIRED:
+
+ /* Fail the I/O so it can be retried. */
+ response = SAS_TASK_UNDELIVERED;
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_normal_io_completion;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+
+
+ default:
+ /* Catch any otherwise unhandled error codes here. */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: invalid completion code: 0x%x - "
+ "isci_request = %p\n",
+ __func__, completion_status, request);
+
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ if (SAS_PROTOCOL_SMP == task->task_proto) {
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ complete_to_host = isci_perform_normal_io_completion;
+ } else {
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ complete_to_host = isci_perform_error_io_completion;
+ }
+ break;
+ }
+ break;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ if (task->data_dir == DMA_NONE)
+ break;
+ if (task->num_scatter == 0)
+ /* 0 indicates a single dma address */
+ dma_unmap_single(&ihost->pdev->dev,
+ request->zero_scatter_daddr,
+ task->total_xfer_len, task->data_dir);
+ else /* unmap the sgl dma addresses */
+ dma_unmap_sg(&ihost->pdev->dev, task->scatter,
+ request->num_sg_entries, task->data_dir);
+ break;
+ case SAS_PROTOCOL_SMP: {
+ struct scatterlist *sg = &task->smp_task.smp_req;
+ struct smp_req *smp_req;
+ void *kaddr;
+
+ dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
+
+ /* need to swab it back in case the command buffer is re-used */
+ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ smp_req = kaddr + sg->offset;
+ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+ kunmap_atomic(kaddr, KM_IRQ0);
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Put the completed request on the correct list */
+ isci_task_save_for_upper_layer_completion(ihost, request, response,
+ status, complete_to_host
+ );
+
+ /* complete the io request to the core. */
+ sci_controller_complete_io(ihost, request->target_device, request);
+ isci_put_device(idev);
+
+ /* set terminated handle so it cannot be completed or
+ * terminated again, and to cause any calls into abort
+ * task to recognize the already completed case.
+ */
+ set_bit(IREQ_TERMINATED, &request->flags);
+}
+
+static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct domain_device *dev = ireq->target_device->domain_dev;
+ struct sas_task *task;
+
+ /* XXX as hch said always creating an internal sas_task for tmf
+ * requests would simplify the driver
+ */
+ task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
+
+ /* all unaccelerated request types (anything other than ssp or ncq)
+ * are handled with substates
+ */
+ if (!task && dev->dev_type == SAS_END_DEV) {
+ sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
+ } else if (!task &&
+ (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
+ isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
+ sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
+ } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
+ sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
+ } else if (task && sas_protocol_ata(task->task_proto) &&
+ !task->ata_task.use_ncq) {
+ u32 state;
+
+ if (task->data_dir == DMA_NONE)
+ state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
+ else if (task->ata_task.dma_xfer)
+ state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
+ else /* PIO */
+ state = SCI_REQ_STP_PIO_WAIT_H2D;
+
+ sci_change_state(sm, state);
+ }
+}
+
+static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct isci_host *ihost = ireq->owning_controller;
+
+ /* Tell the SCI_USER that the IO request is complete */
+ if (!test_bit(IREQ_TMF, &ireq->flags))
+ isci_request_io_request_complete(ihost, ireq,
+ ireq->sci_status);
+ else
+ isci_task_request_complete(ihost, ireq, ireq->sci_status);
+}
+
+static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ /* Setting the abort bit in the Task Context is required by the silicon. */
+ ireq->tc->abort = 1;
+}
+
+static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct scu_task_context *tc = ireq->tc;
+ struct host_to_dev_fis *h2d_fis;
+ enum sci_status status;
+
+ /* Clear the SRST bit */
+ h2d_fis = &ireq->stp.cmd;
+ h2d_fis->control = 0;
+
+ /* Clear the TC control bit */
+ tc->control_frame = 0;
+
+ status = sci_controller_continue_io(ireq);
+ WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
+}
+
+static const struct sci_base_state sci_request_state_table[] = {
+ [SCI_REQ_INIT] = { },
+ [SCI_REQ_CONSTRUCTED] = { },
+ [SCI_REQ_STARTED] = {
+ .enter_state = sci_request_started_state_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
+ .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
+ [SCI_REQ_STP_PIO_WAIT_H2D] = {
+ .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
+ [SCI_REQ_STP_PIO_DATA_IN] = { },
+ [SCI_REQ_STP_PIO_DATA_OUT] = { },
+ [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
+ [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
+ .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
+ },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
+ .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
+ },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
+ [SCI_REQ_TASK_WAIT_TC_COMP] = { },
+ [SCI_REQ_TASK_WAIT_TC_RESP] = { },
+ [SCI_REQ_SMP_WAIT_RESP] = { },
+ [SCI_REQ_SMP_WAIT_TC_COMP] = { },
+ [SCI_REQ_COMPLETED] = {
+ .enter_state = sci_request_completed_state_enter,
+ },
+ [SCI_REQ_ABORTING] = {
+ .enter_state = sci_request_aborting_state_enter,
+ },
+ [SCI_REQ_FINAL] = { },
+};
+
+static void
+sci_general_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
+
+ ireq->target_device = idev;
+ ireq->protocol = SCIC_NO_PROTOCOL;
+ ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
+
+ ireq->sci_status = SCI_SUCCESS;
+ ireq->scu_status = 0;
+ ireq->post_context = 0xFFFFFFFF;
+}
+
+static enum sci_status
+sci_io_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ sci_general_request_construct(ihost, idev, ireq);
+
+ if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ return SCI_FAILURE_INVALID_REMOTE_DEVICE;
+
+ if (dev->dev_type == SAS_END_DEV)
+ /* pass */;
+ else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+ memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+ else if (dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
+
+ return status;
+}
+
+enum sci_status sci_task_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 io_tag, struct isci_request *ireq)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ sci_general_request_construct(ihost, idev, ireq);
+
+ if (dev->dev_type == SAS_END_DEV ||
+ dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ set_bit(IREQ_TMF, &ireq->flags);
+ memset(ireq->tc, 0, sizeof(struct scu_task_context));
+ } else
+ status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ return status;
+}
+
+static enum sci_status isci_request_ssp_request_construct(
+ struct isci_request *request)
+{
+ enum sci_status status;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: request = %p\n",
+ __func__,
+ request);
+ status = sci_io_request_construct_basic_ssp(request);
+ return status;
+}
+
+static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct host_to_dev_fis *fis = &ireq->stp.cmd;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ enum sci_status status;
+
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: ireq = %p\n",
+ __func__,
+ ireq);
+
+ memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
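+ /* Bit 7 of the FIS flags byte is the C bit (a Command register update,
+ * as opposed to a Device Control update); the low nibble is the PM
+ * port field, which is cleared here.
+ */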
+ if (!task->ata_task.device_control_reg_update)
+ fis->flags |= 0x80;
+ fis->flags &= 0xF0;
+
+ status = sci_io_request_construct_basic_sata(ireq);
+
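+ /* For FPDMA (NCQ) commands the queue tag occupies bits 7:3 of the
+ * FIS sector count field, hence the shift by 3; the same tag is also
+ * recorded in the task context for the SCU.
+ */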
+ if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ)) {
+ fis->sector_count = qc->tag << 3;
+ ireq->tc->type.stp.ncq_tag = qc->tag;
+ }
+
+ return status;
+}
+
+static enum sci_status
+sci_io_request_construct_smp(struct device *dev,
+ struct isci_request *ireq,
+ struct sas_task *task)
+{
+ struct scatterlist *sg = &task->smp_task.smp_req;
+ struct isci_remote_device *idev;
+ struct scu_task_context *task_context;
+ struct isci_port *iport;
+ struct smp_req *smp_req;
+ void *kaddr;
+ u8 req_len;
+ u32 cmd;
+
+ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ smp_req = kaddr + sg->offset;
+ /*
+ * Look at the SMP request's header fields; for certain SAS 1.x SMP
+ * functions under SAS 2.0, a zero request length really indicates
+ * a non-zero default length.
+ */
+ if (smp_req->req_len == 0) {
+ switch (smp_req->func) {
+ case SMP_DISCOVER:
+ case SMP_REPORT_PHY_ERR_LOG:
+ case SMP_REPORT_PHY_SATA:
+ case SMP_REPORT_ROUTE_INFO:
+ smp_req->req_len = 2;
+ break;
+ case SMP_CONF_ROUTE_INFO:
+ case SMP_PHY_CONTROL:
+ case SMP_PHY_TEST_FUNCTION:
+ smp_req->req_len = 9;
+ break;
+ /* Default - zero is a valid default for 2.0. */
+ }
+ }
+ req_len = smp_req->req_len;
+ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+ cmd = *(u32 *) smp_req;
+ kunmap_atomic(kaddr, KM_IRQ0);
+
+ if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
+ return SCI_FAILURE;
+
+ ireq->protocol = SCIC_SMP_PROTOCOL;
+
+ /* byte swap the smp request. */
+
+ task_context = ireq->tc;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /*
+ * Fill in the TC with its required data
+ * 00h
+ */
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+ task_context->abort = 0;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ /* 04h */
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+ task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+ /* 08h */
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 1;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ /* 0ch */
+ task_context->address_modifier = 0;
+
+ /* 10h */
+ task_context->ssp_command_iu_length = req_len;
+
+ /* 14h */
+ task_context->transfer_length_bytes = 0;
+
+ /*
+ * 18h ~ 30h, protocol specific
+ * since the command IU has been built by the framework at this point, we
+ * just copy the first DWord from the command IU to this location. */
+ memcpy(&task_context->type.smp, &cmd, sizeof(u32));
+
+ /*
+ * 40h
+ * "For SMP you could program it to zero. We would prefer that way
+ * so that done code will be consistent." - Venki
+ */
+ task_context->task_phase = 0;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+ /*
+ * Copy the physical address of the command buffer to the SCU Task
+ * Context; the command buffer should not contain the command header.
+ */
+ task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
+ task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
+
+ /* SMP response comes as UF, so no need to set response IU address. */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * isci_smp_request_build() - This function builds the smp request.
+ * @ireq: This parameter points to the isci_request allocated in the
+ * request construct function.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */
+static enum sci_status isci_smp_request_build(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct device *dev = &ireq->isci_host->pdev->dev;
+ enum sci_status status = SCI_FAILURE;
+
+ status = sci_io_request_construct_smp(dev, ireq, task);
+ if (status != SCI_SUCCESS)
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: failed with status = %d\n",
+ __func__,
+ status);
+
+ return status;
+}
+
+/**
+ * isci_io_request_build() - This function builds the io request object.
+ * @ihost: This parameter specifies the ISCI host object
+ * @request: This parameter points to the isci_request object allocated in the
+ * request construct function.
+ * @idev: This parameter is the handle for the sci core's remote device
+ * object that is the destination for this request.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */
+static enum sci_status isci_io_request_build(struct isci_host *ihost,
+ struct isci_request *request,
+ struct isci_remote_device *idev)
+{
+ enum sci_status status = SCI_SUCCESS;
+ struct sas_task *task = isci_request_access_task(request);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = 0x%p; request = %p, "
+ "num_scatter = %d\n",
+ __func__,
+ idev,
+ request,
+ task->num_scatter);
+
+ /* map the sgl addresses, if present.
+ * libata does the mapping for sata devices
+ * before we get the request.
+ */
+ if (task->num_scatter &&
+ !sas_protocol_ata(task->task_proto) &&
+ !(SAS_PROTOCOL_SMP & task->task_proto)) {
+
+ request->num_sg_entries = dma_map_sg(
+ &ihost->pdev->dev,
+ task->scatter,
+ task->num_scatter,
+ task->data_dir
+ );
+
+ if (request->num_sg_entries == 0)
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ }
+
+ status = sci_io_request_construct(ihost, idev, request);
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: failed request construct\n",
+ __func__);
+ return SCI_FAILURE;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ status = isci_smp_request_build(request);
+ break;
+ case SAS_PROTOCOL_SSP:
+ status = isci_request_ssp_request_construct(request);
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ status = isci_request_stp_request_construct(request);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
+ return SCI_FAILURE;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
+ ireq->io_tag = tag;
+ ireq->io_request_completion = NULL;
+ ireq->flags = 0;
+ ireq->num_sg_entries = 0;
+ INIT_LIST_HEAD(&ireq->completed_node);
+ INIT_LIST_HEAD(&ireq->dev_node);
+ isci_request_change_state(ireq, allocated);
+
+ return ireq;
+}
+
+static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = isci_request_from_tag(ihost, tag);
+ ireq->ttype_ptr.io_task_ptr = task;
+ ireq->ttype = io_task;
+ task->lldd_task = ireq;
+
+ return ireq;
+}
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+ struct isci_tmf *isci_tmf,
+ u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = isci_request_from_tag(ihost, tag);
+ ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
+ ireq->ttype = tmf_task;
+
+ return ireq;
+}
+
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+ struct sas_task *task, u16 tag)
+{
+ enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+ struct isci_request *ireq;
+ unsigned long flags;
+ int ret = 0;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_io_request_from_tag(ihost, task, tag);
+
+ status = isci_io_request_build(ihost, ireq, idev);
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request_construct failed - status = 0x%x\n",
+ __func__,
+ status);
+ return status;
+ }
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
+
+ if (isci_task_is_ncq_recovery(task)) {
+
+ /* The device is in an NCQ recovery state. Issue the
+ * request on the task side. Note that it will
+ * complete on the I/O request side because the
+ * request was built that way (i.e. the
+ * IREQ_TMF flag is not set).
+ */
+ status = sci_controller_start_task(ihost,
+ idev,
+ ireq);
+ } else {
+ status = SCI_FAILURE;
+ }
+ } else {
+ /* send the request to the core; the I/O tag was assigned by the caller. */
+ status = sci_controller_start_io(ihost, idev,
+ ireq);
+ }
+
+ if (status != SCI_SUCCESS &&
+ status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: failed request start (0x%x)\n",
+ __func__, status);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ return status;
+ }
+
+ /* Either I/O started OK, or the core has signaled that
+ * the device needs a target reset.
+ *
+ * In either case, hold onto the I/O for later.
+ *
+ * Update its status and add it to the list in the
+ * remote device object.
+ */
+ list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+ if (status == SCI_SUCCESS) {
+ isci_request_change_state(ireq, started);
+ } else {
+ /* The request did not really start in the
+ * hardware, so clear the request handle
+ * here so no terminations will be done.
+ */
+ set_bit(IREQ_TERMINATED, &ireq->flags);
+ isci_request_change_state(ireq, completed);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (status ==
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ /* Signal libsas that we need the SCSI error
+ * handler thread to work on this I/O and that
+ * we want a device reset.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Cause this task to be scheduled in the SCSI error
+ * handler thread.
+ */
+ isci_execpath_callback(ihost, task,
+ sas_task_abort);
+
+ /* Change the status, since we are holding
+ * the I/O until it is managed by the SCSI
+ * error handler.
+ */
+ status = SCI_SUCCESS;
+ }
+
+ return ret;
+}
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
new file mode 100644
index 000000000000..7a1d5a9778eb
--- /dev/null
+++ b/drivers/scsi/isci/request.h
@@ -0,0 +1,448 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REQUEST_H_
+#define _ISCI_REQUEST_H_
+
+#include "isci.h"
+#include "host.h"
+#include "scu_task_context.h"
+
+/**
+ * enum isci_request_status - This enum defines the possible states of an I/O
+ * request.
+ *
+ *
+ */
+enum isci_request_status {
+ unallocated = 0x00,
+ allocated = 0x01,
+ started = 0x02,
+ completed = 0x03,
+ aborting = 0x04,
+ aborted = 0x05,
+ terminating = 0x06,
+ dead = 0x07
+};
+
+enum task_type {
+ io_task = 0,
+ tmf_task = 1
+};
+
+enum sci_request_protocol {
+ SCIC_NO_PROTOCOL,
+ SCIC_SMP_PROTOCOL,
+ SCIC_SSP_PROTOCOL,
+ SCIC_STP_PROTOCOL
+}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
+
+/**
+ * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
+ * @pio_len - number of bytes requested at PIO setup
+ * @status - pio setup ending status value to tell us if we need
+ * to wait for another fis or if the transfer is complete. Upon
+ * receipt of a d2h fis this will be the status field of that fis.
+ * @sgl - track pio transfer progress as we iterate through the sgl
+ * @device_cdb_len - atapi device advertises its transfer constraints at setup
+ */
+struct isci_stp_request {
+ u32 pio_len;
+ u8 status;
+
+ struct isci_stp_pio_sgl {
+ int index;
+ u8 set;
+ u32 offset;
+ } sgl;
+ u32 device_cdb_len;
+};
+
+struct isci_request {
+ enum isci_request_status status;
+ #define IREQ_COMPLETE_IN_TARGET 0
+ #define IREQ_TERMINATED 1
+ #define IREQ_TMF 2
+ #define IREQ_ACTIVE 3
+ unsigned long flags;
+ /* XXX kill ttype and ttype_ptr, allocate full sas_task */
+ enum task_type ttype;
+ union ttype_ptr_union {
+ struct sas_task *io_task_ptr; /* When ttype==io_task */
+ struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
+ } ttype_ptr;
+ struct isci_host *isci_host;
+ /* For use in the requests_to_{complete|abort} lists: */
+ struct list_head completed_node;
+ /* For use in the reqs_in_process list: */
+ struct list_head dev_node;
+ spinlock_t state_lock;
+ dma_addr_t request_daddr;
+ dma_addr_t zero_scatter_daddr;
+ unsigned int num_sg_entries;
+ /* Note: "io_request_completion" is completed in two different ways
+ * depending on whether this is a TMF or regular request.
+ * - TMF requests are completed in the thread that started them;
+ * - regular requests are completed in the request completion callback
+ * function.
+ * This difference in operation allows the aborter of a TMF request
+ * to be sure that once the TMF request completes, the I/O that the
+ * TMF was aborting is guaranteed to have completed.
+ *
+ * XXX kill io_request_completion
+ */
+ struct completion *io_request_completion;
+ struct sci_base_state_machine sm;
+ struct isci_host *owning_controller;
+ struct isci_remote_device *target_device;
+ u16 io_tag;
+ enum sci_request_protocol protocol;
+ u32 scu_status; /* hardware result */
+ u32 sci_status; /* upper layer disposition */
+ u32 post_context;
+ struct scu_task_context *tc;
+ /* could be larger with sg chaining */
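+ /* each scu_sgl_element_pair packs two SGL elements, so round up */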
+ #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
+ struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
+ /* This field is a pointer to the stored rx frame data. It is used in
+ * STP internal requests and SMP response frames. If this field is
+ * non-NULL the saved frame must be released on IO request completion.
+ */
+ u32 saved_rx_frame_index;
+
+ union {
+ struct {
+ union {
+ struct ssp_cmd_iu cmd;
+ struct ssp_task_iu tmf;
+ };
+ union {
+ struct ssp_response_iu rsp;
+ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+ };
+ } ssp;
+ struct {
+ struct smp_resp rsp;
+ } smp;
+ struct {
+ struct isci_stp_request req;
+ struct host_to_dev_fis cmd;
+ struct dev_to_host_fis rsp;
+ } stp;
+ };
+};
+
+static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
+{
+ struct isci_request *ireq;
+
+ ireq = container_of(stp_req, typeof(*ireq), stp.req);
+ return ireq;
+}
+
+/**
+ * enum sci_base_request_states - This enumeration depicts all the states for
+ * the common request state machine.
+ *
+ *
+ */
+enum sci_base_request_states {
+ /*
+ * Simply the initial state for the base request state machine.
+ */
+ SCI_REQ_INIT,
+
+ /*
+ * This state indicates that the request has been constructed.
+ * This state is entered from the INITIAL state.
+ */
+ SCI_REQ_CONSTRUCTED,
+
+ /*
+ * This state indicates that the request has been started. This state
+ * is entered from the CONSTRUCTED state.
+ */
+ SCI_REQ_STARTED,
+
+ SCI_REQ_STP_UDMA_WAIT_TC_COMP,
+ SCI_REQ_STP_UDMA_WAIT_D2H,
+
+ SCI_REQ_STP_NON_DATA_WAIT_H2D,
+ SCI_REQ_STP_NON_DATA_WAIT_D2H,
+
+ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
+ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
+ SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
+
+ /*
+ * While in this state the IO request object is waiting for the TC
+ * completion notification for the H2D Register FIS
+ */
+ SCI_REQ_STP_PIO_WAIT_H2D,
+
+ /*
+ * While in this state the IO request object is waiting for either a
+ * PIO Setup FIS or a D2H register FIS. The type of frame received is
+ * based on the result of the prior frame and line conditions.
+ */
+ SCI_REQ_STP_PIO_WAIT_FRAME,
+
+ /*
+ * While in this state the IO request object is waiting for a DATA
+ * frame from the device.
+ */
+ SCI_REQ_STP_PIO_DATA_IN,
+
+ /*
+ * While in this state the IO request object is waiting to transmit
+ * the next data frame to the device.
+ */
+ SCI_REQ_STP_PIO_DATA_OUT,
+
+ /*
+ * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
+ * task management request is waiting for the transmission of the
+ * initial frame (i.e. command, task, etc.).
+ */
+ SCI_REQ_TASK_WAIT_TC_COMP,
+
+ /*
+ * This sub-state indicates that the started task management request
+ * is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ */
+ SCI_REQ_TASK_WAIT_TC_RESP,
+
+ /*
+ * This sub-state indicates that the started task management request
+ * is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ */
+ SCI_REQ_SMP_WAIT_RESP,
+
+ /*
+ * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
+ * request is waiting for the transmission of the initial frame
+ * (i.e. command, task, etc.).
+ */
+ SCI_REQ_SMP_WAIT_TC_COMP,
+
+ /*
+ * This state indicates that the request has completed.
+ * This state is entered from the STARTED state. This state is entered
+ * from the ABORTING state.
+ */
+ SCI_REQ_COMPLETED,
+
+ /*
+ * This state indicates that the request is in the process of being
+ * terminated/aborted.
+ * This state is entered from the CONSTRUCTED state.
+ * This state is entered from the STARTED state.
+ */
+ SCI_REQ_ABORTING,
+
+ /*
+ * Simply the final state for the base request state machine.
+ */
+ SCI_REQ_FINAL,
+};
+
+enum sci_status sci_request_start(struct isci_request *ireq);
+enum sci_status sci_io_request_terminate(struct isci_request *ireq);
+enum sci_status
+sci_io_request_event_handler(struct isci_request *ireq,
+ u32 event_code);
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+ u32 frame_index);
+enum sci_status
+sci_task_request_terminate(struct isci_request *ireq);
+extern enum sci_status
+sci_request_complete(struct isci_request *ireq);
+extern enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
+
+/* XXX open code in caller */
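+/* Translate a virtual address that lies inside the ireq allocation into the
+ * corresponding bus address, using the request's base DMA mapping.
+ */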
+static inline dma_addr_t
+sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
+{
+
+ char *requested_addr = (char *)virt_addr;
+ char *base_addr = (char *)ireq;
+
+ BUG_ON(requested_addr < base_addr);
+ BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
+
+ return ireq->request_daddr + (requested_addr - base_addr);
+}
+
+/**
+ * isci_request_change_state() - This function sets the status of the request
+ * object.
+ * @isci_request: This parameter points to the isci_request object
+ * @status: This parameter is the new status of the object
+ *
+ */
+static inline enum isci_request_status
+isci_request_change_state(struct isci_request *isci_request,
+ enum isci_request_status status)
+{
+ enum isci_request_status old_state;
+ unsigned long flags;
+
+ dev_dbg(&isci_request->isci_host->pdev->dev,
+ "%s: isci_request = %p, state = 0x%x\n",
+ __func__,
+ isci_request,
+ status);
+
+ BUG_ON(isci_request == NULL);
+
+ spin_lock_irqsave(&isci_request->state_lock, flags);
+ old_state = isci_request->status;
+ isci_request->status = status;
+ spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+ return old_state;
+}
+
+/**
+ * isci_request_change_started_to_newstate() - This function sets the status of
+ * the request object.
+ * @isci_request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ * signalled when the old request completes.
+ * @newstate: This parameter is the new status to set on the object if it is
+ * currently started or aborting.
+ *
+ * Returns the state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_newstate(struct isci_request *isci_request,
+ struct completion *completion_ptr,
+ enum isci_request_status newstate)
+{
+ enum isci_request_status old_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isci_request->state_lock, flags);
+
+ old_state = isci_request->status;
+
+ if (old_state == started || old_state == aborting) {
+ BUG_ON(isci_request->io_request_completion != NULL);
+
+ isci_request->io_request_completion = completion_ptr;
+ isci_request->status = newstate;
+ }
+
+ spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+ dev_dbg(&isci_request->isci_host->pdev->dev,
+ "%s: isci_request = %p, old_state = 0x%x\n",
+ __func__,
+ isci_request,
+ old_state);
+
+ return old_state;
+}
+
+/**
+ * isci_request_change_started_to_aborted() - This function sets the status of
+ * the request object.
+ * @isci_request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ * signalled when the old request completes.
+ *
+ * Returns the state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_aborted(struct isci_request *isci_request,
+ struct completion *completion_ptr)
+{
+ return isci_request_change_started_to_newstate(isci_request,
+ completion_ptr,
+ aborted);
+}
+
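+/* ttype_ptr accessors; each is valid only when ireq->ttype matches
+ * (io_task for the task accessor, tmf_task for the tmf accessor).
+ */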
+#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
+
+#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+ struct isci_tmf *isci_tmf,
+ u16 tag);
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+ struct sas_task *task, u16 tag);
+void isci_terminate_pending_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+enum sci_status
+sci_task_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 io_tag,
+ struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_ssp(struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_sata(struct isci_request *ireq);
+void sci_smp_request_copy_response(struct isci_request *ireq);
+
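+/* An NCQ error recovery sequence is recognized by a READ LOG EXT of the
+ * SATA NCQ command error log (log address ATA_LOG_SATA_NCQ).
+ */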
+static inline int isci_task_is_ncq_recovery(struct sas_task *task)
+{
+ return (sas_protocol_ata(task->task_proto) &&
+ task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
+ task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
+
+}
+
+#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
new file mode 100644
index 000000000000..462b15174d3f
--- /dev/null
+++ b/drivers/scsi/isci/sas.h
@@ -0,0 +1,219 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCI_SAS_H_
+#define _SCI_SAS_H_
+
+#include <linux/kernel.h>
+
+/*
+ * SATA FIS Types: These constants depict the various SATA FIS types defined in
+ * the serial ATA specification.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+#define FIS_REGH2D 0x27
+#define FIS_REGD2H 0x34
+#define FIS_SETDEVBITS 0xA1
+#define FIS_DMA_ACTIVATE 0x39
+#define FIS_DMA_SETUP 0x41
+#define FIS_BIST_ACTIVATE 0x58
+#define FIS_PIO_SETUP 0x5F
+#define FIS_DATA 0x46
+
+/**************************************************************************/
+#define SSP_RESP_IU_MAX_SIZE 280
+
+/*
+ * contents of the SSP COMMAND INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_cmd_iu {
+ u8 LUN[8];
+ u8 add_cdb_len:6;
+ u8 _r_a:2;
+ u8 _r_b;
+ u8 en_fburst:1;
+ u8 task_prio:4;
+ u8 task_attr:3;
+ u8 _r_c;
+
+ u8 cdb[16];
+} __packed;
+
+/*
+ * contents of the SSP TASK INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_task_iu {
+ u8 LUN[8];
+ u8 _r_a;
+ u8 task_func;
+ u8 _r_b[4];
+ u16 task_tag;
+ u8 _r_c[12];
+} __packed;
+
+
+/*
+ * struct smp_req_phy_id - This structure defines the contents of
+ * an SMP Request that is comprised of the struct smp_request_header and a
+ * phy identifier.
+ * Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phy_id {
+ u8 _r_a[4]; /* bytes 4-7 */
+
+ u8 ign_zone_grp:1; /* byte 8 */
+ u8 _r_b:7;
+
+ u8 phy_id; /* byte 9 */
+ u8 _r_c; /* byte 10 */
+ u8 _r_d; /* byte 11 */
+} __packed;
+
+/*
+ * struct smp_req_config_route_info - This structure defines the
+ * contents of an SMP Configure Route Information request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_conf_rtinfo {
+ u16 exp_change_cnt; /* bytes 4-5 */
+ u8 exp_rt_idx_hi; /* byte 6 */
+ u8 exp_rt_idx; /* byte 7 */
+
+ u8 _r_a; /* byte 8 */
+ u8 phy_id; /* byte 9 */
+ u16 _r_b; /* bytes 10-11 */
+
+ u8 _r_c:7; /* byte 12 */
+ u8 dis_rt_entry:1;
+ u8 _r_d[3]; /* bytes 13-15 */
+
+ u8 rt_sas_addr[8]; /* bytes 16-23 */
+ u8 _r_e[16]; /* bytes 24-39 */
+} __packed;
+
+/*
+ * struct smp_req_phycntl - This structure defines the contents of an
+ * SMP Phy Controller request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phycntl {
+ u16 exp_change_cnt; /* byte 4-5 */
+
+ u8 _r_a[3]; /* bytes 6-8 */
+
+ u8 phy_id; /* byte 9 */
+ u8 phy_op; /* byte 10 */
+
+ u8 upd_pathway:1; /* byte 11 */
+ u8 _r_b:7;
+
+ u8 _r_c[12]; /* byte 12-23 */
+
+ u8 att_dev_name[8]; /* byte 24-31 */
+
+ u8 _r_d:4; /* byte 32 */
+ u8 min_linkrate:4;
+
+ u8 _r_e:4; /* byte 33 */
+ u8 max_linkrate:4;
+
+ u8 _r_f[2]; /* byte 34-35 */
+
+ u8 pathway:4; /* byte 36 */
+ u8 _r_g:4;
+
+ u8 _r_h[3]; /* bytes 37-39 */
+} __packed;
+
+/*
+ * struct smp_req - This structure simply unionizes the existing request
+ * structures into a common request type.
+ *
+ * XXX: This data structure may need to go to scsi/sas.h
+ */
+struct smp_req {
+ u8 type; /* byte 0 */
+ u8 func; /* byte 1 */
+ u8 alloc_resp_len; /* byte 2 */
+ u8 req_len; /* byte 3 */
+ u8 req_data[0];
+} __packed;
+
+#define SMP_RESP_HDR_SZ 4
+
+/*
+ * struct sci_sas_address - This structure depicts how a SAS address is
+ * represented by SCI.
+ * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
+ *
+ */
+struct sci_sas_address {
+ u32 high;
+ u32 low;
+};
+#endif
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
new file mode 100644
index 000000000000..c8b329c695f9
--- /dev/null
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -0,0 +1,283 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_COMPLETION_CODES_HEADER_
+#define _SCU_COMPLETION_CODES_HEADER_
+
+/**
+ * This file contains the constants and macros for the SCU hardware completion
+ * codes.
+ *
+ *
+ */
+
+#define SCU_COMPLETION_TYPE_SHIFT 28
+#define SCU_COMPLETION_TYPE_MASK 0x70000000
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * This macro constructs an SCU completion type
+ */
+#define SCU_COMPLETION_TYPE(type) \
+ ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * These macros contain the SCU completion types SCU_COMPLETION_TYPE
+ */
+#define SCU_COMPLETION_TYPE_TASK SCU_COMPLETION_TYPE(0)
+#define SCU_COMPLETION_TYPE_SDMA SCU_COMPLETION_TYPE(1)
+#define SCU_COMPLETION_TYPE_UFI SCU_COMPLETION_TYPE(2)
+#define SCU_COMPLETION_TYPE_EVENT SCU_COMPLETION_TYPE(3)
+#define SCU_COMPLETION_TYPE_NOTIFY SCU_COMPLETION_TYPE(4)
+
+/**
+ *
+ *
+ * These constants provide the shift and mask values for the various parts of
+ * an SCU completion code.
+ */
+#define SCU_COMPLETION_STATUS_MASK 0x0FFC0000
+#define SCU_COMPLETION_TL_STATUS_MASK 0x0FC00000
+#define SCU_COMPLETION_TL_STATUS_SHIFT 22
+#define SCU_COMPLETION_SDMA_STATUS_MASK 0x003C0000
+#define SCU_COMPLETION_PEG_MASK 0x00010000
+#define SCU_COMPLETION_PORT_MASK 0x00007000
+#define SCU_COMPLETION_PE_MASK SCU_COMPLETION_PORT_MASK
+#define SCU_COMPLETION_PE_SHIFT 12
+#define SCU_COMPLETION_INDEX_MASK 0x00000FFF
+
+/**
+ * SCU_GET_COMPLETION_TYPE() -
+ *
+ * This macro returns the SCU completion type.
+ */
+#define SCU_GET_COMPLETION_TYPE(completion_code) \
+ ((completion_code) & SCU_COMPLETION_TYPE_MASK)
+
+/**
+ * SCU_GET_COMPLETION_STATUS() -
+ *
+ * This macro returns the SCU completion status.
+ */
+#define SCU_GET_COMPLETION_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_TL_STATUS() -
+ *
+ * This macro returns the transport layer completion status.
+ */
+#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
+
+/**
+ * SCU_MAKE_COMPLETION_STATUS() -
+ *
+ * This macro takes a completion code and performs the shift and mask
+ * operations to turn it into a completion code that can be compared to a
+ * SCU_GET_COMPLETION_TL_STATUS.
+ */
+#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
+ ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
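+
+/* Illustrative pairing: a raw hardware completion value reduced with
+ * SCU_GET_COMPLETION_TL_STATUS() can be compared against
+ * SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD) and the other
+ * SCU_TASK_DONE_* codes defined below.
+ */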
+
+/**
+ * SCU_NORMALIZE_COMPLETION_STATUS() -
+ *
+ * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
+ * return code.
+ */
+#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
+ (\
+ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
+ >> SCU_COMPLETION_TL_STATUS_SHIFT \
+ )
+
+/**
+ * SCU_GET_COMPLETION_SDMA_STATUS() -
+ *
+ * This macro returns the SDMA completion status.
+ */
+#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PEG() -
+ *
+ * This macro returns the Protocol Engine Group from the completion code.
+ */
+#define SCU_GET_COMPLETION_PEG(completion_code) \
+ ((completion_code) & SCU_COMPLETION_PEG_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PORT() -
+ *
+ * This macro returns the logical port index from the completion code.
+ */
+#define SCU_GET_COMPLETION_PORT(completion_code) \
+ ((completion_code) & SCU_COMPLETION_PORT_MASK)
+
+/**
+ * SCU_GET_PROTOCOL_ENGINE_INDEX() -
+ *
+ * This macro returns the PE index from the completion code.
+ */
+#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
+ (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
+
+/**
+ * SCU_GET_COMPLETION_INDEX() -
+ *
+ * This macro returns the index of the completion which is either a TCi or an
+ * RNi depending on the completion type.
+ */
+#define SCU_GET_COMPLETION_INDEX(completion_code) \
+ ((completion_code) & SCU_COMPLETION_INDEX_MASK)
+
+#define SCU_UNSOLICITED_FRAME_MASK 0x0FFF0000
+#define SCU_UNSOLICITED_FRAME_SHIFT 16
+
+/**
+ * SCU_GET_FRAME_INDEX() -
+ *
+ * This macro returns a normalized frame index from an unsolicited frame
+ * completion.
+ */
+#define SCU_GET_FRAME_INDEX(completion_code) \
+ (\
+ ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
+ >> SCU_UNSOLICITED_FRAME_SHIFT \
+ )
+
+#define SCU_UNSOLICITED_FRAME_ERROR_MASK 0x00008000
+
+/**
+ * SCU_GET_FRAME_ERROR() -
+ *
+ * This macro returns a zero (0) value if there is no frame error; otherwise it
+ * returns non-zero (!0).
+ */
+#define SCU_GET_FRAME_ERROR(completion_code) \
+ ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
+
+/**
+ *
+ *
+ * These constants represent normalized completion codes which must be shifted
+ * 18 bits to match them with the hardware completion code. In a 16-bit compiler,
+ * immediate constants are 16-bit values (the size of an int). If we shift
+ * those by 18 bits, we completely lose the value. To ensure the value is a
+ * 32-bit value like we want, each immediate value must be cast to a u32.
+ */
+#define SCU_TASK_DONE_GOOD ((u32)0x00)
+#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
+#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
+#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
+#define SCU_TASK_DONE_NAK_CMD_ERR ((u32)0x16)
+#define SCU_TASK_DONE_CMD_LL_R_ERR ((u32)0x16)
+#define SCU_TASK_DONE_LL_R_ERR ((u32)0x17)
+#define SCU_TASK_DONE_ACK_NAK_TO ((u32)0x17)
+#define SCU_TASK_DONE_LL_PERR ((u32)0x18)
+#define SCU_TASK_DONE_LL_SY_TERM ((u32)0x19)
+#define SCU_TASK_DONE_NAK_ERR ((u32)0x19)
+#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
+#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
+#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
+#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
+#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
+#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
+#define SCU_TASK_DONE_INV_FIS_TYPE ((u32)0x1D)
+#define SCU_TASK_DONE_XR_IU_LEN_ERR ((u32)0x1D)
+#define SCU_TASK_DONE_INV_FIS_LEN ((u32)0x1E)
+#define SCU_TASK_DONE_XR_WD_LEN ((u32)0x1E)
+#define SCU_TASK_DONE_SDMA_ERR ((u32)0x1F)
+#define SCU_TASK_DONE_OFFSET_ERR ((u32)0x20)
+#define SCU_TASK_DONE_MAX_PLD_ERR ((u32)0x21)
+#define SCU_TASK_DONE_EXCESS_DATA ((u32)0x22)
+#define SCU_TASK_DONE_LF_ERR ((u32)0x23)
+#define SCU_TASK_DONE_UNEXP_FIS ((u32)0x24)
+#define SCU_TASK_DONE_UNEXP_RESP ((u32)0x24)
+#define SCU_TASK_DONE_EARLY_RESP ((u32)0x25)
+#define SCU_TASK_DONE_SMP_RESP_TO_ERR ((u32)0x26)
+#define SCU_TASK_DONE_DMASETUP_DIRERR ((u32)0x27)
+#define SCU_TASK_DONE_SMP_UFI_ERR ((u32)0x27)
+#define SCU_TASK_DONE_XFERCNT_ERR ((u32)0x28)
+#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR ((u32)0x28)
+#define SCU_TASK_DONE_SMP_LL_RX_ERR ((u32)0x29)
+#define SCU_TASK_DONE_RESP_LEN_ERR ((u32)0x2A)
+#define SCU_TASK_DONE_UNEXP_DATA ((u32)0x2B)
+#define SCU_TASK_DONE_OPEN_FAIL ((u32)0x2C)
+#define SCU_TASK_DONE_UNEXP_SDBFIS ((u32)0x2D)
+#define SCU_TASK_DONE_REG_ERR ((u32)0x2E)
+#define SCU_TASK_DONE_SDB_ERR ((u32)0x2F)
+#define SCU_TASK_DONE_TASK_ABORT ((u32)0x30)
+#define SCU_TASK_DONE_CMD_SDMA_ERR ((u32)0x32)
+#define SCU_TASK_DONE_CMD_LL_ABORT_ERR ((u32)0x33)
+#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ((u32)0x34)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1 ((u32)0x35)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2 ((u32)0x36)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3 ((u32)0x37)
+#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION ((u32)0x38)
+#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION ((u32)0x39)
+#define SCU_TASK_DONE_VIIT_ENTRY_NV ((u32)0x3A)
+#define SCU_TASK_DONE_IIT_ENTRY_NV ((u32)0x3B)
+#define SCU_TASK_DONE_RNCNV_OUTBOUND ((u32)0x3C)
+#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY ((u32)0x3D)
+#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED ((u32)0x3E)
+#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED ((u32)0x3F)
+
+#endif /* _SCU_COMPLETION_CODES_HEADER_ */
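
A minimal usage sketch for the macros above, assuming the raw 32-bit value has been read from the completion queue and the usual kernel headers are included; the function and variable names here are illustrative only, not part of this patch:

static void example_decode_completion(u32 completion_entry)
{
	/* The type field selects task, SDMA, UFI, event or notify completions. */
	if (SCU_GET_COMPLETION_TYPE(completion_entry) == SCU_COMPLETION_TYPE_TASK) {
		u32 tci = SCU_GET_COMPLETION_INDEX(completion_entry);

		/* Normalized codes must be shifted into position before they can
		 * be compared against the transport layer status bits.
		 */
		if (SCU_GET_COMPLETION_TL_STATUS(completion_entry) ==
		    SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD))
			pr_debug("TCi %u completed without error\n", tci);
	}
}
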
diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
new file mode 100644
index 000000000000..36a945ad5722
--- /dev/null
+++ b/drivers/scsi/isci/scu_event_codes.h
@@ -0,0 +1,336 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_EVENT_CODES_HEADER__
+#define __SCU_EVENT_CODES_HEADER__
+
+/**
+ * This file contains the constants and macros for the SCU event codes.
+ *
+ *
+ */
+
+#define SCU_EVENT_TYPE_CODE_SHIFT 24
+#define SCU_EVENT_TYPE_CODE_MASK 0x0F000000
+
+#define SCU_EVENT_SPECIFIC_CODE_SHIFT 18
+#define SCU_EVENT_SPECIFIC_CODE_MASK 0x00FC0000
+
+#define SCU_EVENT_CODE_MASK \
+ (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * This macro constructs an SCU event type from the type value.
+ */
+#define SCU_EVENT_TYPE(type) \
+ ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_SPECIFIC() -
+ *
+ * This macro constructs an SCU event specifier from the code value.
+ */
+#define SCU_EVENT_SPECIFIC(code) \
+ ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_MESSAGE() -
+ *
+ * This macro combines an SCU event type and an SCU event specifier into an
+ * event message from the type and code values.
+ */
+#define SCU_EVENT_MESSAGE(type, code) \
+ ((type) | SCU_EVENT_SPECIFIC(code))
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * SCU_EVENT_TYPES
+ */
+#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR SCU_EVENT_TYPE(0x08)
+#define SCU_EVENT_TYPE_SMU_PCQ_ERROR SCU_EVENT_TYPE(0x09)
+#define SCU_EVENT_TYPE_SMU_ERROR SCU_EVENT_TYPE(0x00)
+#define SCU_EVENT_TYPE_TRANSPORT_ERROR SCU_EVENT_TYPE(0x01)
+#define SCU_EVENT_TYPE_BROADCAST_CHANGE SCU_EVENT_TYPE(0x02)
+#define SCU_EVENT_TYPE_OSSP_EVENT SCU_EVENT_TYPE(0x03)
+#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX SCU_EVENT_TYPE(0x04)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX SCU_EVENT_TYPE(0x05)
+#define SCU_EVENT_TYPE_RNC_OPS_MISC SCU_EVENT_TYPE(0x06)
+#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
+#define SCU_EVENT_TYPE_ERR_CNT_EVENT SCU_EVENT_TYPE(0x0A)
+
+/**
+ *
+ *
+ * SCU_EVENT_SPECIFIERS
+ */
+#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
+#define SCU_EVENT_SPECIFIER_RNC_RELEASE 0x00
+
+/**
+ *
+ *
+ * SMU_COMMAND_EVENTS
+ */
+#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_PCQ_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
+#define SCU_EVENT_PCIE_INTERFACE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
+#define SCU_EVENT_FUNCTION_LEVEL_RESET \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
+
+/**
+ *
+ *
+ * TRANSPORT_LEVEL_ERRORS
+ */
+#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
+
+/**
+ *
+ *
+ * BROADCAST_CHANGE_EVENTS
+ */
+#define SCU_EVENT_BROADCAST_CHANGE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
+#define SCU_EVENT_BROADCAST_RESERVED0 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
+#define SCU_EVENT_BROADCAST_RESERVED1 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
+#define SCU_EVENT_BROADCAST_SES \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
+#define SCU_EVENT_BROADCAST_EXPANDER \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
+#define SCU_EVENT_BROADCAST_AEN \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
+#define SCU_EVENT_BROADCAST_RESERVED3 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
+#define SCU_EVENT_BROADCAST_RESERVED4 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
+#define SCU_EVENT_PE_SUSPENDED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
+
+/**
+ *
+ *
+ * OSSP_EVENTS
+ */
+#define SCU_EVENT_PORT_SELECTOR_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
+#define SCU_EVENT_SENT_PORT_SELECTION \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
+#define SCU_EVENT_HARD_RESET_TRANSMITTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
+#define SCU_EVENT_HARD_RESET_RECEIVED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
+#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
+#define SCU_EVENT_LINK_FAILURE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
+#define SCU_EVENT_SATA_SPINUP_HOLD \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
+#define SCU_EVENT_SAS_15_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
+#define SCU_EVENT_SAS_15 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
+#define SCU_EVENT_SAS_30_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
+#define SCU_EVENT_SAS_30 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
+#define SCU_EVENT_SAS_60_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
+#define SCU_EVENT_SAS_60 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
+#define SCU_EVENT_SATA_15_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
+#define SCU_EVENT_SATA_15 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
+#define SCU_EVENT_SATA_30_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
+#define SCU_EVENT_SATA_30 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
+#define SCU_EVENT_SATA_60_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
+#define SCU_EVENT_SATA_60 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
+#define SCU_EVENT_SAS_PHY_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
+#define SCU_EVENT_SATA_PHY_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
+
+/**
+ *
+ *
+ * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
+ */
+#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00)
+#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01)
+#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02)
+
+/**
+ *
+ *
+ * REMOTE_NODE_SUSPEND_EVENTS
+ */
+#define SCU_EVENT_TL_RNC_SUSPEND_TX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
+#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
+
+/**
+ *
+ *
+ * REMOTE_NODE_MISC_EVENTS
+ */
+#define SCU_EVENT_POST_RCN_RELEASE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
+#define SCU_EVENT_POST_RNC_COMPLETE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
+#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
+
+/**
+ *
+ *
+ * ERROR_COUNT_EVENT
+ */
+#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
+#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
+#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
+
+/**
+ * scu_get_event_type() -
+ *
+ * This macro returns the SCU event type from the event code.
+ */
+#define scu_get_event_type(event_code) \
+ ((event_code) & SCU_EVENT_TYPE_CODE_MASK)
+
+/**
+ * scu_get_event_specifier() -
+ *
+ * This macro returns the SCU event specifier from the event code.
+ */
+#define scu_get_event_specifier(event_code) \
+ ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * scu_get_event_code() -
+ *
+ * This macro returns the combined SCU event type and SCU event specifier from
+ * the event code.
+ */
+#define scu_get_event_code(event_code) \
+ ((event_code) & SCU_EVENT_CODE_MASK)
+
+
+/**
+ *
+ *
+ * PTS_SCHEDULE_EVENT
+ */
+#define SCU_EVENT_SMP_RESPONSE_NO_PE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
+#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
+ scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
+
+#define SCU_EVENT_TASK_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
+#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \
+ scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
+
+#define SCU_EVENT_IT_NEXUS_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
+#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
+ scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
+
+
+#endif /* __SCU_EVENT_CODES_HEADER__ */
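
A similar sketch (illustrative only, assuming the header above plus the usual kernel headers) showing how a raw event queue entry could be classified with these macros:

static void example_decode_event(u32 event_code)
{
	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/* scu_get_event_code() keeps both the type and specifier bits,
		 * so it compares directly against SCU_EVENT_BROADCAST_CHANGE.
		 */
		if (scu_get_event_code(event_code) == SCU_EVENT_BROADCAST_CHANGE)
			pr_debug("broadcast (change) primitive received\n");
		break;
	case SCU_EVENT_TYPE_OSSP_EVENT:
		pr_debug("phy event, specifier %#x\n",
			 scu_get_event_specifier(event_code));
		break;
	default:
		break;
	}
}
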
diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
new file mode 100644
index 000000000000..33745adc826b
--- /dev/null
+++ b/drivers/scsi/isci/scu_remote_node_context.h
@@ -0,0 +1,229 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
+#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
+
+/**
+ * This file contains the structures and constants used by the SCU hardware to
+ * describe a remote node context.
+ *
+ *
+ */
+
+/**
+ * struct ssp_remote_node_context - This structure contains the SCU hardware
+ * definition for an SSP remote node.
+ *
+ *
+ */
+struct ssp_remote_node_context {
+ /* WORD 0 */
+
+ /**
+ * This field is the remote node index assigned for this remote node. All
+ * remote nodes must have a unique remote node index. The value of the remote
+ * node index can not exceed the maximum number of remote nodes reported in
+ * the SCU device context capacity register.
+ */
+ u32 remote_node_index:12;
+ u32 reserved0_1:4;
+
+ /**
+ * This field tells the SCU hardware how many simultaneous connections
+ * this remote node will support.
+ */
+ u32 remote_node_port_width:4;
+
+ /**
+ * This field tells the SCU hardware which logical port to associate with this
+ * remote node.
+ */
+ u32 logical_port_index:3;
+ u32 reserved0_2:5;
+
+ /**
+ * This field will enable the I_T nexus loss timer for this remote node.
+ */
+ u32 nexus_loss_timer_enable:1;
+
+ /**
+ * This field is for driver debug only and is not used.
+ */
+ u32 check_bit:1;
+
+ /**
+ * This field must be set to true when the hardware DMAs the remote node
+ * context to the hardware SRAM. When the remote node is being invalidated
+ * this field must be set to false.
+ */
+ u32 is_valid:1;
+
+ /**
+ * This field must be set to true.
+ */
+ u32 is_remote_node_context:1;
+
+ /* WORD 1 - 2 */
+
+ /**
+ * This is the low word of the remote device SAS Address
+ */
+ u32 remote_sas_address_lo;
+
+ /**
+ * This field is the high word of the remote device SAS Address
+ */
+ u32 remote_sas_address_hi;
+
+ /* WORD 3 */
+ /**
+ * This field represents the function number assigned to this remote device.
+ * This value must match the virtual function number that is being used to
+ * communicate to the device.
+ */
+ u32 function_number:8;
+ u32 reserved3_1:8;
+
+ /**
+ * This field provides the driver a way to cheat on the arbitration wait time
+ * for this remote node.
+ */
+ u32 arbitration_wait_time:16;
+
+ /* WORD 4 */
+ /**
+ * This field tells the SCU hardware how long this device may occupy the
+ * connection before it must be closed.
+ */
+ u32 connection_occupancy_timeout:16;
+
+ /**
+ * This field tells the SCU hardware how long to maintain a connection when
+ * there are no frames being transmitted on the link.
+ */
+ u32 connection_inactivity_timeout:16;
+
+ /* WORD 5 */
+ /**
+ * This field allows the driver to cheat on the arbitration wait time for this
+ * remote node.
+ */
+ u32 initial_arbitration_wait_time:16;
+
+ /**
+ * This field tells the hardware what to program for the connection rate in
+ * the open address frame. See the SAS spec for valid values.
+ */
+ u32 oaf_connection_rate:4;
+
+ /**
+ * This field tells the SCU hardware what to program for the features in the
+ * open address frame. See the SAS spec for valid values.
+ */
+ u32 oaf_features:4;
+
+ /**
+ * This field tells the SCU hardware what to use for the source zone group in
+ * the open address frame. See the SAS spec for more details on zoning.
+ */
+ u32 oaf_source_zone_group:8;
+
+ /* WORD 6 */
+ /**
+ * This field tells the SCU hardware what to use for the more compatibility
+ * features in the open address frame. See the SAS Spec for details.
+ */
+ u32 oaf_more_compatibility_features;
+
+ /* WORD 7 */
+ u32 reserved7;
+
+};
+
+/**
+ * struct stp_remote_node_context - This structure contains the SCU hardware
+ * definition for a STP remote node.
+ *
+ * STP Targets are not yet supported so this definition is a placeholder until
+ * we do support them.
+ */
+struct stp_remote_node_context {
+ /**
+ * Placeholder data for the STP remote node.
+ */
+ u32 data[8];
+
+};
+
+/**
+ * This union combines the SAS and SATA remote node definitions.
+ *
+ * union scu_remote_node_context
+ */
+union scu_remote_node_context {
+ /**
+ * SSP Remote Node
+ */
+ struct ssp_remote_node_context ssp;
+
+ /**
+ * STP Remote Node
+ */
+ struct stp_remote_node_context stp;
+
+};
+
+#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
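
A sketch of how a driver might fill in the SSP variant of this context before posting it; the port width and port number used here are made-up example values, and lower_32_bits()/upper_32_bits() are the standard kernel helpers:

static void example_init_ssp_rnc(union scu_remote_node_context *rnc,
				 u64 sas_address, u16 rni)
{
	memset(rnc, 0, sizeof(*rnc));

	rnc->ssp.remote_node_index = rni;	/* must be unique per remote node */
	rnc->ssp.remote_node_port_width = 1;	/* narrow port: one simultaneous connection */
	rnc->ssp.logical_port_index = 0;
	rnc->ssp.is_remote_node_context = 1;	/* always true for an RNC */
	rnc->ssp.is_valid = 1;			/* valid while DMAed to hardware SRAM */

	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_address);
	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_address);
}
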
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
new file mode 100644
index 000000000000..7df87d923285
--- /dev/null
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -0,0 +1,942 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_TASK_CONTEXT_H_
+#define _SCU_TASK_CONTEXT_H_
+
+/**
+ * This file contains the structures and constants for the SCU hardware task
+ * context.
+ *
+ *
+ */
+
+
+/**
+ * enum scu_ssp_task_type - This enumeration defines the various SSP task
+ * types the SCU hardware will accept. The definition for the various task
+ * types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+ SCU_TASK_TYPE_IOREAD, /* IO READ direction or no direction */
+ SCU_TASK_TYPE_IOWRITE, /* IO Write direction */
+ SCU_TASK_TYPE_SMP_REQUEST, /* SMP Request type */
+ SCU_TASK_TYPE_RESPONSE, /* Driver generated response frame (target mode) */
+ SCU_TASK_TYPE_RAW_FRAME, /* Raw frame request type */
+ SCU_TASK_TYPE_PRIMITIVE /* Request for a primitive to be transmitted */
+} scu_ssp_task_type;
+
+/**
+ * enum scu_sata_task_type - This enumeration defines the various SATA task
+ * types the SCU hardware will accept. The definition for the various task
+ * types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+ SCU_TASK_TYPE_DMA_IN, /* Read request */
+ SCU_TASK_TYPE_FPDMAQ_READ, /* NCQ read request */
+ SCU_TASK_TYPE_PACKET_DMA_IN, /* Packet read request */
+ SCU_TASK_TYPE_SATA_RAW_FRAME, /* Raw frame request */
+ RESERVED_4,
+ RESERVED_5,
+ RESERVED_6,
+ RESERVED_7,
+ SCU_TASK_TYPE_DMA_OUT, /* Write request */
+ SCU_TASK_TYPE_FPDMAQ_WRITE, /* NCQ write Request */
+ SCU_TASK_TYPE_PACKET_DMA_OUT /* Packet write request */
+} scu_sata_task_type;
+
+
+/**
+ *
+ *
+ * SCU_CONTEXT_TYPE
+ */
+#define SCU_TASK_CONTEXT_TYPE 0
+#define SCU_RNC_CONTEXT_TYPE 1
+
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_VALIDITY
+ */
+#define SCU_TASK_CONTEXT_INVALID 0
+#define SCU_TASK_CONTEXT_VALID 1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CODE
+ */
+#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK 0
+#define SCU_COMMAND_CODE_ACTIVE_TASK 1
+#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK 2
+#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES 3
+
+/**
+ *
+ *
+ * SCU_TASK_PRIORITY
+ */
+/**
+ *
+ *
+ * This priority is used when no specific priority is requested for this request.
+ */
+#define SCU_TASK_PRIORITY_NORMAL 0
+
+/**
+ *
+ *
+ * This priority indicates that the task should be scheduled to the head of the
+ * queue. The task will NOT be executed if the TX is suspended for the remote
+ * node.
+ */
+#define SCU_TASK_PRIORITY_HEAD_OF_Q 1
+
+/**
+ *
+ *
+ * This priority indicates that the task will be executed before all
+ * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
+ * WILL be executed if the TX is suspended for the remote node.
+ */
+#define SCU_TASK_PRIORITY_HIGH 2
+
+/**
+ *
+ *
+ * This task priority is reserved and should not be used.
+ */
+#define SCU_TASK_PRIORITY_RESERVED 3
+
+#define SCU_TASK_INITIATOR_MODE 1
+#define SCU_TASK_TARGET_MODE 0
+
+#define SCU_TASK_REGULAR 0
+#define SCU_TASK_ABORTED 1
+
+/* direction bit definition */
+/**
+ *
+ *
+ * SATA_DIRECTION
+ */
+#define SCU_SATA_WRITE_DATA_DIRECTION 0
+#define SCU_SATA_READ_DATA_DIRECTION 1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift
+ * operations to construct the various SCU commands
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT 21
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK 0x00E00000
+#define scu_get_command_request_type(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT 18
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK 0x001C0000
+#define scu_get_command_request_subtype(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK \
+ (\
+ SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK \
+ | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK \
+ )
+#define scu_get_command_request_full_type(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT 16
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK 0x00010000
+#define scu_get_command_protocl_engine_group(x) \
+ ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
+
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT 12
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK 0x00007000
+#define scu_get_command_reqeust_logical_port(x) \
+ ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
+
+
+#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
+ ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
+
+/**
+ * MAKE_SCU_CONTEXT_COMMAND_TYPE() -
+ *
+ * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU
+ * command types.
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
+
+#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command) \
+ ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
+
+/**
+ *
+ *
+ * SCU_REQUEST_TYPES These constants are the various request types that can be
+ * posted to the SCU hardware.
+ */
+#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
+
+#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_32 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_96 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_32 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_96 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
+
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_PROTOCOL SCU task context protocol types; these are used
+ * to program the SCU task context protocol field in word 0x00.
+ */
+#define SCU_TASK_CONTEXT_PROTOCOL_SMP 0x00
+#define SCU_TASK_CONTEXT_PROTOCOL_SSP 0x01
+#define SCU_TASK_CONTEXT_PROTOCOL_STP 0x02
+#define SCU_TASK_CONTEXT_PROTOCOL_NONE 0x07
+
+/**
+ * struct ssp_task_context - This is the SCU hardware definition for an SSP
+ * request.
+ *
+ *
+ */
+struct ssp_task_context {
+ /* OFFSET 0x18 */
+ u32 reserved00:24;
+ u32 frame_type:8;
+
+ /* OFFSET 0x1C */
+ u32 reserved01;
+
+ /* OFFSET 0x20 */
+ u32 fill_bytes:2;
+ u32 reserved02:6;
+ u32 changing_data_pointer:1;
+ u32 retransmit:1;
+ u32 retry_data_frame:1;
+ u32 tlr_control:2;
+ u32 reserved03:19;
+
+ /* OFFSET 0x24 */
+ u32 uiRsvd4;
+
+ /* OFFSET 0x28 */
+ u32 target_port_transfer_tag:16;
+ u32 tag:16;
+
+ /* OFFSET 0x2C */
+ u32 data_offset;
+};
+
+/**
+ * struct stp_task_context - This is the SCU hardware definition for an STP
+ * request.
+ *
+ *
+ */
+struct stp_task_context {
+ /* OFFSET 0x18 */
+ u32 fis_type:8;
+ u32 pm_port:4;
+ u32 reserved0:3;
+ u32 control:1;
+ u32 command:8;
+ u32 features:8;
+
+ /* OFFSET 0x1C */
+ u32 reserved1;
+
+ /* OFFSET 0x20 */
+ u32 reserved2;
+
+ /* OFFSET 0x24 */
+ u32 reserved3;
+
+ /* OFFSET 0x28 */
+ u32 ncq_tag:5;
+ u32 reserved4:27;
+
+ /* OFFSET 0x2C */
+ u32 data_offset; /* TODO: What is this used for? */
+};
+
+/**
+ * struct smp_task_context - This is the SCU hardware definition for an SMP
+ * request.
+ *
+ *
+ */
+struct smp_task_context {
+ /* OFFSET 0x18 */
+ u32 response_length:8;
+ u32 function_result:8;
+ u32 function:8;
+ u32 frame_type:8;
+
+ /* OFFSET 0x1C */
+ u32 smp_response_ufi:12;
+ u32 reserved1:20;
+
+ /* OFFSET 0x20 */
+ u32 reserved2;
+
+ /* OFFSET 0x24 */
+ u32 reserved3;
+
+ /* OFFSET 0x28 */
+ u32 reserved4;
+
+ /* OFFSET 0x2C */
+ u32 reserved5;
+};
+
+/**
+ * struct primitive_task_context - This is the SCU hardware definition used
+ * when the driver wants to send a primitive on the link.
+ *
+ *
+ */
+struct primitive_task_context {
+ /* OFFSET 0x18 */
+ /**
+ * This field is the control word and it must be 0.
+ */
+ u32 control; /* must be set to 0 */
+
+ /* OFFSET 0x1C */
+ /**
+ * This field specifies the primitive that is to be transmitted.
+ */
+ u32 sequence;
+
+ /* OFFSET 0x20 */
+ u32 reserved0;
+
+ /* OFFSET 0x24 */
+ u32 reserved1;
+
+ /* OFFSET 0x28 */
+ u32 reserved2;
+
+ /* OFFSET 0x2C */
+ u32 reserved3;
+};
+
+/**
+ * The union of the protocols that can be selected in the SCU task context
+ * field.
+ *
+ * protocol_context
+ */
+union protocol_context {
+ struct ssp_task_context ssp;
+ struct stp_task_context stp;
+ struct smp_task_context smp;
+ struct primitive_task_context primitive;
+ u32 words[6];
+};
+
+/**
+ * struct scu_sgl_element - This structure represents a single SCU defined SGL
+ * element. SCU SGLs contain a 64 bit address with the maximum data transfer
+ * length being a 24 bit value. The SGL cannot cross a 4GB boundary.
+ *
+ * struct scu_sgl_element
+ */
+struct scu_sgl_element {
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address.
+ */
+ u32 address_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address.
+ */
+ u32 address_lower;
+
+ /**
+ * This field is the number of bytes to transfer.
+ */
+ u32 length:24;
+
+ /**
+ * This field is the address modifier to be used when a virtual function is
+ * requesting a data transfer.
+ */
+ u32 address_modifier:8;
+
+};
+
+#define SCU_SGL_ELEMENT_PAIR_A 0
+#define SCU_SGL_ELEMENT_PAIR_B 1
+
+/**
+ * struct scu_sgl_element_pair - This structure is the SCU hardware definition
+ * of a pair of SGL elements. The SCU hardware always works on SGL pairs.
+ * They are referred to in the DS specification as SGL A and SGL B. Each SGL
+ * pair is followed by the address of the next pair.
+ *
+ *
+ */
+struct scu_sgl_element_pair {
+ /* OFFSET 0x60-0x68 */
+ /**
+ * This field is the SGL element A of the SGL pair.
+ */
+ struct scu_sgl_element A;
+
+ /* OFFSET 0x6C-0x74 */
+ /**
+ * This field is the SGL element B of the SGL pair.
+ */
+ struct scu_sgl_element B;
+
+ /* OFFSET 0x78-0x7C */
+ /**
+ * This field is the upper 32 bits of the 64 bit address to the next SGL
+ * element pair.
+ */
+ u32 next_pair_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit address to the next SGL
+ * element pair.
+ */
+ u32 next_pair_lower;
+
+};
+
+/**
+ * struct transport_snapshot - This structure is the SCU hardware scratch area
+ * for the task context. This is set to 0 by the driver but can be read by
+ * issuing a dump TC request to the SCU.
+ *
+ *
+ */
+struct transport_snapshot {
+ /* OFFSET 0x48 */
+ u32 xfer_rdy_write_data_length;
+
+ /* OFFSET 0x4C */
+ u32 data_offset;
+
+ /* OFFSET 0x50 */
+ u32 data_transfer_size:24;
+ u32 reserved_50_0:8;
+
+ /* OFFSET 0x54 */
+ u32 next_initiator_write_data_offset;
+
+ /* OFFSET 0x58 */
+ u32 next_initiator_write_data_xfer_size:24;
+ u32 reserved_58_0:8;
+};
+
+/**
+ * struct scu_task_context - This structure defines the contents of the SCU
+ * silicon task context. It lays out all of the fields according to the
+ * expected order and location for the Storage Controller unit.
+ *
+ *
+ */
+struct scu_task_context {
+ /* OFFSET 0x00 ------ */
+ /**
+ * This field must be encoded to one of the valid SCU task priority values
+ * - SCU_TASK_PRIORITY_NORMAL
+ * - SCU_TASK_PRIORITY_HEAD_OF_Q
+ * - SCU_TASK_PRIORITY_HIGH
+ */
+ u32 priority:2;
+
+ /**
+ * This field must be set to true if this is an initiator generated request.
+ * Until target mode is supported all task requests are initiator requests.
+ */
+ u32 initiator_request:1;
+
+ /**
+ * This field must be set to one of the valid connection rates; valid values
+ * are 0x8, 0x9, and 0xA.
+ */
+ u32 connection_rate:4;
+
+ /**
+ * This field must be programmed when generating an SMP response since the SMP
+ * connection remains open until the SMP response is generated.
+ */
+ u32 protocol_engine_index:3;
+
+ /**
+ * This field must contain the logical port for the task request.
+ */
+ u32 logical_port_index:3;
+
+ /**
+ * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
+ * - SCU_TASK_CONTEXT_PROTOCOL_SMP
+ * - SCU_TASK_CONTEXT_PROTOCOL_SSP
+ * - SCU_TASK_CONTEXT_PROTOCOL_STP
+ * - SCU_TASK_CONTEXT_PROTOCOL_NONE
+ */
+ u32 protocol_type:3;
+
+ /**
+ * This field must be set to the TCi allocated for this task
+ */
+ u32 task_index:12;
+
+ /**
+ * This field is reserved and must be set to 0x00
+ */
+ u32 reserved_00_0:1;
+
+ /**
+ * For a normal task request this must be set to 0. If this is an abort of
+ * this task request it must be set to 1.
+ */
+ u32 abort:1;
+
+ /**
+ * This field must be set to true for the SCU hardware to process the task.
+ */
+ u32 valid:1;
+
+ /**
+ * This field must be set to SCU_TASK_CONTEXT_TYPE
+ */
+ u32 context_type:1;
+
+ /* OFFSET 0x04 */
+ /**
+ * This field contains the RNi that is the target of this request.
+ */
+ u32 remote_node_index:12;
+
+ /**
+ * This field is programmed if this is a mirrored request, which we are not
+ * using, in which case it is the RNi for the mirrored target.
+ */
+ u32 mirrored_node_index:12;
+
+ /**
+ * This field is programmed with the direction of the SATA request
+ * - SCU_SATA_WRITE_DATA_DIRECTION
+ * - SCU_SATA_READ_DATA_DIRECTION
+ */
+ u32 sata_direction:1;
+
+ /**
+ * This field is programmed with one of the following SCU_COMMAND_CODE values
+ * - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
+ * - SCU_COMMAND_CODE_ACTIVE_TASK
+ * - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
+ * - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
+ */
+ u32 command_code:2;
+
+ /**
+ * This field is set to true if the remote node should be suspended.
+ * This bit is only valid for SSP & SMP target devices.
+ */
+ u32 suspend_node:1;
+
+ /**
+ * This field is programmed with one of the following command type codes
+ *
+ * For SAS requests use the scu_ssp_task_type
+ * - SCU_TASK_TYPE_IOREAD
+ * - SCU_TASK_TYPE_IOWRITE
+ * - SCU_TASK_TYPE_SMP_REQUEST
+ * - SCU_TASK_TYPE_RESPONSE
+ * - SCU_TASK_TYPE_RAW_FRAME
+ * - SCU_TASK_TYPE_PRIMITIVE
+ *
+ * For SATA requests use the scu_sata_task_type
+ * - SCU_TASK_TYPE_DMA_IN
+ * - SCU_TASK_TYPE_FPDMAQ_READ
+ * - SCU_TASK_TYPE_PACKET_DMA_IN
+ * - SCU_TASK_TYPE_SATA_RAW_FRAME
+ * - SCU_TASK_TYPE_DMA_OUT
+ * - SCU_TASK_TYPE_FPDMAQ_WRITE
+ * - SCU_TASK_TYPE_PACKET_DMA_OUT
+ */
+ u32 task_type:4;
+
+ /* OFFSET 0x08 */
+ /**
+ * This field is reserved and must be set to 0x00
+ */
+ u32 link_layer_control:8; /* presently all reserved */
+
+ /**
+ * This field is set to true when TLR is to be enabled
+ */
+ u32 ssp_tlr_enable:1;
+
+ /**
+ * This field specifies whether the SCU DMAs a response frame to host
+ * memory for good response frames when operating in target mode.
+ */
+ u32 dma_ssp_target_good_response:1;
+
+ /**
+ * This field indicates if the SCU should DMA the response frame to
+ * host memory.
+ */
+ u32 do_not_dma_ssp_good_response:1;
+
+ /**
+ * This field is set to true when strict ordering is to be enabled
+ */
+ u32 strict_ordering:1;
+
+ /**
+ * This field indicates the type of endianness to be utilized for the
+ * frame. Command, task, and response frames use control_frame
+ * set to 1.
+ */
+ u32 control_frame:1;
+
+ /**
+ * This field is reserved and the driver should set to 0x00
+ */
+ u32 tl_control_reserved:3;
+
+ /**
+ * This field is set to true when the SCU hardware task timeout control is to
+ * be enabled
+ */
+ u32 timeout_enable:1;
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 pts_control_reserved:7;
+
+ /**
+ * This field should be set to true when block guard is to be enabled
+ */
+ u32 block_guard_enable:1;
+
+ /**
+ * This field is reserved and the driver should set to 0x00
+ */
+ u32 sdma_control_reserved:7;
+
+ /* OFFSET 0x0C */
+ /**
+ * This field is the address modifier for this io request; it should be
+ * programmed with the virtual function that is making the request.
+ */
+ u32 address_modifier:16;
+
+ /**
+ * @todo What we support mirrored SMP response frame?
+ */
+ u32 mirrored_protocol_engine:3; /* mirrored protocol Engine Index */
+
+ /**
+ * If this is a mirrored request the logical port index for the mirrored RNi
+ * must be programmed.
+ */
+ u32 mirrored_logical_port:4; /* mirrored local port index */
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_0C_0:8;
+
+ /**
+ * This field must be set to true if the mirrored request processing is to be
+ * enabled.
+ */
+ u32 mirror_request_enable:1; /* Mirrored request Enable */
+
+ /* OFFSET 0x10 */
+ /**
+ * This field is the command iu length in dwords
+ */
+ u32 ssp_command_iu_length:8;
+
+ /**
+ * This is the target TLR enable bit; it must be set to 0 when creating the
+ * task context.
+ */
+ u32 xfer_ready_tlr_enable:1;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_10_0:7;
+
+ /**
+ * This is the maximum burst size that the SCU hardware will send in one
+ * connection; its value is (N x 512) and N must be a multiple of 2. If the
+ * value is 0x00 then maximum burst size is disabled.
+ */
+ u32 ssp_max_burst_size:16;
+
+ /* OFFSET 0x14 */
+ /**
+ * This field is set to the number of bytes to be transferred in the request.
+ */
+ u32 transfer_length_bytes:24; /* In terms of bytes */
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 reserved_14_0:8;
+
+ /* OFFSET 0x18-0x2C */
+ /**
+ * This union provides for the protocol specific part of the SCU Task Context.
+ */
+ union protocol_context type;
+
+ /* OFFSET 0x30-0x34 */
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address of the
+ * command iu buffer
+ */
+ u32 command_iu_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address of the
+ * command iu buffer
+ */
+ u32 command_iu_lower;
+
+ /* OFFSET 0x38-0x3C */
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address of the
+ * response iu buffer
+ */
+ u32 response_iu_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address of the
+ * response iu buffer
+ */
+ u32 response_iu_lower;
+
+ /* OFFSET 0x40 */
+ /**
+ * This field is set to the task phase of the SCU hardware. The driver must
+ * set this to 0x01
+ */
+ u32 task_phase:8;
+
+ /**
+ * This field is set to the transport layer task status. The driver must set
+ * this to 0x00
+ */
+ u32 task_status:8;
+
+ /**
+ * This field is used during initiator write TLR
+ */
+ u32 previous_extended_tag:4;
+
+ /**
+ * This field is set to the maximum number of retries for an STP non-data FIS
+ */
+ u32 stp_retry_count:2;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_40_1:2;
+
+ /**
+ * This field is used by the SCU TL to determine when to take a snapshot when
+ * transmitting read data frames.
+ * - 0x00 The entire IO
+ * - 0x01 32k
+ * - 0x02 64k
+ * - 0x04 128k
+ * - 0x08 256k
+ */
+ u32 ssp_tlr_threshold:4;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_40_2:4;
+
+ /* OFFSET 0x44 */
+ u32 write_data_length; /* read only set to 0 */
+
+ /* OFFSET 0x48-0x58 */
+ struct transport_snapshot snapshot; /* read only set to 0 */
+
+ /* OFFSET 0x5C */
+ u32 block_protection_enable:1;
+ u32 block_size:2;
+ u32 block_protection_function:2;
+ u32 reserved_5C_0:9;
+ u32 active_sgl_element:2; /* read only set to 0 */
+ u32 sgl_exhausted:1; /* read only set to 0 */
+ u32 payload_data_transfer_error:4; /* read only set to 0 */
+ u32 frame_buffer_offset:11; /* read only set to 0 */
+
+ /* OFFSET 0x60-0x7C */
+ /**
+ * This field is the first SGL element pair found in the TC data structure.
+ */
+ struct scu_sgl_element_pair sgl_pair_ab;
+ /* OFFSET 0x80-0x9C */
+ /**
+ * This field is the second SGL element pair found in the TC data structure.
+ */
+ struct scu_sgl_element_pair sgl_pair_cd;
+
+ /* OFFSET 0xA0-BC */
+ struct scu_sgl_element_pair sgl_snapshot_ac;
+
+ /* OFFSET 0xC0 */
+ u32 active_sgl_element_pair; /* read only set to 0 */
+
+ /* OFFSET 0xC4-0xCC */
+ u32 reserved_C4_CC[3];
+
+ /* OFFSET 0xD0 */
+ u32 intermediate_crc_value:16;
+ u32 initial_crc_seed:16;
+
+ /* OFFSET 0xD4 */
+ u32 application_tag_for_verify:16;
+ u32 application_tag_for_generate:16;
+
+ /* OFFSET 0xD8 */
+ u32 reference_tag_seed_for_verify_function;
+
+ /* OFFSET 0xDC */
+ u32 reserved_DC;
+
+ /* OFFSET 0xE0 */
+ u32 reserved_E0_0:16;
+ u32 application_tag_mask_for_generate:16;
+
+ /* OFFSET 0xE4 */
+ u32 block_protection_control:16;
+ u32 application_tag_mask_for_verify:16;
+
+ /* OFFSET 0xE8 */
+ u32 block_protection_error:8;
+ u32 reserved_E8_0:24;
+
+ /* OFFSET 0xEC */
+ u32 reference_tag_seed_for_verify;
+
+ /* OFFSET 0xF0 */
+ u32 intermediate_crc_valid_snapshot:16;
+ u32 reserved_F0_0:16;
+
+ /* OFFSET 0xF4 */
+ u32 reference_tag_seed_for_verify_function_snapshot;
+
+ /* OFFSET 0xF8 */
+ u32 snapshot_of_reserved_dword_DC_of_tc;
+
+ /* OFFSET 0xFC */
+ u32 reference_tag_seed_for_generate_function_snapshot;
+
+};
+
+#endif /* _SCU_TASK_CONTEXT_H_ */
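
A sketch of filling one SGL element pair as defined above; dma and len are hypothetical parameters, len must fit in the 24-bit length field, and the buffer must not cross a 4GB boundary:

static void example_fill_sgl_pair(struct scu_sgl_element_pair *pair,
				  dma_addr_t dma, u32 len)
{
	/* SGL element A carries the data buffer for this example. */
	pair->A.address_upper = upper_32_bits(dma);
	pair->A.address_lower = lower_32_bits(dma);
	pair->A.length = len;
	pair->A.address_modifier = 0;	/* no virtual function modifier */

	/* Element B unused and no next pair: terminate the chain. */
	memset(&pair->B, 0, sizeof(pair->B));
	pair->next_pair_upper = 0;
	pair->next_pair_lower = 0;
}
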
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
new file mode 100644
index 000000000000..d6bcdd013dc9
--- /dev/null
+++ b/drivers/scsi/isci/task.c
@@ -0,0 +1,1676 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/completion.h>
+#include <linux/irqflags.h>
+#include "sas.h"
+#include <scsi/libsas.h>
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "isci.h"
+#include "request.h"
+#include "task.h"
+#include "host.h"
+
+/**
+* isci_task_refuse() - complete the request to the upper layer driver in
+* the case where an I/O needs to be completed back in the submit path.
+* @ihost: host on which the request was queued
+* @task: request to complete
+* @response: response code for the completed task.
+* @status: status code for the completed task.
+*
+*/
+static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
+ enum service_response response,
+ enum exec_status status)
+
+{
+ enum isci_completion_selection disposition;
+
+ disposition = isci_perform_normal_io_completion;
+ disposition = isci_task_set_completion_status(task, response, status,
+ disposition);
+
+ /* Tasks aborted specifically by a call to the lldd_abort_task
+ * function should not be completed to the host in the regular path.
+ */
+ switch (disposition) {
+ case isci_perform_normal_io_completion:
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Normal - task = %p, response=%d, "
+ "status=%d\n",
+ __func__, task, response, status);
+
+ task->lldd_task = NULL;
+
+ isci_execpath_callback(ihost, task, task->task_done);
+ break;
+
+ case isci_perform_aborted_io_completion:
+ /*
+ * No notification because this request is already in the
+ * abort path.
+ */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Aborted - task = %p, response=%d, "
+ "status=%d\n",
+ __func__, task, response, status);
+ break;
+
+ case isci_perform_error_io_completion:
+ /* Use sas_task_abort */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Error - task = %p, response=%d, "
+ "status=%d\n",
+ __func__, task, response, status);
+
+ isci_execpath_callback(ihost, task, sas_task_abort);
+ break;
+
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci task notification default case!",
+ __func__);
+ sas_task_abort(task);
+ break;
+ }
+}
+
+#define for_each_sas_task(num, task) \
+ for (; num > 0; num--,\
+ task = list_entry(task->list.next, struct sas_task, list))
+
+
+static inline int isci_device_io_ready(struct isci_remote_device *idev,
+ struct sas_task *task)
+{
+ return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
+ (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
+ isci_task_is_ncq_recovery(task))
+ : 0;
+}
+/**
+ * isci_task_execute_task() - This function is one of the SAS Domain Template
+ * functions. This function is called by libsas to send a task down to
+ * hardware.
+ * @task: This parameter specifies the SAS task to send.
+ * @num: This parameter specifies the number of tasks to queue.
+ * @gfp_flags: This parameter specifies the context of this call.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
+{
+ struct isci_host *ihost = dev_to_ihost(task->dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ bool io_ready;
+ u16 tag;
+
+ dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
+
+ for_each_sas_task(num, task) {
+ enum sci_status status = SCI_FAILURE;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(task->dev);
+ io_ready = isci_device_io_ready(idev, task);
+ tag = isci_alloc_tag(ihost);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ dev_dbg(&ihost->pdev->dev,
+ "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
+ task, num, task->dev, idev, idev ? idev->flags : 0,
+ task->uldd_task);
+
+ if (!idev) {
+ isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
+ SAS_DEVICE_UNKNOWN);
+ } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+ /* Indicate QUEUE_FULL so that the scsi midlayer
+ * retries.
+ */
+ isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
+ SAS_QUEUE_FULL);
+ } else {
+ /* There is a device and it's ready for I/O. */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ /* The I/O was aborted. */
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags);
+
+ isci_task_refuse(ihost, task,
+ SAS_TASK_UNDELIVERED,
+ SAM_STAT_TASK_ABORTED);
+ } else {
+ task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* build and send the request. */
+ status = isci_request_execute(ihost, idev, task, tag);
+
+ if (status != SCI_SUCCESS) {
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ /* Did not really start this command. */
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Indicate QUEUE_FULL so that the scsi
+ * midlayer retries. If the request
+ * failed for remote device reasons,
+ * it gets returned as
+ * SAS_TASK_UNDELIVERED next time
+ * through.
+ */
+ isci_task_refuse(ihost, task,
+ SAS_TASK_COMPLETE,
+ SAS_QUEUE_FULL);
+ }
+ }
+ }
+ if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ /* command never hit the device, so just free
+ * the tci and skip the sequence increment
+ */
+ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+ isci_put_device(idev);
+ }
+ return 0;
+}
+
+static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
+{
+ struct isci_tmf *isci_tmf;
+ enum sci_status status;
+
+ if (tmf_task != ireq->ttype)
+ return SCI_FAILURE;
+
+ isci_tmf = isci_request_access_tmf(ireq);
+
+ switch (isci_tmf->tmf_code) {
+
+ case isci_tmf_sata_srst_high:
+ case isci_tmf_sata_srst_low: {
+ struct host_to_dev_fis *fis = &ireq->stp.cmd;
+
+ memset(fis, 0, sizeof(*fis));
+
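+ /* Build a register host-to-device FIS (type 0x27) for the SRST
+ * assertion/deassertion. The FIS was just zeroed, so the masks below
+ * only make explicit that the C (command) bit and the PM port field
+ * stay clear for this device-control FIS.
+ */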
+ fis->fis_type = 0x27;
+ fis->flags &= ~0x80;
+ fis->flags &= 0xF0;
+ if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
+ fis->control |= ATA_SRST;
+ else
+ fis->control &= ~ATA_SRST;
+ break;
+ }
+ /* other management commands go here... */
+ default:
+ return SCI_FAILURE;
+ }
+
+ /* core builds the protocol specific request
+ * based on the h2d fis.
+ */
+ status = sci_task_request_construct_sata(ireq);
+
+ return status;
+}
+
+static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 tag, struct isci_tmf *isci_tmf)
+{
+ enum sci_status status = SCI_FAILURE;
+ struct isci_request *ireq = NULL;
+ struct domain_device *dev;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_tmf = %p\n", __func__, isci_tmf);
+
+ dev = idev->domain_dev;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
+ if (!ireq)
+ return NULL;
+
+ /* let the core do its construct. */
+ status = sci_task_request_construct(ihost, idev, tag,
+ ireq);
+
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_task_request_construct failed - "
+ "status = 0x%x\n",
+ __func__,
+ status);
+ return NULL;
+ }
+
+ /* XXX convert to get this from task->tproto like other drivers */
+ if (dev->dev_type == SAS_END_DEV) {
+ isci_tmf->proto = SAS_PROTOCOL_SSP;
+ status = sci_task_request_construct_ssp(ireq);
+ if (status != SCI_SUCCESS)
+ return NULL;
+ }
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ isci_tmf->proto = SAS_PROTOCOL_SATA;
+ status = isci_sata_management_task_request_build(ireq);
+
+ if (status != SCI_SUCCESS)
+ return NULL;
+ }
+ return ireq;
+}
+
+static int isci_task_execute_tmf(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_tmf *tmf, unsigned long timeout_ms)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ enum sci_task_status status = SCI_TASK_FAILURE;
+ struct isci_request *ireq;
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ unsigned long timeleft;
+ u16 tag;
+
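+ /* Reserve a tag up front; every failure path below goes through
+ * err_tci so that the tag is returned to the pool.
+ */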
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ tag = isci_alloc_tag(ihost);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+ return ret;
+
+ /* sanity check, return TMF_RESP_FUNC_FAILED
+ * if the device is not there and ready.
+ */
+ if (!idev ||
+ (!test_bit(IDEV_IO_READY, &idev->flags) &&
+ !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p not ready (%#lx)\n",
+ __func__,
+ idev, idev ? idev->flags : 0);
+ goto err_tci;
+ } else
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n",
+ __func__, idev);
+
+ /* Assign the pointer to the TMF's completion kernel wait structure. */
+ tmf->complete = &completion;
+
+ ireq = isci_task_request_build(ihost, idev, tag, tmf);
+ if (!ireq)
+ goto err_tci;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ /* start the TMF io. */
+ status = sci_controller_start_task(ihost, idev, ireq);
+
+ if (status != SCI_TASK_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: start_io failed - status = 0x%x, request = %p\n",
+ __func__,
+ status,
+ ireq);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ goto err_tci;
+ }
+
+ if (tmf->cb_state_func != NULL)
+ tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
+
+ isci_request_change_state(ireq, started);
+
+ /* add the request to the remote device request list. */
+ list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Wait for the TMF to complete, or a timeout. */
+ timeleft = wait_for_completion_timeout(&completion,
+ msecs_to_jiffies(timeout_ms));
+
+ if (timeleft == 0) {
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmf->cb_state_func != NULL)
+ tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+
+ sci_controller_terminate_request(ihost,
+ idev,
+ ireq);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ wait_for_completion(tmf->complete);
+ }
+
+ isci_print_tmf(tmf);
+
+ if (tmf->status == SCI_SUCCESS)
+ ret = TMF_RESP_FUNC_COMPLETE;
+ else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: tmf.status == "
+ "SCI_FAILURE_IO_RESPONSE_VALID\n",
+ __func__);
+ ret = TMF_RESP_FUNC_COMPLETE;
+ }
+ /* Else - leave the default "failed" status alone. */
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completed request = %p\n",
+ __func__,
+ ireq);
+
+ return ret;
+
+ err_tci:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return ret;
+}
+
+static void isci_task_build_tmf(struct isci_tmf *tmf,
+ enum isci_tmf_function_codes code,
+ void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+ struct isci_tmf *,
+ void *),
+ void *cb_data)
+{
+ memset(tmf, 0, sizeof(*tmf));
+
+ tmf->tmf_code = code;
+ tmf->cb_state_func = tmf_sent_cb;
+ tmf->cb_data = cb_data;
+}
+
+static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
+ enum isci_tmf_function_codes code,
+ void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+ struct isci_tmf *,
+ void *),
+ struct isci_request *old_request)
+{
+ isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
+ tmf->io_tag = old_request->io_tag;
+}
+
+/**
+ * isci_task_validate_request_to_abort() - This function checks the given I/O
+ * against the "started" state. If the request is still "started", its
+ * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
+ * BEFORE CALLING THIS FUNCTION.
+ * @isci_request: This parameter specifies the request object to control.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_device: This is the device to which the request is pending.
+ * @aborted_io_completion: This is a completion structure that will be added to
+ * the request in case it is changed to aborting; this completion is
+ * triggered when the request is fully completed.
+ *
+ * Either "started" on successful change of the task status to "aborted", or
+ * "unallocated" if the task cannot be controlled.
+ */
+static enum isci_request_status isci_task_validate_request_to_abort(
+ struct isci_request *isci_request,
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ struct completion *aborted_io_completion)
+{
+ enum isci_request_status old_state = unallocated;
+
+ /* Only abort the task if it's in the
+ * device's request_in_process list
+ */
+ if (isci_request && !list_empty(&isci_request->dev_node)) {
+ old_state = isci_request_change_started_to_aborted(
+ isci_request, aborted_io_completion);
+
+ }
+
+ return old_state;
+}
+
+/**
+ * isci_request_cleanup_completed_loiterer() - This function will take care of
+ * the final cleanup on any request which has been explicitly terminated.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_device: This is the device to which the request is pending.
+ * @isci_request: This parameter specifies the terminated request object.
+ * @task: This parameter is the libsas I/O request.
+ */
+static void isci_request_cleanup_completed_loiterer(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ struct isci_request *isci_request,
+ struct sas_task *task)
+{
+ unsigned long flags;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device=%p, request=%p, task=%p\n",
+ __func__, isci_device, isci_request, task);
+
+ if (task != NULL) {
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->lldd_task = NULL;
+
+ task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+
+ isci_set_task_doneflags(task);
+
+ /* If this task is not in the abort path, call task_done. */
+ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ task->task_done(task);
+ } else
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ }
+
+ if (isci_request != NULL) {
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ list_del_init(&isci_request->dev_node);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ }
+}
+
+/**
+ * isci_terminate_request_core() - This function will terminate the given
+ * request, and wait for it to complete. This function must only be called
+ * from a thread that can wait. Note that the request is terminated and
+ * completed (back to the host, if started there).
+ * @ihost: This SCU.
+ * @idev: The target.
+ * @isci_request: The I/O request to be terminated.
+ *
+ */
+static void isci_terminate_request_core(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *isci_request)
+{
+ enum sci_status status = SCI_SUCCESS;
+ bool was_terminated = false;
+ bool needs_cleanup_handling = false;
+ enum isci_request_status request_status;
+ unsigned long flags;
+ unsigned long termination_completed = 1;
+ struct completion *io_request_completion;
+ struct sas_task *task;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: device = %p; request = %p\n",
+ __func__, idev, isci_request);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ io_request_completion = isci_request->io_request_completion;
+
+ task = (isci_request->ttype == io_task)
+ ? isci_request_access_task(isci_request)
+ : NULL;
+
+ /* Note that we are not going to control
+ * the target to abort the request.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
+
+ /* Make sure the request wasn't just sitting around signalling
+ * device condition (if the request handle is NULL, then the
+ * request completed but needed additional handling here).
+ */
+ if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+ was_terminated = true;
+ needs_cleanup_handling = true;
+ status = sci_controller_terminate_request(ihost,
+ idev,
+ isci_request);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /*
+ * The only time the request to terminate will
+ * fail is when the io request is completed and
+ * being aborted.
+ */
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: sci_controller_terminate_request"
+ " returned = 0x%x\n",
+ __func__, status);
+
+ isci_request->io_request_completion = NULL;
+
+ } else {
+ if (was_terminated) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: before completion wait (%p/%p)\n",
+ __func__, isci_request, io_request_completion);
+
+ /* Wait here for the request to complete. */
+ #define TERMINATION_TIMEOUT_MSEC 500
+ termination_completed
+ = wait_for_completion_timeout(
+ io_request_completion,
+ msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
+
+ if (!termination_completed) {
+
+ /* The request to terminate has timed out. */
+ spin_lock_irqsave(&ihost->scic_lock,
+ flags);
+
+ /* Check for state changes. */
+ if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+
+ /* The best we can do is to have the
+ * request die a silent death if it
+ * ever really completes.
+ *
+ * Set the request state to "dead",
+ * and clear the task pointer so that
+ * an actual completion event callback
+ * doesn't do anything.
+ */
+ isci_request->status = dead;
+ isci_request->io_request_completion
+ = NULL;
+
+ if (isci_request->ttype == io_task) {
+
+ /* Break links with the
+ * sas_task.
+ */
+ isci_request->ttype_ptr.io_task_ptr
+ = NULL;
+ }
+ } else
+ termination_completed = 1;
+
+ spin_unlock_irqrestore(&ihost->scic_lock,
+ flags);
+
+ if (!termination_completed) {
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: *** Timeout waiting for "
+ "termination(%p/%p)\n",
+ __func__, io_request_completion,
+ isci_request);
+
+ /* The request can no longer be referenced
+ * safely since it may go away if the
+ * termination ever really does complete.
+ */
+ isci_request = NULL;
+ }
+ }
+ if (termination_completed)
+ dev_dbg(&ihost->pdev->dev,
+ "%s: after completion wait (%p/%p)\n",
+ __func__, isci_request, io_request_completion);
+ }
+
+ if (termination_completed) {
+
+ isci_request->io_request_completion = NULL;
+
+ /* Peek at the status of the request. This will tell
+ * us if there was special handling on the request such that it
+ * needs to be detached and freed here.
+ */
+ spin_lock_irqsave(&isci_request->state_lock, flags);
+ request_status = isci_request->status;
+
+ if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
+ && ((request_status == aborted)
+ || (request_status == aborting)
+ || (request_status == terminating)
+ || (request_status == completed)
+ || (request_status == dead)
+ )
+ ) {
+
+ /* The completion routine won't free a request in
+ * the aborted/aborting/etc. states, so we do
+ * it here.
+ */
+ needs_cleanup_handling = true;
+ }
+ spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+ }
+ if (needs_cleanup_handling)
+ isci_request_cleanup_completed_loiterer(
+ ihost, idev, isci_request, task);
+ }
+}
+
+/**
+ * isci_terminate_pending_requests() - This function will change the state of
+ * all requests on the given device to "aborting", will terminate the
+ * requests, and wait for them to complete. This function must only be
+ * called from a thread that can wait. Note that the requests are all
+ * terminated and completed (back to the host, if started there).
+ * @isci_host: This parameter specifies SCU.
+ * @idev: This parameter specifies the target.
+ *
+ */
+void isci_terminate_pending_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ struct completion request_completion;
+ enum isci_request_status old_state;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ list_splice_init(&idev->reqs_in_process, &list);
+
+ /* assumes that isci_terminate_request_core deletes from the list */
+ while (!list_empty(&list)) {
+ struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
+
+ /* Change state to "terminating" if it is currently
+ * "started".
+ */
+ old_state = isci_request_change_started_to_newstate(ireq,
+ &request_completion,
+ terminating);
+ switch (old_state) {
+ case started:
+ case completed:
+ case aborting:
+ break;
+ default:
+ /* termination in progress, or otherwise dispositioned.
+ * We know the request was on 'list' so should be safe
+ * to move it back to reqs_in_process
+ */
+ list_move(&ireq->dev_node, &idev->reqs_in_process);
+ ireq = NULL;
+ break;
+ }
+
+ if (!ireq)
+ continue;
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ init_completion(&request_completion);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p request=%p; task=%p old_state=%d\n",
+ __func__, idev, ireq,
+ ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+ old_state);
+
+ /* If the old_state is started:
+ * This request was not already being aborted. If it had been,
+ * then the aborting I/O (ie. the TMF request) would not be in
+ * the aborting state, and thus would be terminated here. Note
+ * that since the TMF completion's call to the kernel function
+ * "complete()" does not happen until the pending I/O request
+ * terminate fully completes, we do not have to implement a
+ * special wait here for already aborting requests - the
+ * termination of the TMF request will force the request
+ * to finish its already-started terminate.
+ *
+ * If old_state == completed:
+ * This request completed from the SCU hardware perspective
+ * and now just needs cleaning up in terms of freeing the
+ * request and potentially calling up to libsas.
+ *
+ * If old_state == aborting:
+ * This request has already gone through a TMF timeout, but may
+ * not have been terminated; needs cleaning up at least.
+ */
+ isci_terminate_request_core(ihost, idev, ireq);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * isci_task_send_lu_reset_sas() - This function is called from the SAS Domain
+ * Template function isci_task_lu_reset() for SSP devices.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+static int isci_task_send_lu_reset_sas(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ u8 *lun)
+{
+ struct isci_tmf tmf;
+ int ret = TMF_RESP_FUNC_FAILED;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_host = %p, isci_device = %p\n",
+ __func__, isci_host, isci_device);
+ /* Send the LUN reset to the target. By the time the call returns,
+ * the TMF has fully executed in the target (in which case the return
+ * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or was
+ * otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
+ */
+ isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
+
+ #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
+ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+
+ if (ret == TMF_RESP_FUNC_COMPLETE)
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: %p: TMF_LU_RESET passed\n",
+ __func__, isci_device);
+ else
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: %p: TMF_LU_RESET failed (%x)\n",
+ __func__, isci_device, ret);
+
+ return ret;
+}
+
+static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
+ struct isci_remote_device *idev, u8 *lun)
+{
+ int ret = TMF_RESP_FUNC_FAILED;
+ struct isci_tmf tmf;
+
+ /* Send the soft reset to the target */
+ #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
+ isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
+
+ ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
+
+ if (ret != TMF_RESP_FUNC_COMPLETE) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Assert SRST failed (%p) = %x",
+ __func__, idev, ret);
+
+ /* Return the failure so that the LUN reset is escalated
+ * to a target reset.
+ */
+ }
+ return ret;
+}
+
+/**
+ * isci_task_lu_reset() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas,
+ * to reset the given lun. Note the assumption that while this call is
+ * executing, no I/O will be sent by the host to the device.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
+{
+ struct isci_host *isci_host = dev_to_ihost(domain_device);
+ struct isci_remote_device *isci_device;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ isci_device = isci_lookup_device(domain_device);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
+ __func__, domain_device, isci_host, isci_device);
+
+ if (isci_device)
+ set_bit(IDEV_EH, &isci_device->flags);
+
+ /* If there is a device reset pending on any request in the
+ * device's list, fail this LUN reset request in order to
+ * escalate to the device reset.
+ */
+ if (!isci_device ||
+ isci_device_is_reset_pending(isci_host, isci_device)) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: No dev (%p), or "
+ "RESET PENDING: domain_device=%p\n",
+ __func__, isci_device, domain_device);
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
+
+ /* Send the task management part of the reset. */
+ if (sas_protocol_ata(domain_device->tproto)) {
+ ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
+ } else
+ ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
+
+ /* If the LUN reset worked, all the I/O can now be terminated. */
+ if (ret == TMF_RESP_FUNC_COMPLETE)
+ /* Terminate all I/O now. */
+ isci_terminate_pending_requests(isci_host,
+ isci_device);
+
+ out:
+ isci_put_device(isci_device);
+ return ret;
+}
+
+
+/* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
+int isci_task_clear_nexus_port(struct asd_sas_port *port)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+/* Task Management Functions. Must be called from process context. */
+
+/**
+ * isci_abort_task_process_cb() - This is a helper function for the abort task
+ * TMF command. It manages the request state with respect to the successful
+ * transmission / completion of the abort task request.
+ * @cb_state: This parameter specifies when this function was called - after
+ * the TMF request has been started or after it has timed out.
+ * @tmf: This parameter specifies the TMF in progress.
+ *
+ *
+ */
+static void isci_abort_task_process_cb(
+ enum isci_tmf_cb_state cb_state,
+ struct isci_tmf *tmf,
+ void *cb_data)
+{
+ struct isci_request *old_request;
+
+ old_request = (struct isci_request *)cb_data;
+
+ dev_dbg(&old_request->isci_host->pdev->dev,
+ "%s: tmf=%p, old_request=%p\n",
+ __func__, tmf, old_request);
+
+ switch (cb_state) {
+
+ case isci_tmf_started:
+ /* The TMF has been started. Nothing to do here, since the
+ * request state was already set to "aborted" by the abort
+ * task function.
+ */
+ if ((old_request->status != aborted)
+ && (old_request->status != completed))
+ dev_dbg(&old_request->isci_host->pdev->dev,
+ "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
+ __func__, old_request->status, tmf, old_request);
+ break;
+
+ case isci_tmf_timed_out:
+
+ /* Set the task's state to "aborting", since the abort task
+ * function thread set it to "aborted" (above) in anticipation
+ * of the task management request working correctly. Since the
+ * timeout has now fired, the TMF request failed. We set the
+ * state such that the request completion will indicate the
+ * device is no longer present.
+ */
+ isci_request_change_state(old_request, aborting);
+ break;
+
+ default:
+ dev_dbg(&old_request->isci_host->pdev->dev,
+ "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
+ __func__, cb_state, tmf, old_request);
+ break;
+ }
+}
+
+/**
+ * isci_task_abort_task() - This function is one of the SAS Domain Template
+ * functions. This function is called by libsas to abort a specified task.
+ * @task: This parameter specifies the SAS task to abort.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task(struct sas_task *task)
+{
+ struct isci_host *isci_host = dev_to_ihost(task->dev);
+ DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
+ struct isci_request *old_request = NULL;
+ enum isci_request_status old_state;
+ struct isci_remote_device *isci_device = NULL;
+ struct isci_tmf tmf;
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ bool any_dev_reset = false;
+
+ /* Get the isci_request reference from the task. Note that
+ * this check does not depend on the pending request list
+ * in the device, because tasks driving resets may land here
+ * after completion in the core.
+ */
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ spin_lock(&task->task_state_lock);
+
+ old_request = task->lldd_task;
+
+ /* If task is already done, the request isn't valid */
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+ (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+ old_request)
+ isci_device = isci_lookup_device(task->dev);
+
+ spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: task = %p\n", __func__, task);
+
+ if (!isci_device || !old_request)
+ goto out;
+
+ set_bit(IDEV_EH, &isci_device->flags);
+
+ /* This version of the driver will fail abort requests for
+ * SATA/STP. Failing the abort request this way will cause the
+ * SCSI error handler thread to escalate to LUN reset
+ */
+ if (sas_protocol_ata(task->task_proto)) {
+ dev_dbg(&isci_host->pdev->dev,
+ " task %p is for a STP/SATA device;"
+ " returning TMF_RESP_FUNC_FAILED\n"
+ " to cause a LUN reset...\n", task);
+ goto out;
+ }
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: old_request == %p\n", __func__, old_request);
+
+ any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+
+ /* If the extraction of the request reference from the task
+ * failed, then the request has been completed (or if there is a
+ * pending reset then this abort request function must be failed
+ * in order to escalate to the target reset).
+ */
+ if ((old_request == NULL) || any_dev_reset) {
+
+ /* If the device reset task flag is set, fail the task
+ * management request. Otherwise, the original request
+ * has completed.
+ */
+ if (any_dev_reset) {
+
+ /* Turn off the task's DONE to make sure this
+ * task is escalated to a target reset.
+ */
+ task->task_state_flags &= ~SAS_TASK_STATE_DONE;
+
+ /* Make the reset happen as soon as possible. */
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Fail the task management request in order to
+ * escalate to the target reset.
+ */
+ ret = TMF_RESP_FUNC_FAILED;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: Failing task abort in order to "
+ "escalate to target reset because\n"
+ "SAS_TASK_NEED_DEV_RESET is set for "
+ "task %p on dev %p\n",
+ __func__, task, isci_device);
+
+
+ } else {
+ /* The request has already completed and there
+ * is nothing to do here other than to set the task
+ * done bit, and indicate that the task abort function
+ * was successful.
+ */
+ isci_set_task_doneflags(task);
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ ret = TMF_RESP_FUNC_COMPLETE;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: abort task not needed for %p\n",
+ __func__, task);
+ }
+ goto out;
+ } else {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ }
+
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+ /* Check the request status and change to "aborted" if currently
+ * "starting"; if true then set the I/O kernel completion
+ * struct that will be triggered when the request completes.
+ */
+ old_state = isci_task_validate_request_to_abort(
+ old_request, isci_host, isci_device,
+ &aborted_io_completion);
+ if ((old_state != started) &&
+ (old_state != completed) &&
+ (old_state != aborting)) {
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ /* The request was already being handled by someone else (because
+ * they got to set the state away from started).
+ */
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: device = %p; old_request %p already being aborted\n",
+ __func__,
+ isci_device, old_request);
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+ if (task->task_proto == SAS_PROTOCOL_SMP ||
+ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: SMP request (%d)"
+ " or complete_in_target (%d), thus no TMF\n",
+ __func__, (task->task_proto == SAS_PROTOCOL_SMP),
+ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
+
+ /* Set the state on the task. */
+ isci_task_all_done(task);
+
+ ret = TMF_RESP_FUNC_COMPLETE;
+
+ /* Stopping and SMP devices are not sent a TMF, and are not
+ * reset, but the outstanding I/O request is terminated below.
+ */
+ } else {
+ /* Fill in the tmf structure */
+ isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
+ isci_abort_task_process_cb,
+ old_request);
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
+ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+ ISCI_ABORT_TASK_TIMEOUT_MS);
+
+ if (ret != TMF_RESP_FUNC_COMPLETE)
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_task_send_tmf failed\n",
+ __func__);
+ }
+ if (ret == TMF_RESP_FUNC_COMPLETE) {
+ set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
+
+ /* Clean up the request on our side, and wait for the aborted
+ * I/O to complete.
+ */
+ isci_terminate_request_core(isci_host, isci_device, old_request);
+ }
+
+ /* Make sure we do not leave a reference to aborted_io_completion */
+ old_request->io_request_completion = NULL;
+ out:
+ isci_put_device(isci_device);
+ return ret;
+}
+
+/**
+ * isci_task_abort_task_set() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas,
+ * to abort all tasks for the given lun.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task_set(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_clear_aca() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_aca(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+/**
+ * isci_task_clear_task_set() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_task_set(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_query_task() - This function is implemented to cause libsas to
+ * correctly escalate the failed abort to a LUN or target reset (this is
+ * because the libsas function sas_scsi_find_task does not correctly interpret
+ * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
+ * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
+ * returned, libsas will turn this into a target reset.
+ * @task: This parameter specifies the sas task being queried.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_query_task(
+ struct sas_task *task)
+{
+ /* See if there is a pending device reset for this device. */
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+ return TMF_RESP_FUNC_FAILED;
+ else
+ return TMF_RESP_FUNC_SUCC;
+}
+
+/*
+ * isci_task_request_complete() - This function is called by the sci core when
+ * a task request completes.
+ * @ihost: This parameter specifies the ISCI host object
+ * @ireq: This parameter is the completed isci_request object.
+ * @completion_status: This parameter specifies the completion status from the
+ * sci core.
+ *
+ * none.
+ */
+void
+isci_task_request_complete(struct isci_host *ihost,
+ struct isci_request *ireq,
+ enum sci_task_status completion_status)
+{
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+ struct completion *tmf_complete;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request = %p, status=%d\n",
+ __func__, ireq, completion_status);
+
+ isci_request_change_state(ireq, completed);
+
+ tmf->status = completion_status;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
+
+ if (tmf->proto == SAS_PROTOCOL_SSP) {
+ memcpy(&tmf->resp.resp_iu,
+ &ireq->ssp.rsp,
+ SSP_RESP_IU_MAX_SIZE);
+ } else if (tmf->proto == SAS_PROTOCOL_SATA) {
+ memcpy(&tmf->resp.d2h_fis,
+ &ireq->stp.rsp,
+ sizeof(struct dev_to_host_fis));
+ }
+
+ /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+ tmf_complete = tmf->complete;
+
+ sci_controller_complete_io(ihost, ireq->target_device, ireq);
+ /* set the 'terminated' flag so the request cannot be terminated
+ * or completed again.
+ */
+ set_bit(IREQ_TERMINATED, &ireq->flags);
+
+ isci_request_change_state(ireq, unallocated);
+ list_del_init(&ireq->dev_node);
+
+ /* The task management part completes last. */
+ complete(tmf_complete);
+}
+
+static void isci_smp_task_timedout(unsigned long _task)
+{
+ struct sas_task *task = (void *) _task;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ complete(&task->completion);
+}
+
+static void isci_smp_task_done(struct sas_task *task)
+{
+ if (!del_timer(&task->timer))
+ return;
+ complete(&task->completion);
+}
+
+static struct sas_task *isci_alloc_task(void)
+{
+ struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
+
+ if (task) {
+ INIT_LIST_HEAD(&task->list);
+ spin_lock_init(&task->task_state_lock);
+ task->task_state_flags = SAS_TASK_STATE_PENDING;
+ init_timer(&task->timer);
+ init_completion(&task->completion);
+ }
+
+ return task;
+}
+
+static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
+{
+ if (task) {
+ BUG_ON(!list_empty(&task->list));
+ kfree(task);
+ }
+}
+
+static int isci_smp_execute_task(struct isci_host *ihost,
+ struct domain_device *dev, void *req,
+ int req_size, void *resp, int resp_size)
+{
+ int res, retry;
+ struct sas_task *task = NULL;
+
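+ /* Try the SMP request up to three times: protocol errors free the
+ * task and retry with a fresh one, while success, underrun and
+ * overrun break out; the task is finally freed after ex_err.
+ */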
+ for (retry = 0; retry < 3; retry++) {
+ task = isci_alloc_task();
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = dev;
+ task->task_proto = dev->tproto;
+ sg_init_one(&task->smp_task.smp_req, req, req_size);
+ sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
+
+ task->task_done = isci_smp_task_done;
+
+ task->timer.data = (unsigned long) task;
+ task->timer.function = isci_smp_task_timedout;
+ task->timer.expires = jiffies + 10*HZ;
+ add_timer(&task->timer);
+
+ res = isci_task_execute_task(task, 1, GFP_KERNEL);
+
+ if (res) {
+ del_timer(&task->timer);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: executing SMP task failed:%d\n",
+ __func__, res);
+ goto ex_err;
+ }
+
+ wait_for_completion(&task->completion);
+ res = -ECOMM;
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: smp task timed out or aborted\n",
+ __func__);
+ isci_task_abort_task(task);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SMP task aborted and not done\n",
+ __func__);
+ goto ex_err;
+ }
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAM_STAT_GOOD) {
+ res = 0;
+ break;
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun */
+ res = task->task_status.residual;
+ break;
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ res = -EMSGSIZE;
+ break;
+ } else {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: task to dev %016llx response: 0x%x "
+ "status 0x%x\n", __func__,
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ isci_free_task(ihost, task);
+ task = NULL;
+ }
+ }
+ex_err:
+ BUG_ON(retry == 3 && task != NULL);
+ isci_free_task(ihost, task);
+ return res;
+}
+
+#define DISCOVER_REQ_SIZE 16
+#define DISCOVER_RESP_SIZE 56
+
+int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
+ struct domain_device *dev,
+ int phy_id, int *adt)
+{
+ struct smp_resp *disc_resp;
+ u8 *disc_req;
+ int res;
+
+ disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
+ if (!disc_resp)
+ return -ENOMEM;
+
+ disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
+ if (disc_req) {
+ disc_req[0] = SMP_REQUEST;
+ disc_req[1] = SMP_DISCOVER;
+ disc_req[9] = phy_id;
+ } else {
+ kfree(disc_resp);
+ return -ENOMEM;
+ }
+ res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
+ disc_resp, DISCOVER_RESP_SIZE);
+ if (!res) {
+ if (disc_resp->result != SMP_RESP_FUNC_ACC)
+ res = disc_resp->result;
+ else
+ *adt = disc_resp->disc.attached_dev_type;
+ }
+ kfree(disc_req);
+ kfree(disc_resp);
+
+ return res;
+}
+
+static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
+{
+ struct domain_device *dev = idev->domain_dev;
+ struct isci_port *iport = idev->isci_port;
+ struct isci_host *ihost = iport->isci_host;
+ int res, iteration = 0, attached_device_type;
+ #define STP_WAIT_MSECS 25000
+ unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
+ unsigned long deadline = jiffies + tmo;
+ enum {
+ SMP_PHYWAIT_PHYDOWN,
+ SMP_PHYWAIT_PHYUP,
+ SMP_PHYWAIT_DONE
+ } phy_state = SMP_PHYWAIT_PHYDOWN;
+
+ /* While there is time, wait for the phy to go away and come back */
+ while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
+ int event = atomic_read(&iport->event);
+
+ ++iteration;
+
+ tmo = wait_event_timeout(ihost->eventq,
+ event != atomic_read(&iport->event) ||
+ !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
+ tmo);
+ /* link down, stop polling */
+ if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
+ break;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport %p, iteration %d,"
+ " phase %d: time_remaining %lu, bcns = %d\n",
+ __func__, iport, iteration, phy_state,
+ tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
+
+ res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
+ &attached_device_type);
+ tmo = deadline - jiffies;
+
+ if (res) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iteration %d, phase %d:"
+ " SMP error=%d, time_remaining=%lu\n",
+ __func__, iteration, phy_state, res, tmo);
+ break;
+ }
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport %p, iteration %d,"
+ " phase %d: time_remaining %lu, bcns = %d, "
+ "attdevtype = %x\n",
+ __func__, iport, iteration, phy_state,
+ tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
+ attached_device_type);
+
+ switch (phy_state) {
+ case SMP_PHYWAIT_PHYDOWN:
+ /* Has the device gone away? */
+ if (!attached_device_type)
+ phy_state = SMP_PHYWAIT_PHYUP;
+
+ break;
+
+ case SMP_PHYWAIT_PHYUP:
+ /* Has the device come back? */
+ if (attached_device_type)
+ phy_state = SMP_PHYWAIT_DONE;
+ break;
+
+ case SMP_PHYWAIT_DONE:
+ break;
+ }
+
+ }
+ dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
+}
+
+static int isci_reset_device(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
+ struct isci_port *iport = idev->isci_port;
+ enum sci_status status;
+ unsigned long flags;
+ int rc;
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_reset(idev);
+ if (status != SCI_SUCCESS) {
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: sci_remote_device_reset(%p) returned %d!\n",
+ __func__, idev, status);
+
+ return TMF_RESP_FUNC_FAILED;
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Make sure all pending requests are able to be fully terminated. */
+ isci_device_clear_reset_pending(ihost, idev);
+
+ /* If this is a device on an expander, disable BCN processing. */
+ if (!scsi_is_sas_phy_local(phy))
+ set_bit(IPORT_BCN_BLOCKED, &iport->flags);
+
+ rc = sas_phy_reset(phy, true);
+
+ /* Terminate in-progress I/O now. */
+ isci_remote_device_nuke_requests(ihost, idev);
+
+ /* Since all pending TCs have been cleaned, resume the RNC. */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_reset_complete(idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* If this is a device on an expander, bring the phy back up. */
+ if (!scsi_is_sas_phy_local(phy)) {
+ /* A phy reset will cause the device to go away then reappear.
+ * Since libsas will take action on incoming BCNs (eg. remove
+ * a device going through an SMP phy-control driven reset),
+ * we need to wait until the phy comes back up before letting
+ * discovery proceed in libsas.
+ */
+ isci_wait_for_smp_phy_reset(idev, phy->number);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ isci_port_bcn_enable(ihost, idev->isci_port);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: sci_remote_device_reset_complete(%p) "
+ "returned %d!\n", __func__, idev, status);
+ }
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
+
+ return rc;
+}
+
+int isci_task_I_T_nexus_reset(struct domain_device *dev)
+{
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+ ret = isci_reset_device(ihost, idev);
+ out:
+ isci_put_device(idev);
+ return ret;
+}
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct domain_device *dev = sdev_to_domain_dev(cmd->device);
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev) {
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+ ret = isci_reset_device(ihost, idev);
+ out:
+ isci_put_device(idev);
+ return ret;
+}
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
new file mode 100644
index 000000000000..4a7fa90287ef
--- /dev/null
+++ b/drivers/scsi/isci/task.h
@@ -0,0 +1,367 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_TASK_H_
+#define _ISCI_TASK_H_
+
+#include <scsi/sas_ata.h>
+#include "host.h"
+
+struct isci_request;
+
+/**
+ * enum isci_tmf_cb_state - This enum defines the possible states in which the
+ * TMF callback function is invoked during the TMF execution process.
+ *
+ *
+ */
+enum isci_tmf_cb_state {
+
+ isci_tmf_init_state = 0,
+ isci_tmf_started,
+ isci_tmf_timed_out
+};
+
+/**
+ * enum isci_tmf_function_codes - This enum defines the possible preparations
+ * of task management requests.
+ *
+ *
+ */
+enum isci_tmf_function_codes {
+
+ isci_tmf_func_none = 0,
+ isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
+ isci_tmf_ssp_lun_reset = TMF_LU_RESET,
+ isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
+ isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
+};
+/**
+ * struct isci_tmf - This class represents the task management object which
+ * acts as an interface to libsas for processing task management requests
+ *
+ *
+ */
+struct isci_tmf {
+
+ struct completion *complete;
+ enum sas_protocol proto;
+ union {
+ struct ssp_response_iu resp_iu;
+ struct dev_to_host_fis d2h_fis;
+ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+ } resp;
+ unsigned char lun[8];
+ u16 io_tag;
+ struct isci_remote_device *device;
+ enum isci_tmf_function_codes tmf_code;
+ int status;
+
+ /* The optional callback function allows the user process to
+ * track the TMF transmit / timeout conditions.
+ */
+ void (*cb_state_func)(
+ enum isci_tmf_cb_state,
+ struct isci_tmf *, void *);
+ void *cb_data;
+
+};
+
+static inline void isci_print_tmf(struct isci_tmf *tmf)
+{
+ if (SAS_PROTOCOL_SATA == tmf->proto)
+ dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ "%s: status = %x\n"
+ "tmf->resp.d2h_fis.status = %x\n"
+ "tmf->resp.d2h_fis.error = %x\n",
+ __func__,
+ tmf->status,
+ tmf->resp.d2h_fis.status,
+ tmf->resp.d2h_fis.error);
+ else
+ dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ "%s: status = %x\n"
+ "tmf->resp.resp_iu.data_present = %x\n"
+ "tmf->resp.resp_iu.status = %x\n"
+ "tmf->resp.resp_iu.data_length = %x\n"
+ "tmf->resp.resp_iu.data[0] = %x\n"
+ "tmf->resp.resp_iu.data[1] = %x\n"
+ "tmf->resp.resp_iu.data[2] = %x\n"
+ "tmf->resp.resp_iu.data[3] = %x\n",
+ __func__,
+ tmf->status,
+ tmf->resp.resp_iu.datapres,
+ tmf->resp.resp_iu.status,
+ be32_to_cpu(tmf->resp.resp_iu.response_data_len),
+ tmf->resp.resp_iu.resp_data[0],
+ tmf->resp.resp_iu.resp_data[1],
+ tmf->resp.resp_iu.resp_data[2],
+ tmf->resp.resp_iu.resp_data[3]);
+}
+
+
+int isci_task_execute_task(
+ struct sas_task *task,
+ int num,
+ gfp_t gfp_flags);
+
+int isci_task_abort_task(
+ struct sas_task *task);
+
+int isci_task_abort_task_set(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_aca(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_task_set(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_query_task(
+ struct sas_task *task);
+
+int isci_task_lu_reset(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_nexus_port(
+ struct asd_sas_port *port);
+
+int isci_task_clear_nexus_ha(
+ struct sas_ha_struct *ha);
+
+int isci_task_I_T_nexus_reset(
+ struct domain_device *d_device);
+
+void isci_task_request_complete(
+ struct isci_host *isci_host,
+ struct isci_request *request,
+ enum sci_task_status completion_status);
+
+u16 isci_task_ssp_request_get_io_tag_to_manage(
+ struct isci_request *request);
+
+u8 isci_task_ssp_request_get_function(
+ struct isci_request *request);
+
+
+void *isci_task_ssp_request_get_response_data_address(
+ struct isci_request *request);
+
+u32 isci_task_ssp_request_get_response_data_length(
+ struct isci_request *request);
+
+int isci_queuecommand(
+ struct scsi_cmnd *scsi_cmd,
+ void (*donefunc)(struct scsi_cmnd *));
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd);
+
+/**
+ * enum isci_completion_selection - This enum defines the possible actions to
+ * take with respect to a given request's notification back to libsas.
+ *
+ *
+ */
+enum isci_completion_selection {
+
+ isci_perform_normal_io_completion, /* Normal notify (task_done) */
+ isci_perform_aborted_io_completion, /* No notification. */
+ isci_perform_error_io_completion /* Use sas_task_abort */
+};
+
+static inline void isci_set_task_doneflags(
+ struct sas_task *task)
+{
+ /* Since no further action will be taken on this task,
+ * make sure to mark it complete from the lldd perspective.
+ */
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+}
+/**
+ * isci_task_all_done() - This function clears the task bits to indicate the
+ * LLDD is done with the task.
+ *
+ *
+ */
+static inline void isci_task_all_done(
+ struct sas_task *task)
+{
+ unsigned long flags;
+
+ /* Since no further action will be taken on this task,
+ * make sure to mark it complete from the lldd perspective.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ isci_set_task_doneflags(task);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+}
+
+/**
+ * isci_task_set_completion_status() - This function sets the completion status
+ * for the request.
+ * @task: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ *
+ * @return The new notification mode for the request.
+ */
+static inline enum isci_completion_selection
+isci_task_set_completion_status(
+ struct sas_task *task,
+ enum service_response response,
+ enum exec_status status,
+ enum isci_completion_selection task_notification_selection)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ /* If a device reset is being indicated, make sure the I/O
+ * is in the error path.
+ */
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
+ /* Fail the I/O to make sure it goes into the error path. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
+
+ task_notification_selection = isci_perform_error_io_completion;
+ }
+ task->task_status.resp = response;
+ task->task_status.stat = status;
+
+ switch (task_notification_selection) {
+
+ case isci_perform_error_io_completion:
+
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ /* There is no error escalation in the SMP case.
+ * Convert to a normal completion to avoid the
+ * timeout in the discovery path and to let the
+ * next action take place quickly.
+ */
+ task_notification_selection
+ = isci_perform_normal_io_completion;
+
+ /* Fall through to the normal case... */
+ } else {
+ /* Use sas_task_abort */
+ /* Leave SAS_TASK_STATE_DONE clear
+ * Leave SAS_TASK_AT_INITIATOR set.
+ */
+ break;
+ }
+
+ case isci_perform_aborted_io_completion:
+ /* This path can occur with task-managed requests as well as
+ * requests terminated because of LUN or device resets.
+ */
+ /* Fall through to the normal case... */
+ case isci_perform_normal_io_completion:
+ /* Normal notification (task_done) */
+ isci_set_task_doneflags(task);
+ break;
+ default:
+ WARN_ONCE(1, "unknown task_notification_selection: %d\n",
+ task_notification_selection);
+ break;
+ }
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ return task_notification_selection;
+
+}
+/**
+ * isci_execpath_callback() - This function is called from the task execute
+ * path when the task needs to call back to libsas about a submit-time task
+ * failure. The callback occurs either through the task's done function or
+ * through sas_task_abort. In the case of regular non-discovery SATA/STP I/O
+ * requests, libsas takes the host lock before calling execute task, so in
+ * that situation the host lock must be managed before calling the callback.
+ *
+ * @ihost: This parameter is the controller to which the I/O request was sent.
+ * @task: This parameter is the I/O request.
+ * @func: This parameter is the function to call in the correct context.
+ */
+static inline void isci_execpath_callback(struct isci_host *ihost,
+ struct sas_task *task,
+ void (*func)(struct sas_task *))
+{
+ struct domain_device *dev = task->dev;
+
+ if (dev_is_sata(dev) && task->uldd_task) {
+ unsigned long flags;
+
+ /* Since we are still in the submit path, and since
+ * libsas takes the host lock on behalf of SATA
+ * devices before I/O starts (in the non-discovery case),
+ * we need to unlock before we can call the callback function.
+ */
+ raw_local_irq_save(flags);
+ spin_unlock(dev->sata_dev.ap->lock);
+ func(task);
+ spin_lock(dev->sata_dev.ap->lock);
+ raw_local_irq_restore(flags);
+ } else
+ func(task);
+}
+#endif /* !defined(_ISCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
new file mode 100644
index 000000000000..e9e1e2abacb9
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -0,0 +1,225 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "unsolicited_frame_control.h"
+#include "registers.h"
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+{
+ struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
+ struct sci_unsolicited_frame *uf;
+ u32 buf_len, header_len, i;
+ dma_addr_t dma;
+ size_t size;
+ void *virt;
+
+ /*
+ * Prepare all of the memory sizes for the UF headers, UF address
+ * table, and UF buffers themselves.
+ */
+ buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
+ size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
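+ /*
+ * Layout of the single coherent allocation made below:
+ * [0, buf_len) - unsolicited frame buffers
+ * [buf_len, buf_len + header_len) - frame headers
+ * [buf_len + header_len, size) - frame address table
+ */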
+
+ /*
+ * The Unsolicited Frame buffers are set at the start of the UF
+ * memory descriptor entry. The headers and address table will be
+ * placed after the buffers.
+ */
+ virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ /*
+ * Program the location of the UF header table into the SCU.
+ * Notes:
+ * - The address must align on a 64-byte boundary. This is already
+ * guaranteed, since the unsolicited frame buffers preceding it are
+ * 1KB-aligned and a multiple of 1KB in size.
+ * - Program unused header entries to overlap with the last
+ * unsolicited frame. The silicon will never DMA to these unused
+ * headers, since we program the UF address table pointers to
+ * NULL.
+ */
+ uf_control->headers.physical_address = dma + buf_len;
+ uf_control->headers.array = virt + buf_len;
+
+ /*
+ * Program the location of the UF address table into the SCU.
+ * Notes:
+ * - The address must align on a 64-bit boundary. This is already
+ * guaranteed, since the headers programmed above start on a 64-byte
+ * boundary and each header is 64 bytes in size.
+ */
+ uf_control->address_table.physical_address = dma + buf_len + header_len;
+ uf_control->address_table.array = virt + buf_len + header_len;
+ uf_control->get = 0;
+
+ /*
+ * UF buffer requirements are:
+ * - The last entry in the UF queue is not NULL.
+ * - There is a power of 2 number of entries (NULL or not-NULL)
+ * programmed into the queue.
+ * - Aligned on a 1KB boundary. */
+
+ /*
+ * Program the actual used UF buffers into the UF address table and
+ * the controller's array of UFs.
+ */
+ for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
+ uf = &uf_control->buffers.array[i];
+
+ uf_control->address_table.array[i] = dma;
+
+ uf->buffer = virt;
+ uf->header = &uf_control->headers.array[i];
+ uf->state = UNSOLICITED_FRAME_EMPTY;
+
+ /*
+ * Increment the address of the physical and virtual memory
+ * pointers. Everything is aligned on a 1KB boundary and advances
+ * in 1KB increments.
+ */
+ virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ }
+
+ return 0;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_header)
+{
+ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+ /* Skip the first word in the frame since this is a control word used
+ * by the hardware.
+ */
+ *frame_header = &uf_control->buffers.array[frame_index].header->data;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_buffer)
+{
+ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+ *frame_buffer = uf_control->buffers.array[frame_index].buffer;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index)
+{
+ u32 frame_get;
+ u32 frame_cycle;
+
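+ /*
+ * Split the software get pointer into its array index (the low bits)
+ * and its cycle bit; SCU_MAX_UNSOLICITED_FRAMES is a power of two,
+ * so the two masks below extract each part directly.
+ */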
+ frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
+ frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
+
+ /*
+ * In the event there are NULL entries in the UF table, we need to
+ * advance the get pointer in order to find out if this frame should
+ * be released (i.e. update the get pointer)
+ */
+ while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+ upper_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+ frame_get < SCU_MAX_UNSOLICITED_FRAMES)
+ frame_get++;
+
+ /*
+ * The table has a NULL entry as its last element. This is
+ * illegal.
+ */
+ BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
+ if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
+ return false;
+
+ uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
+
+ if (frame_get != frame_index) {
+ /*
+ * Frames remain in use until we advance the get pointer
+ * so there is nothing we can do here
+ */
+ return false;
+ }
+
+ /*
+ * The frame index is equal to the current get pointer, so we can
+ * now free up all of the consecutively released frame entries.
+ */
+ while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
+ uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
+
+ if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
+ frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
+ frame_get = 0;
+ } else
+ frame_get++;
+ }
+
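+ /*
+ * Publish the new software get pointer: the queue enable bit, the
+ * (possibly toggled) cycle bit, and the index of the next frame
+ * that is still in use.
+ */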
+ uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
+
+ return true;
+}
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
new file mode 100644
index 000000000000..31cb9506f52d
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -0,0 +1,278 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+
+#include "isci.h"
+
+#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
+
+/**
+ * struct scu_unsolicited_frame_header -
+ *
+ * This structure delineates the format of an unsolicited frame header. The
+ * first DWORD contains UF attributes defined by the silicon architecture. The
+ * remaining data holds the actual header information received on the link.
+ */
+struct scu_unsolicited_frame_header {
+ /**
+ * This field indicates if there is an Initiator Index Table entry with
+ * which this header is associated.
+ */
+ u32 iit_exists:1;
+
+ /**
+ * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
+ */
+ u32 protocol_type:3;
+
+ /**
+ * This field indicates if the frame is an address frame (IAF or OAF)
+ * or if it is an information unit frame.
+ */
+ u32 is_address_frame:1;
+
+ /**
+ * This field simply indicates the connection rate at which the frame
+ * was received.
+ */
+ u32 connection_rate:4;
+
+ u32 reserved:23;
+
+ /**
+ * This field represents the actual header data received on the link.
+ */
+ u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
+
+};
+
+
+
+/**
+ * enum unsolicited_frame_state -
+ *
+ * This enumeration represents the current unsolicited frame state. The
+ * controller object cannot update the hardware unsolicited frame put pointer
+ * unless it has already processed the prior unsolicited frames.
+ */
+enum unsolicited_frame_state {
+ /**
+ * This state is when the frame is empty and not in use. It is
+ * different from the released state in that the hardware could DMA
+ * data to this frame buffer.
+ */
+ UNSOLICITED_FRAME_EMPTY,
+
+ /**
+ * This state is set when the frame buffer is in use by some
+ * object in the system.
+ */
+ UNSOLICITED_FRAME_IN_USE,
+
+ /**
+ * This state is set when the frame is returned to the free pool
+ * but one or more frames prior to this one are still in use.
+ * Once all of the frames before this one are freed, it will go to
+ * the empty state.
+ */
+ UNSOLICITED_FRAME_RELEASED,
+
+ UNSOLICITED_FRAME_MAX_STATES
+};
+
+/**
+ * struct sci_unsolicited_frame -
+ *
+ * This is the unsolicited frame data structure; it acts as the container for
+ * the current frame state, frame header and frame buffer.
+ */
+struct sci_unsolicited_frame {
+ /**
+ * This field contains the current frame state
+ */
+ enum unsolicited_frame_state state;
+
+ /**
+ * This field points to the frame header data.
+ */
+ struct scu_unsolicited_frame_header *header;
+
+ /**
+ * This field points to the frame buffer data.
+ */
+ void *buffer;
+
+};
+
+/**
+ * struct sci_uf_header_array -
+ *
+ * This structure contains all of the unsolicited frame header information.
+ */
+struct sci_uf_header_array {
+ /**
+ * This field represents a virtual pointer to the start
+ * address of the UF header array. The headers are laid out
+ * contiguously as required by the hardware.
+ */
+ struct scu_unsolicited_frame_header *array;
+
+ /**
+ * This field specifies the physical address location for the UF
+ * header array.
+ */
+ dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_uf_buffer_array -
+ *
+ * This structure contains all of the unsolicited frame buffer (actual payload)
+ * information.
+ */
+struct sci_uf_buffer_array {
+ /**
+ * This field is the unsolicited frame data; it is used to manage
+ * the data for the unsolicited frame requests. It also represents
+ * the virtual address location that corresponds to the
+ * physical_address field.
+ */
+ struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
+
+ /**
+ * This field specifies the physical address location for the UF
+ * buffer array.
+ */
+ dma_addr_t physical_address;
+};
+
+/**
+ * struct sci_uf_address_table_array -
+ *
+ * This object maintains all of the unsolicited frame address table specific
+ * data. The address table is a collection of 64-bit pointers that point to
+ * 1KB buffers into which the silicon will DMA unsolicited frames.
+ */
+struct sci_uf_address_table_array {
+ /**
+ * This field represents a virtual pointer that refers to the
+ * starting address of the UF address table.
+ * 64-bit pointers are required by the hardware.
+ */
+ dma_addr_t *array;
+
+ /**
+ * This field specifies the physical address location for the UF
+ * address table.
+ */
+ dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_unsolicited_frame_control -
+ *
+ * This object contains all of the data necessary to handle unsolicited frames.
+ */
+struct sci_unsolicited_frame_control {
+ /**
+ * This field is the software copy of the unsolicited frame queue
+ * get pointer. The controller object writes this value to the
+ * hardware to let it know it may put more unsolicited frames into the queue.
+ */
+ u32 get;
+
+ /**
+ * This field contains all of the unsolicited frame header
+ * specific fields.
+ */
+ struct sci_uf_header_array headers;
+
+ /**
+ * This field contains all of the unsolicited frame buffer
+ * specific fields.
+ */
+ struct sci_uf_buffer_array buffers;
+
+ /**
+ * This field contains all of the unsolicited frame address table
+ * specific fields.
+ */
+ struct sci_uf_address_table_array address_table;
+
+};
+
+struct isci_host;
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+
+enum sci_status sci_unsolicited_frame_control_get_header(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_header);
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_buffer);
+
+bool sci_unsolicited_frame_control_release_frame(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index);
+
+#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 795828b90f45..8945e201e42e 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -116,9 +116,7 @@
(((i)->fifo_lvl_mask + 1))) \
? 1 : 0)
-#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
- (((i)->fifo_lvl_mask + 1) << 1)) \
- ? 1 : 0)
+#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & (1 << (i)->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 2a20dabec76d..d6620ad309ce 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -516,8 +516,17 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
+ ssb_pcicore_fix_sprom_core_index(pc);
+
/* Disable PCI interrupts. */
ssb_write32(pc->dev, SSB_INTVEC, 0);
+
+ /* Additional PCIe always once-executed workarounds */
+ if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+ ssb_pcicore_serdes_workaround(pc);
+ /* TODO: ASPM */
+ /* TODO: Clock Request Update */
+ }
}
void ssb_pcicore_init(struct ssb_pcicore *pc)
@@ -529,8 +538,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
if (!ssb_device_is_enabled(dev))
ssb_device_enable(dev, 0);
- ssb_pcicore_fix_sprom_core_index(pc);
-
#ifdef CONFIG_SSB_PCICORE_HOSTMODE
pc->hostmode = pcicore_is_in_hostmode(pc);
if (pc->hostmode)
@@ -538,13 +545,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
#endif /* CONFIG_SSB_PCICORE_HOSTMODE */
if (!pc->hostmode)
ssb_pcicore_init_clientmode(pc);
-
- /* Additional PCIe always once-executed workarounds */
- if (dev->id.coreid == SSB_DEV_PCIE) {
- ssb_pcicore_serdes_workaround(pc);
- /* TODO: ASPM */
- /* TODO: Clock Request Update */
- }
}
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 4039eda2a15b..4a9e563f40fa 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -672,8 +672,6 @@ static void imon_incoming_packet(struct imon_context *context,
static void usb_rx_callback(struct urb *urb)
{
struct imon_context *context;
- unsigned char *buf;
- int len;
int intfnum = 0;
if (!urb)
@@ -683,9 +681,6 @@ static void usb_rx_callback(struct urb *urb)
if (!context)
return;
- buf = urb->transfer_buffer;
- len = urb->actual_length;
-
switch (urb->status) {
case -ENOENT: /* usbcore unlink successful! */
return;
@@ -728,7 +723,6 @@ static int imon_probe(struct usb_interface *interface,
int ir_ep_found = 0;
int alloc_status = 0;
int vfd_proto_6p = 0;
- int code_length;
struct imon_context *context = NULL;
int i;
u16 vendor, product;
@@ -749,8 +743,6 @@ static int imon_probe(struct usb_interface *interface,
else
context->display = 1;
- code_length = BUF_CHUNK_SIZE * 8;
-
usbdev = usb_get_dev(interface_to_usbdev(interface));
iface_desc = interface->cur_altsetting;
num_endpts = iface_desc->desc.bNumEndpoints;
@@ -856,7 +848,7 @@ static int imon_probe(struct usb_interface *interface,
strcpy(driver->name, MOD_NAME);
driver->minor = -1;
- driver->code_length = sizeof(int) * 8;
+ driver->code_length = BUF_CHUNK_SIZE * 8;
driver->sample_rate = 0;
driver->features = LIRC_CAN_REC_MODE2;
driver->data = context;
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 4a3cca03224a..805df913bb6e 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -838,7 +838,23 @@ static int hardware_init_port(void)
static int init_port(void)
{
- int i, nlow, nhigh;
+ int i, nlow, nhigh, result;
+
+ result = request_irq(irq, irq_handler,
+ IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
+ LIRC_DRIVER_NAME, (void *)&hardware);
+
+ switch (result) {
+ case -EBUSY:
+ printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
+ return -EBUSY;
+ case -EINVAL:
+ printk(KERN_ERR LIRC_DRIVER_NAME
+ ": Bad irq number or handler\n");
+ return -EINVAL;
+ default:
+ break;
+ };
/* Reserve io region. */
/*
@@ -893,34 +909,17 @@ static int init_port(void)
printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active "
"%s receiver\n", sense ? "low" : "high");
+ dprintk("Interrupt %d, port %04x obtained\n", irq, io);
return 0;
}
static int set_use_inc(void *data)
{
- int result;
unsigned long flags;
/* initialize timestamp */
do_gettimeofday(&lasttv);
- result = request_irq(irq, irq_handler,
- IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
- LIRC_DRIVER_NAME, (void *)&hardware);
-
- switch (result) {
- case -EBUSY:
- printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
- return -EBUSY;
- case -EINVAL:
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": Bad irq number or handler\n");
- return -EINVAL;
- default:
- dprintk("Interrupt %d, port %04x obtained\n", irq, io);
- break;
- }
-
spin_lock_irqsave(&hardware[type].lock, flags);
/* Set DLAB 0. */
@@ -945,10 +944,6 @@ static void set_use_dec(void *data)
soutp(UART_IER, sinp(UART_IER) &
(~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
spin_unlock_irqrestore(&hardware[type].lock, flags);
-
- free_irq(irq, (void *)&hardware);
-
- dprintk("freed IRQ %d\n", irq);
}
static ssize_t lirc_write(struct file *file, const char *buf,
@@ -1256,6 +1251,9 @@ exit_serial_exit:
static void __exit lirc_serial_exit_module(void)
{
lirc_serial_exit();
+
+ free_irq(irq, (void *)&hardware);
+
if (iommap != 0)
release_mem_region(iommap, 8 << ioshift);
else
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index a7b46f24f24e..0d3864594b12 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -739,23 +739,16 @@ static void send_space(unsigned long len)
static void send_pulse(unsigned long len)
{
long bytes_out = len / TIME_CONST;
- long time_left;
- time_left = (long)len - (long)bytes_out * (long)TIME_CONST;
- if (bytes_out == 0) {
+ if (bytes_out == 0)
bytes_out++;
- time_left = 0;
- }
+
while (bytes_out--) {
outb(PULSE, io + UART_TX);
/* FIXME treba seriozne cakanie z char/serial.c */
while (!(inb(io + UART_LSR) & UART_LSR_THRE))
;
}
-#if 0
- if (time_left > 0)
- safe_udelay(time_left);
-#endif
}
#endif
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index dd6a57c3c3a3..4e051f6b52db 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -475,14 +475,14 @@ static int lirc_thread(void *arg)
dprintk("poll thread started\n");
while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
/* if device not opened, we can sleep half a second */
if (atomic_read(&ir->open_count) == 0) {
schedule_timeout(HZ/2);
continue;
}
- set_current_state(TASK_INTERRUPTIBLE);
-
/*
* This is ~113*2 + 24 + jitter (2*repeat gap + code length).
* We use this interval as the chip resets every time you poll
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 6d5d6e679fc7..af9b7814965a 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1709,12 +1709,13 @@ static int atmel_serial_resume(struct platform_device *pdev)
static int __devinit atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *port;
+ struct atmel_uart_data *pdata = pdev->dev.platform_data;
void *data;
int ret;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
- port = &atmel_ports[pdev->id];
+ port = &atmel_ports[pdata->num];
port->backup_imr = 0;
atmel_init_port(port, pdev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 64c7ab4702df..0b5ec234c787 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1147,6 +1147,14 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
* any drivers bound to them (a key side effect)
*/
if (dev->actconfig) {
+ /*
+ * FIXME: In order to avoid self-deadlock involving the
+ * bandwidth_mutex, we have to mark all the interfaces
+ * before unregistering any of them.
+ */
+ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++)
+ dev->actconfig->interface[i]->unregistering = 1;
+
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *interface;
@@ -1156,7 +1164,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
- interface->unregistering = 1;
remove_intf_ep_devs(interface);
device_del(&interface->dev);
}
@@ -1286,6 +1293,8 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
interface);
return -EINVAL;
}
+ if (iface->unregistering)
+ return -ENODEV;
alt = usb_altnum_to_altsetting(iface, alternate);
if (!alt) {
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 2cd9a60c7f3a..4e4833168087 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -46,7 +46,6 @@
#include <asm/system.h>
#include <asm/unaligned.h>
#include <asm/dma.h>
-#include <asm/cacheflush.h>
#include "fsl_usb2_udc.h"
@@ -118,6 +117,17 @@ static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
#define fsl_readl(p) (*_fsl_readl)((p))
#define fsl_writel(v, p) (*_fsl_writel)((v), (p))
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata)
+{
+ if (pdata->big_endian_mmio) {
+ _fsl_readl = _fsl_readl_be;
+ _fsl_writel = _fsl_writel_be;
+ } else {
+ _fsl_readl = _fsl_readl_le;
+ _fsl_writel = _fsl_writel_le;
+ }
+}
+
static inline u32 cpu_to_hc32(const u32 x)
{
return udc_controller->pdata->big_endian_desc
@@ -132,6 +142,8 @@ static inline u32 hc32_to_cpu(const u32 x)
: le32_to_cpu((__force __le32)x);
}
#else /* !CONFIG_PPC32 */
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata) {}
+
#define fsl_readl(addr) readl(addr)
#define fsl_writel(val32, addr) writel(val32, addr)
#define cpu_to_hc32(x) cpu_to_le32(x)
@@ -1277,6 +1289,11 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
req->req.complete = NULL;
req->dtd_count = 0;
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf, req->req.length,
+ ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+
if (fsl_req_to_dtd(req) == 0)
fsl_queue_td(ep, req);
else
@@ -1348,9 +1365,6 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
/* Fill in the reqest structure */
*((u16 *) req->req.buf) = cpu_to_le16(tmp);
- /* flush cache for the req buffer */
- flush_dcache_range((u32)req->req.buf, (u32)req->req.buf + 8);
-
req->ep = ep;
req->req.length = 2;
req->req.status = -EINPROGRESS;
@@ -1358,6 +1372,11 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
req->req.complete = NULL;
req->dtd_count = 0;
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf, req->req.length,
+ ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+
/* prime the data phase */
if ((fsl_req_to_dtd(req) == 0))
fsl_queue_td(ep, req);
@@ -2354,7 +2373,6 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
struct fsl_req, req);
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
- udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
@@ -2470,13 +2488,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
}
/* Set accessors only after pdata->init() ! */
- if (pdata->big_endian_mmio) {
- _fsl_readl = _fsl_readl_be;
- _fsl_writel = _fsl_writel_be;
- } else {
- _fsl_readl = _fsl_readl_le;
- _fsl_writel = _fsl_writel_le;
- }
+ fsl_set_accessors(pdata);
#ifndef CONFIG_ARCH_MXC
if (pdata->have_sysif_regs)
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 5fc983c5b92c..cf03ad067147 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -447,6 +447,8 @@ static int clcdfb_register(struct clcd_fb *fb)
goto out;
}
+ fb->fb.device = &fb->dev->dev;
+
fb->fb.fix.mmio_start = fb->dev->res.start;
fb->fb.fix.mmio_len = resource_size(&fb->dev->res);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index bedf5be27f05..0acc7d65aeaa 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -555,8 +555,6 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
static int fsl_diu_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- unsigned long htotal, vtotal;
-
pr_debug("check_var xres: %d\n", var->xres);
pr_debug("check_var yres: %d\n", var->yres);
@@ -635,20 +633,6 @@ static int fsl_diu_check_var(struct fb_var_screeninfo *var,
break;
}
- /* If the pixclock is below the minimum spec'd value then set to
- * refresh rate for 60Hz since this is supported by most monitors.
- * Refer to Documentation/fb/ for calculations.
- */
- if ((var->pixclock < MIN_PIX_CLK) || (var->pixclock > MAX_PIX_CLK)) {
- htotal = var->xres + var->right_margin + var->hsync_len +
- var->left_margin;
- vtotal = var->yres + var->lower_margin + var->vsync_len +
- var->upper_margin;
- var->pixclock = (vtotal * htotal * 6UL) / 100UL;
- var->pixclock = KHZ2PICOS(var->pixclock);
- pr_debug("pixclock set for 60Hz refresh = %u ps\n",
- var->pixclock);
- }
var->height = -1;
var->width = -1;
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index c6b554f72c6d..5a5d0928df33 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -29,7 +29,7 @@ static int crt_option = 1;
static char panel_option[32] = "";
/* Modes relevant to the GX1 (taken from modedb.c) */
-static const struct fb_videomode __initdata gx1_modedb[] = {
+static const struct fb_videomode __devinitdata gx1_modedb[] = {
/* 640x480-60 VESA */
{ NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -195,7 +195,7 @@ static int gx1fb_blank(int blank_mode, struct fb_info *info)
return par->vid_ops->blank_display(info, blank_mode);
}
-static int __init gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
+static int __devinit gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
{
struct geodefb_par *par = info->par;
unsigned gx_base;
@@ -268,7 +268,7 @@ static struct fb_ops gx1fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
+static struct fb_info * __devinit gx1fb_init_fbinfo(struct device *dev)
{
struct geodefb_par *par;
struct fb_info *info;
@@ -318,7 +318,7 @@ static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
return info;
}
-static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct geodefb_par *par;
struct fb_info *info;
@@ -382,7 +382,7 @@ static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *
return ret;
}
-static void gx1fb_remove(struct pci_dev *pdev)
+static void __devexit gx1fb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct geodefb_par *par = info->par;
@@ -441,7 +441,7 @@ static struct pci_driver gx1fb_driver = {
.name = "gx1fb",
.id_table = gx1fb_id_table,
.probe = gx1fb_probe,
- .remove = gx1fb_remove,
+ .remove = __devexit_p(gx1fb_remove),
};
static int __init gx1fb_init(void)
@@ -456,7 +456,7 @@ static int __init gx1fb_init(void)
return pci_register_driver(&gx1fb_driver);
}
-static void __exit gx1fb_cleanup(void)
+static void __devexit gx1fb_cleanup(void)
{
pci_unregister_driver(&gx1fb_driver);
}
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index fbef15f7a218..614251a9af91 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -233,7 +233,7 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
videomemory = vzalloc(videomemorysize);
if (!videomemory)
- return retval;
+ goto err_videomem_alloc;
info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
if (!info)
@@ -275,6 +275,7 @@ err_fbreg:
framebuffer_release(info);
err_fballoc:
vfree(videomemory);
+err_videomem_alloc:
module_put(board->owner);
return retval;
}
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index 9170c82b495c..cc7d7329dc15 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -218,7 +218,7 @@ static inline void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
icb_offset = 0xc0000000 | (cfg->current_reg << 23);
*icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24);
- if ((*icb_addr_c) && is_nvcolor(cfg->pixelformat))
+ if (is_nvcolor(cfg->pixelformat))
*icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24);
}
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 87f0be1e78b5..6294dca95500 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -1664,7 +1664,7 @@ static void sm501fb_stop(struct sm501fb_info *info)
resource_size(info->regs_res));
}
-static int sm501fb_init_fb(struct fb_info *fb,
+static int __devinit sm501fb_init_fb(struct fb_info *fb,
enum sm501_controller head,
const char *fbname)
{
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 52b0f3e8ccac..816a4fda04f5 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1233,8 +1233,12 @@ static int dlfb_setup_modes(struct dlfb_data *dev,
if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
fb_add_videomode(&info->monspecs.modedb[i],
&info->modelist);
- else /* if we've removed top/best mode */
- info->monspecs.misc &= ~FB_MISC_1ST_DETAIL;
+ else {
+ if (i == 0)
+ /* if we've removed top/best mode */
+ info->monspecs.misc
+ &= ~FB_MISC_1ST_DETAIL;
+ }
}
default_vmode = fb_find_best_display(&info->monspecs,
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index a99bbe86db13..501b3406c6d5 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -175,6 +175,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
static void vesafb_destroy(struct fb_info *info)
{
+ fb_dealloc_cmap(&info->cmap);
if (info->screen_base)
iounmap(info->screen_base);
release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index ad57593d224a..a0c8965c1a79 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -109,6 +109,7 @@ struct ds1wm_data {
/* byte to write that makes all intr disabled, */
/* considering active_state (IAS) (optimization) */
u8 int_en_reg_none;
+ unsigned int reset_recover_delay; /* see ds1wm.h */
};
static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
@@ -187,6 +188,9 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
return 1;
}
+ if (ds1wm_data->reset_recover_delay)
+ msleep(ds1wm_data->reset_recover_delay);
+
return 0;
}
@@ -490,6 +494,7 @@ static int ds1wm_probe(struct platform_device *pdev)
}
ds1wm_data->irq = res->start;
ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
+ ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
diff --git a/firmware/Makefile b/firmware/Makefile
index 0d15a3d113a2..5f43bfba3c7a 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -82,6 +82,7 @@ fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
advansys/3550.bin advansys/38C0800.bin
+fw-shipped-$(CONFIG_SCSI_ISCI) += isci/isci_firmware.bin
fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
qlogic/12160.bin
fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
diff --git a/firmware/isci/isci_firmware.bin.ihex b/firmware/isci/isci_firmware.bin.ihex
new file mode 100644
index 000000000000..2e6619570072
--- /dev/null
+++ b/firmware/isci/isci_firmware.bin.ihex
@@ -0,0 +1,16 @@
+:10000000495343554F454D42E80018100002000087
+:1000100000000000000000000101000000000000DE
+:10002000FFFFCF5F0100000008DD0B0000FC0F00A8
+:10003000097C0B006EFC0A00FFFFCF5F010000008F
+:1000400008DD0B0000FC0F00097C0B006EFC0A00B1
+:10005000FFFFCF5F0100000008DD0B0000FC0F0078
+:10006000097C0B006EFC0A00FFFFCF5F010000005F
+:1000700008DD0B0000FC0F00097C0B006EFC0A0081
+:100080000101000000000000FFFFCF5F0200000040
+:1000900008DD0B0000FC0F00097C0B006EFC0A0061
+:1000A000FFFFCF5F0200000008DD0B0000FC0F0027
+:1000B000097C0B006EFC0A00FFFFCF5F020000000E
+:1000C00008DD0B0000FC0F00097C0B006EFC0A0031
+:1000D000FFFFCF5F0200000008DD0B0000FC0F00F7
+:0800E000097C0B006EFC0A0014
+:00000001FF
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 63039ed9576f..2bc5dc644b4c 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1864,6 +1864,7 @@ cleanup:
kfree(psinfo);
kfree(notes);
kfree(fpu);
+ kfree(shdr4extnum);
#ifdef ELF_CORE_COPY_XFPREGS
kfree(xfpu);
#endif
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index f30ac05dbda7..3b859a3e6a0e 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1335,6 +1335,11 @@ struct btrfs_ioctl_defrag_range_args {
*/
#define BTRFS_STRING_ITEM_KEY 253
+/*
+ * Flags for mount options.
+ *
+ * Note: don't forget to add new options to btrfs_show_options()
+ */
#define BTRFS_MOUNT_NODATASUM (1 << 0)
#define BTRFS_MOUNT_NODATACOW (1 << 1)
#define BTRFS_MOUNT_NOBARRIER (1 << 2)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d340f63d8f07..3601f0aebddf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2678,12 +2678,14 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
int ret;
/*
- * If root is tree root, it means this inode is used to
- * store free space information. And these inodes are updated
- * when committing the transaction, so they needn't delaye to
- * be updated, or deadlock will occured.
+ * If the inode is a free space inode, we can deadlock during commit
+ * if we put it into the delayed code.
+ *
+ * The data relocation inode should also be directly updated
+ * without delay
*/
- if (!is_free_space_inode(root, inode)) {
+ if (!is_free_space_inode(root, inode)
+ && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_delayed_update_inode(trans, root, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 0bb4ebbb71b7..15634d4648d7 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -723,6 +723,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",clear_cache");
if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
seq_puts(seq, ",user_subvol_rm_allowed");
+ if (btrfs_test_opt(root, ENOSPC_DEBUG))
+ seq_puts(seq, ",enospc_debug");
+ if (btrfs_test_opt(root, AUTO_DEFRAG))
+ seq_puts(seq, ",autodefrag");
+ if (btrfs_test_opt(root, INODE_MAP_CACHE))
+ seq_puts(seq, ",inode_cache");
return 0;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1efa56e18f9b..19450bc53632 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2098,7 +2098,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
chunk_root->root_key.objectid,
found_key.objectid,
found_key.offset);
- BUG_ON(ret && ret != -ENOSPC);
+ if (ret && ret != -ENOSPC)
+ goto error;
key.offset = found_key.offset - 1;
}
ret = 0;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 9542f07d0b93..4698a5c553dc 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -290,7 +290,6 @@ static int striped_read(struct inode *inode,
struct ceph_inode_info *ci = ceph_inode(inode);
u64 pos, this_len;
int io_align, page_align;
- int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
int left, pages_left;
int read;
struct page **page_pos;
@@ -326,12 +325,11 @@ more:
ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
if (ret > 0) {
- int didpages =
- ((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;
+ int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
if (read < pos - off) {
dout(" zero gap %llu to %llu\n", off + read, pos);
- ceph_zero_page_vector_range(page_off + read,
+ ceph_zero_page_vector_range(page_align + read,
pos - off - read, pages);
}
pos += ret;
@@ -356,7 +354,7 @@ more:
left = inode->i_size - pos;
dout("zero tail %d\n", left);
- ceph_zero_page_vector_range(page_off + read, left,
+ ceph_zero_page_vector_range(page_align + read, left,
pages);
read += left;
}
@@ -478,9 +476,6 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
else
pos = *offset;
- io_align = pos & ~PAGE_MASK;
- buf_align = (unsigned long)data & ~PAGE_MASK;
-
ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
if (ret < 0)
return ret;
@@ -504,6 +499,8 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
* boundary. this isn't atomic, unfortunately. :(
*/
more:
+ io_align = pos & ~PAGE_MASK;
+ buf_align = (unsigned long)data & ~PAGE_MASK;
len = left;
if (file->f_flags & O_DIRECT) {
/* write from beginning of first page, regardless of
@@ -593,6 +590,7 @@ out:
pos += len;
written += len;
left -= len;
+ data += written;
if (left)
goto more;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 35f9154615fa..3e2989976297 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -649,9 +649,9 @@ cifs_do_mount(struct file_system_type *fs_type,
cFYI(1, "Devname: %s flags: %d ", dev_name, flags);
- rc = cifs_setup_volume_info(&volume_info, (char *)data, dev_name);
- if (rc)
- return ERR_PTR(rc);
+ volume_info = cifs_get_volume_info((char *)data, dev_name);
+ if (IS_ERR(volume_info))
+ return ERR_CAST(volume_info);
cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
if (cifs_sb == NULL) {
@@ -713,7 +713,7 @@ cifs_do_mount(struct file_system_type *fs_type,
out_super:
deactivate_locked_super(sb);
out:
- cifs_cleanup_volume_info(&volume_info);
+ cifs_cleanup_volume_info(volume_info);
return root;
out_mountdata:
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 257f312ede42..8df28e925e5b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -154,9 +154,9 @@ extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb);
extern int cifs_match_super(struct super_block *, void *);
-extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
-extern int cifs_setup_volume_info(struct smb_vol **pvolume_info,
- char *mount_data, const char *devname);
+extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
+extern struct smb_vol *cifs_get_volume_info(char *mount_data,
+ const char *devname);
extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
extern void cifs_umount(struct cifs_sb_info *);
extern void cifs_dfs_release_automount_timer(void);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7f540df52527..dbd669cc5bc7 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -65,6 +65,8 @@ static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);
+static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
+ const char *devname);
/*
* cifs tcp session reconnection
@@ -2240,8 +2242,8 @@ cifs_match_super(struct super_block *sb, void *data)
rc = compare_mount_options(sb, mnt_data);
out:
- cifs_put_tlink(tlink);
spin_unlock(&cifs_tcp_ses_lock);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -2474,14 +2476,6 @@ generic_ip_connect(struct TCP_Server_Info *server)
if (rc < 0)
return rc;
- rc = socket->ops->connect(socket, saddr, slen, 0);
- if (rc < 0) {
- cFYI(1, "Error %d connecting to server", rc);
- sock_release(socket);
- server->ssocket = NULL;
- return rc;
- }
-
/*
* Eventually check for other socket options to change from
* the default. sock_setsockopt not used because it expects
@@ -2510,6 +2504,14 @@ generic_ip_connect(struct TCP_Server_Info *server)
socket->sk->sk_sndbuf,
socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
+ rc = socket->ops->connect(socket, saddr, slen, 0);
+ if (rc < 0) {
+ cFYI(1, "Error %d connecting to server", rc);
+ sock_release(socket);
+ server->ssocket = NULL;
+ return rc;
+ }
+
if (sport == htons(RFC1001_PORT))
rc = ip_rfc1001_connect(server);
@@ -2830,15 +2832,9 @@ is_path_accessible(int xid, struct cifs_tcon *tcon,
return rc;
}
-void
-cifs_cleanup_volume_info(struct smb_vol **pvolume_info)
+static void
+cleanup_volume_info_contents(struct smb_vol *volume_info)
{
- struct smb_vol *volume_info;
-
- if (!pvolume_info || !*pvolume_info)
- return;
-
- volume_info = *pvolume_info;
kfree(volume_info->username);
kzfree(volume_info->password);
kfree(volume_info->UNC);
@@ -2846,28 +2842,44 @@ cifs_cleanup_volume_info(struct smb_vol **pvolume_info)
kfree(volume_info->domainname);
kfree(volume_info->iocharset);
kfree(volume_info->prepath);
+}
+
+void
+cifs_cleanup_volume_info(struct smb_vol *volume_info)
+{
+ if (!volume_info)
+ return;
+ cleanup_volume_info_contents(volume_info);
kfree(volume_info);
- *pvolume_info = NULL;
- return;
}
+
#ifdef CONFIG_CIFS_DFS_UPCALL
/* build_path_to_root returns full path to root when
* we do not have an exiting connection (tcon) */
static char *
-build_unc_path_to_root(const struct smb_vol *volume_info,
+build_unc_path_to_root(const struct smb_vol *vol,
const struct cifs_sb_info *cifs_sb)
{
- char *full_path;
+ char *full_path, *pos;
+ unsigned int pplen = vol->prepath ? strlen(vol->prepath) : 0;
+ unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
- int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1);
- full_path = kmalloc(unc_len + 1, GFP_KERNEL);
+ full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
- strncpy(full_path, volume_info->UNC, unc_len);
- full_path[unc_len] = 0; /* add trailing null */
+ strncpy(full_path, vol->UNC, unc_len);
+ pos = full_path + unc_len;
+
+ if (pplen) {
+ strncpy(pos, vol->prepath, pplen);
+ pos += pplen;
+ }
+
+ *pos = '\0'; /* add trailing null */
convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
+ cFYI(1, "%s: full_path=%s", __func__, full_path);
return full_path;
}
@@ -2910,15 +2922,18 @@ expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
&fake_devname);
free_dfs_info_array(referrals, num_referrals);
- kfree(fake_devname);
-
- if (cifs_sb->mountdata != NULL)
- kfree(cifs_sb->mountdata);
if (IS_ERR(mdata)) {
rc = PTR_ERR(mdata);
mdata = NULL;
+ } else {
+ cleanup_volume_info_contents(volume_info);
+ memset(volume_info, '\0', sizeof(*volume_info));
+ rc = cifs_setup_volume_info(volume_info, mdata,
+ fake_devname);
}
+ kfree(fake_devname);
+ kfree(cifs_sb->mountdata);
cifs_sb->mountdata = mdata;
}
kfree(full_path);
@@ -2926,33 +2941,20 @@ expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
}
#endif
-int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
- const char *devname)
+static int
+cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
+ const char *devname)
{
- struct smb_vol *volume_info;
int rc = 0;
- *pvolume_info = NULL;
-
- volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL);
- if (!volume_info) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (cifs_parse_mount_options(mount_data, devname,
- volume_info)) {
- rc = -EINVAL;
- goto out;
- }
+ if (cifs_parse_mount_options(mount_data, devname, volume_info))
+ return -EINVAL;
if (volume_info->nullauth) {
cFYI(1, "null user");
volume_info->username = kzalloc(1, GFP_KERNEL);
- if (volume_info->username == NULL) {
- rc = -ENOMEM;
- goto out;
- }
+ if (volume_info->username == NULL)
+ return -ENOMEM;
} else if (volume_info->username) {
/* BB fixme parse for domain name here */
cFYI(1, "Username: %s", volume_info->username);
@@ -2960,8 +2962,7 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
cifserror("No username specified");
/* In userspace mount helper we can get user name from alternate
locations such as env variables and files on disk */
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* this is needed for ASCII cp to Unicode converts */
@@ -2973,18 +2974,32 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
if (volume_info->local_nls == NULL) {
cERROR(1, "CIFS mount error: iocharset %s not found",
volume_info->iocharset);
- rc = -ELIBACC;
- goto out;
+ return -ELIBACC;
}
}
- *pvolume_info = volume_info;
- return rc;
-out:
- cifs_cleanup_volume_info(&volume_info);
return rc;
}
+struct smb_vol *
+cifs_get_volume_info(char *mount_data, const char *devname)
+{
+ int rc;
+ struct smb_vol *volume_info;
+
+ volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL);
+ if (!volume_info)
+ return ERR_PTR(-ENOMEM);
+
+ rc = cifs_setup_volume_info(volume_info, mount_data, devname);
+ if (rc) {
+ cifs_cleanup_volume_info(volume_info);
+ volume_info = ERR_PTR(rc);
+ }
+
+ return volume_info;
+}
+
int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
@@ -2997,6 +3012,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
struct tcon_link *tlink;
#ifdef CONFIG_CIFS_DFS_UPCALL
int referral_walks_count = 0;
+#endif
rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
if (rc)
@@ -3004,6 +3020,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
+#ifdef CONFIG_CIFS_DFS_UPCALL
try_mount_again:
/* cleanup activities if we're chasing a referral */
if (referral_walks_count) {
@@ -3012,7 +3029,6 @@ try_mount_again:
else if (pSesInfo)
cifs_put_smb_ses(pSesInfo);
- cifs_cleanup_volume_info(&volume_info);
FreeXid(xid);
}
#endif
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 816696621ec9..42e5363b4102 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -92,6 +92,7 @@ static void cifs_fscache_disable_inode_cookie(struct inode *inode)
if (cifsi->fscache) {
cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
+ fscache_uncache_all_inode_pages(cifsi->fscache, inode);
fscache_relinquish_cookie(cifsi->fscache, 1);
cifsi->fscache = NULL;
}
diff --git a/fs/dcache.c b/fs/dcache.c
index 37f72ee5bf7c..6e4ea6d87774 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2213,14 +2213,15 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry,
* The hash value has to match the hash queue that the dentry is on..
*/
/*
- * d_move - move a dentry
+ * __d_move - move a dentry
* @dentry: entry to move
* @target: new dentry
*
* Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
+ * dcache entries should not be moved in this way. Caller must hold
+ * rename_lock.
*/
-void d_move(struct dentry * dentry, struct dentry * target)
+static void __d_move(struct dentry * dentry, struct dentry * target)
{
if (!dentry->d_inode)
printk(KERN_WARNING "VFS: moving negative dcache entry\n");
@@ -2228,8 +2229,6 @@ void d_move(struct dentry * dentry, struct dentry * target)
BUG_ON(d_ancestor(dentry, target));
BUG_ON(d_ancestor(target, dentry));
- write_seqlock(&rename_lock);
-
dentry_lock_for_move(dentry, target);
write_seqcount_begin(&dentry->d_seq);
@@ -2275,6 +2274,20 @@ void d_move(struct dentry * dentry, struct dentry * target)
spin_unlock(&target->d_lock);
fsnotify_d_move(dentry);
spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * d_move - move a dentry
+ * @dentry: entry to move
+ * @target: new dentry
+ *
+ * Update the dcache to reflect the move of a file name. Negative
+ * dcache entries should not be moved in this way.
+ */
+void d_move(struct dentry *dentry, struct dentry *target)
+{
+ write_seqlock(&rename_lock);
+ __d_move(dentry, target);
write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
@@ -2302,7 +2315,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
* This helper attempts to cope with remotely renamed directories
*
* It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
+ * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
*
* Note: If ever the locking in lock_rename() changes, then please
* remember to update this too...
@@ -2317,11 +2330,6 @@ static struct dentry *__d_unalias(struct inode *inode,
if (alias->d_parent == dentry->d_parent)
goto out_unalias;
- /* Check for loops */
- ret = ERR_PTR(-ELOOP);
- if (d_ancestor(alias, dentry))
- goto out_err;
-
/* See lock_rename() */
ret = ERR_PTR(-EBUSY);
if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
@@ -2331,7 +2339,7 @@ static struct dentry *__d_unalias(struct inode *inode,
goto out_err;
m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
- d_move(alias, dentry);
+ __d_move(alias, dentry);
ret = alias;
out_err:
spin_unlock(&inode->i_lock);
@@ -2416,15 +2424,24 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
alias = __d_find_alias(inode, 0);
if (alias) {
actual = alias;
- /* Is this an anonymous mountpoint that we could splice
- * into our tree? */
- if (IS_ROOT(alias)) {
+ write_seqlock(&rename_lock);
+
+ if (d_ancestor(alias, dentry)) {
+ /* Check for loops */
+ actual = ERR_PTR(-ELOOP);
+ } else if (IS_ROOT(alias)) {
+ /* Is this an anonymous mountpoint that we
+ * could splice into our tree? */
__d_materialise_dentry(dentry, alias);
+ write_sequnlock(&rename_lock);
__d_drop(alias);
goto found;
+ } else {
+ /* Nope, but we must(!) avoid directory
+ * aliasing */
+ actual = __d_unalias(inode, dentry, alias);
}
- /* Nope, but we must(!) avoid directory aliasing */
- actual = __d_unalias(inode, dentry, alias);
+ write_sequnlock(&rename_lock);
if (IS_ERR(actual))
dput(alias);
goto out_nolock;
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index a2a5d19ece6a..2f343b4d7a7d 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -954,3 +954,47 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
+
+/*
+ * Uncache all the pages in an inode that are marked PG_fscache, assuming them
+ * to be associated with the given cookie.
+ */
+void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+ struct inode *inode)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct pagevec pvec;
+ pgoff_t next;
+ int i;
+
+ _enter("%p,%p", cookie, inode);
+
+ if (!mapping || mapping->nrpages == 0) {
+ _leave(" [no pages]");
+ return;
+ }
+
+ pagevec_init(&pvec, 0);
+ next = 0;
+ while (next <= (loff_t)-1 &&
+ pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)
+ ) {
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *page = pvec.pages[i];
+ pgoff_t page_index = page->index;
+
+ ASSERTCMP(page_index, >=, next);
+ next = page_index + 1;
+
+ if (PageFsCache(page)) {
+ __fscache_wait_on_page_write(cookie, page);
+ __fscache_uncache_page(cookie, page);
+ }
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 802ac5eeba28..f9fbbe96c222 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1069,6 +1069,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
return 0;
gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_ail_lock);
head = bh = page_buffers(page);
do {
if (atomic_read(&bh->b_count))
@@ -1080,6 +1081,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
goto not_possible;
bh = bh->b_this_page;
} while(bh != head);
+ spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
head = bh = page_buffers(page);
@@ -1112,6 +1114,7 @@ not_possible: /* Should never happen */
WARN_ON(buffer_dirty(bh));
WARN_ON(buffer_pinned(bh));
cannot_release:
+ spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
return 0;
}
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 8ef70f464731..2cca29316bd6 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -47,10 +47,10 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl)
bd_ail_gl_list);
bh = bd->bd_bh;
gfs2_remove_from_ail(bd);
- spin_unlock(&sdp->sd_ail_lock);
-
bd->bd_bh = NULL;
bh->b_private = NULL;
+ spin_unlock(&sdp->sd_ail_lock);
+
bd->bd_blkno = bh->b_blocknr;
gfs2_log_lock(sdp);
gfs2_assert_withdraw(sdp, !buffer_busy(bh));
@@ -221,8 +221,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
}
}
- if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
+ if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
+ gfs2_log_flush(gl->gl_sbd, NULL);
gl->gl_sbd->sd_rindex_uptodate = 0;
+ }
if (ip && S_ISREG(ip->i_inode.i_mode))
truncate_inode_pages(ip->i_inode.i_mapping, 0);
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 0a064e91ac70..81206e70cbf6 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -17,6 +17,7 @@
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
+#include <linux/completion.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
@@ -546,6 +547,7 @@ struct gfs2_sbd {
struct gfs2_glock *sd_trans_gl;
wait_queue_head_t sd_glock_wait;
atomic_t sd_glock_disposal;
+ struct completion sd_locking_init;
/* Inode Stuff */
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 903115f2bb34..85c62923ee29 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -903,6 +903,7 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
if (gfs2_ail1_empty(sdp))
break;
}
+ gfs2_log_flush(sdp, NULL);
}
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 8ac9ae189b53..2a77071fb7b6 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -72,6 +72,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
init_waitqueue_head(&sdp->sd_glock_wait);
atomic_set(&sdp->sd_glock_disposal, 0);
+ init_completion(&sdp->sd_locking_init);
spin_lock_init(&sdp->sd_statfs_spin);
spin_lock_init(&sdp->sd_rindex_spin);
@@ -1017,11 +1018,13 @@ hostdata_error:
fsname++;
if (lm->lm_mount == NULL) {
fs_info(sdp, "Now mounting FS...\n");
+ complete(&sdp->sd_locking_init);
return 0;
}
ret = lm->lm_mount(sdp, fsname);
if (ret == 0)
fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+ complete(&sdp->sd_locking_init);
return ret;
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index ed540e7018be..fb0edf735483 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -757,13 +757,17 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
struct timespec atime;
struct gfs2_dinode *di;
int ret = -EAGAIN;
+ int unlock_required = 0;
/* Skip timestamp update, if this is from a memalloc */
if (current->flags & PF_MEMALLOC)
goto do_flush;
- ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
- if (ret)
- goto do_flush;
+ if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ if (ret)
+ goto do_flush;
+ unlock_required = 1;
+ }
ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
if (ret)
goto do_unlock;
@@ -780,7 +784,8 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
}
gfs2_trans_end(sdp);
do_unlock:
- gfs2_glock_dq_uninit(&gh);
+ if (unlock_required)
+ gfs2_glock_dq_uninit(&gh);
do_flush:
if (wbc->sync_mode == WB_SYNC_ALL)
gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
@@ -1427,7 +1432,20 @@ out:
return error;
}
-/*
+/**
+ * gfs2_evict_inode - Remove an inode from cache
+ * @inode: The inode to evict
+ *
+ * There are three cases to consider:
+ * 1. i_nlink == 0, we are the final opener (and must deallocate)
+ * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
+ * 3. i_nlink > 0
+ *
+ * If the fs is read only, then we have to treat all cases as per #3
+ * since we are unable to do any deallocation. The inode will be
+ * deallocated by the next read/write node to attempt an allocation
+ * in the same resource group
+ *
* We have to (at the moment) hold the inodes main lock to cover
* the gap between unlocking the shared lock on the iopen lock and
* taking the exclusive lock. I'd rather do a shared -> exclusive
@@ -1470,6 +1488,8 @@ static void gfs2_evict_inode(struct inode *inode)
if (error)
goto out_truncate;
+ /* Case 1 starts here */
+
if (S_ISDIR(inode->i_mode) &&
(ip->i_diskflags & GFS2_DIF_EXHASH)) {
error = gfs2_dir_exhash_dealloc(ip);
@@ -1493,13 +1513,16 @@ static void gfs2_evict_inode(struct inode *inode)
goto out_unlock;
out_truncate:
+ /* Case 2 starts here */
error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
if (error)
goto out_unlock;
- gfs2_final_release_pages(ip);
+ /* Needs to be done before glock release & also in a transaction */
+ truncate_inode_pages(&inode->i_data, 0);
gfs2_trans_end(sdp);
out_unlock:
+ /* Error path for case 1 */
if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
gfs2_glock_dq(&ip->i_iopen_gh);
gfs2_holder_uninit(&ip->i_iopen_gh);
@@ -1507,6 +1530,7 @@ out_unlock:
if (error && error != GLR_TRYFAILED && error != -EROFS)
fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
out:
+ /* Case 3 starts here */
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index e20eab37bc80..443cabcfcd23 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -338,6 +338,9 @@ static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
rv = sscanf(buf, "%u", &first);
if (rv != 1 || first > 1)
return -EINVAL;
+ rv = wait_for_completion_killable(&sdp->sd_locking_init);
+ if (rv)
+ return rv;
spin_lock(&sdp->sd_jindex_spin);
rv = -EBUSY;
if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
@@ -414,7 +417,9 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
rv = sscanf(buf, "%d", &jid);
if (rv != 1)
return -EINVAL;
-
+ rv = wait_for_completion_killable(&sdp->sd_locking_init);
+ if (rv)
+ return rv;
spin_lock(&sdp->sd_jindex_spin);
rv = -EINVAL;
if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index b49b55584c84..84a47b709f51 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -500,7 +500,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
out_put_hidden_dir:
iput(sbi->hidden_dir);
out_put_root:
- iput(sbi->alloc_file);
+ iput(root);
out_put_alloc_file:
iput(sbi->alloc_file);
out_close_cat_tree:
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 3031d81f5f0f..4ac88ff79aa6 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -36,6 +36,7 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
{
DECLARE_COMPLETION_ONSTACK(wait);
struct bio *bio;
+ int ret = 0;
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = sector;
@@ -54,8 +55,10 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
wait_for_completion(&wait);
if (!bio_flagged(bio, BIO_UPTODATE))
- return -EIO;
- return 0;
+ ret = -EIO;
+
+ bio_put(bio);
+ return ret;
}
static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
diff --git a/fs/locks.c b/fs/locks.c
index 0a4f50dfadfb..b286539d547a 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -160,10 +160,28 @@ EXPORT_SYMBOL_GPL(unlock_flocks);
static struct kmem_cache *filelock_cache __read_mostly;
+static void locks_init_lock_always(struct file_lock *fl)
+{
+ fl->fl_next = NULL;
+ fl->fl_fasync = NULL;
+ fl->fl_owner = NULL;
+ fl->fl_pid = 0;
+ fl->fl_nspid = NULL;
+ fl->fl_file = NULL;
+ fl->fl_flags = 0;
+ fl->fl_type = 0;
+ fl->fl_start = fl->fl_end = 0;
+}
+
/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
+
+ if (fl)
+ locks_init_lock_always(fl);
+
+ return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);
@@ -200,17 +218,9 @@ void locks_init_lock(struct file_lock *fl)
INIT_LIST_HEAD(&fl->fl_link);
INIT_LIST_HEAD(&fl->fl_block);
init_waitqueue_head(&fl->fl_wait);
- fl->fl_next = NULL;
- fl->fl_fasync = NULL;
- fl->fl_owner = NULL;
- fl->fl_pid = 0;
- fl->fl_nspid = NULL;
- fl->fl_file = NULL;
- fl->fl_flags = 0;
- fl->fl_type = 0;
- fl->fl_start = fl->fl_end = 0;
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
+ locks_init_lock_always(fl);
}
EXPORT_SYMBOL(locks_init_lock);
diff --git a/fs/namei.c b/fs/namei.c
index 0223c41fb114..5c867dd1c0b3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -433,6 +433,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
goto err_parent;
BUG_ON(nd->inode != parent->d_inode);
} else {
+ if (dentry->d_parent != parent)
+ goto err_parent;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
if (!__d_rcu_to_refcount(dentry, nd->seq))
goto err_child;
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index ce153a6b3aec..419119c371bf 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -259,12 +259,10 @@ static void nfs_fscache_disable_inode_cookie(struct inode *inode)
dfprintk(FSCACHE,
"NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));
- /* Need to invalidate any mapped pages that were read in before
- * turning off the cache.
+ /* Need to uncache any pages attached to this inode that
+ * fscache knows about before turning off the cache.
*/
- if (inode->i_mapping && inode->i_mapping->nrpages)
- invalidate_inode_pages2(inode->i_mapping);
-
+ fscache_uncache_all_inode_pages(NFS_I(inode)->fscache, inode);
nfs_fscache_zap_inode_cookie(inode);
}
}
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 0bafcc91c27f..f9d03abcd04c 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -398,7 +398,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
* this offset and save the original offset.
*/
data->args.offset = filelayout_get_dserver_offset(lseg, offset);
- data->mds_offset = offset;
/* Perform an asynchronous write */
status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 6870bc61ceec..e6e8f3b9a1de 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -91,7 +91,7 @@ static int nfs4_stat_to_errno(int);
#define encode_getfh_maxsz (op_encode_hdr_maxsz)
#define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \
((3+NFS4_FHSIZE) >> 2))
-#define nfs4_fattr_bitmap_maxsz 3
+#define nfs4_fattr_bitmap_maxsz 4
#define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
#define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e268e3b23497..727168059684 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -864,6 +864,8 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + offset;
+ /* pnfs_set_layoutcommit needs this */
+ data->mds_offset = data->args.offset;
data->args.pgbase = req->wb_pgbase + offset;
data->args.pages = data->pagevec;
data->args.count = count;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 09983a3344a5..b1e88d56069c 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -681,15 +681,15 @@ xfs_inode_item_unlock(
* where the cluster buffer may be unpinned before the inode is inserted into
* the AIL during transaction committed processing. If the buffer is unpinned
* before the inode item has been committed and inserted, then it is possible
- * for the buffer to be written and IO completions before the inode is inserted
+ * for the buffer to be written and the IO to complete before the inode is inserted
* into the AIL. In that case, we'd be inserting a clean, stale inode into the
* AIL which will never get removed. It will, however, get reclaimed which
 * triggers an assert in xfs_inode_free() complaining about freeing an inode
* still in the AIL.
*
- * To avoid this, return a lower LSN than the one passed in so that the
- * transaction committed code will not move the inode forward in the AIL but
- * will still unpin it properly.
+ * To avoid this, just unpin the inode directly and return a LSN of -1 so the
+ * transaction committed code knows that it does not need to do any further
+ * processing on the item.
*/
STATIC xfs_lsn_t
xfs_inode_item_committed(
@@ -699,8 +699,10 @@ xfs_inode_item_committed(
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
- if (xfs_iflags_test(ip, XFS_ISTALE))
- return lsn - 1;
+ if (xfs_iflags_test(ip, XFS_ISTALE)) {
+ xfs_inode_item_unpin(lip, 0);
+ return -1;
+ }
return lsn;
}
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 7c7bc2b786bd..c83f63b33aae 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1361,7 +1361,7 @@ xfs_trans_item_committed(
lip->li_flags |= XFS_LI_ABORTED;
item_lsn = IOP_COMMITTED(lip, commit_lsn);
- /* If the committed routine returns -1, item has been freed. */
+ /* item_lsn of -1 means the item needs no further processing */
if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
return;
@@ -1474,7 +1474,7 @@ xfs_trans_committed_bulk(
lip->li_flags |= XFS_LI_ABORTED;
item_lsn = IOP_COMMITTED(lip, commit_lsn);
- /* item_lsn of -1 means the item was freed */
+ /* item_lsn of -1 means the item needs no further processing */
if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
continue;
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index e08f344c6cff..3d53efd25ab9 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -182,6 +182,7 @@
{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
@@ -192,6 +193,7 @@
{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 246f576c981d..447c36752385 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -117,10 +117,10 @@
/* drbdsetup XY resize -d Z
* you are free to reduce the device size to nothing, if you want to.
* the upper limit with 64bit kernel, enough ram and flexible meta data
- * is 16 TB, currently. */
+ * is 1 PiB, currently. */
/* DRBD_MAX_SECTORS */
#define DRBD_DISK_SIZE_SECT_MIN 0
-#define DRBD_DISK_SIZE_SECT_MAX (16 * (2LLU << 30))
+#define DRBD_DISK_SIZE_SECT_MAX (1 * (2LLU << 40))
#define DRBD_DISK_SIZE_SECT_DEF 0 /* = disabled = no user size... */
#define DRBD_ON_IO_ERROR_DEF EP_PASS_ON
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 7c4d72f5581f..9ec20dec3353 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -204,6 +204,8 @@ extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
gfp_t);
+extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
+ struct inode *);
/**
* fscache_register_netfs - Register a filesystem as desiring caching services
@@ -643,4 +645,23 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
return false;
}
+/**
+ * fscache_uncache_all_inode_pages - Uncache all an inode's pages
+ * @cookie: The cookie representing the inode's cache object.
+ * @inode: The inode to uncache pages from.
+ *
+ * Uncache all the pages in an inode that are marked PG_fscache, assuming them
+ * to be associated with the given cookie.
+ *
+ * This function may sleep. It will wait for pages that are being written out
+ * and will wait whilst the PG_fscache mark is removed by the cache.
+ */
+static inline
+void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+ struct inode *inode)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_uncache_all_inode_pages(cookie, inode);
+}
+
#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h
index 781d4671415f..daa9952d2174 100644
--- a/include/linux/fsl-diu-fb.h
+++ b/include/linux/fsl-diu-fb.h
@@ -24,12 +24,6 @@
* See mpc8610fb_set_par(), map_video_memory(), and unmap_video_memory()
*/
#define MEM_ALLOC_THRESHOLD (1024*768*4+32)
-/* Minimum value that the pixel clock can be set to in pico seconds
- * This is determined by platform clock/3 where the minimum platform
- * clock is 533MHz. This gives 5629 pico seconds.
- */
-#define MIN_PIX_CLK 5629
-#define MAX_PIX_CLK 96096
#include <linux/types.h>
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8b4538446636..baa397eb9c33 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -676,7 +676,8 @@ void irq_gc_mask_disable_reg(struct irq_data *d);
void irq_gc_mask_set_bit(struct irq_data *d);
void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
-void irq_gc_ack(struct irq_data *d);
+void irq_gc_ack_set_bit(struct irq_data *d);
+void irq_gc_ack_clr_bit(struct irq_data *d);
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);
diff --git a/include/linux/memory.h b/include/linux/memory.h
index e1e3b2b84f85..935699b30b7c 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -20,6 +20,8 @@
#include <linux/compiler.h>
#include <linux/mutex.h>
+#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
+
struct memory_block {
unsigned long start_section_nr;
unsigned long end_section_nr;
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
index be469a357cbb..38a372a0e285 100644
--- a/include/linux/mfd/ds1wm.h
+++ b/include/linux/mfd/ds1wm.h
@@ -3,4 +3,11 @@
struct ds1wm_driver_data {
int active_high;
int clock_rate;
+ /* in milliseconds, the amount of time to */
+ /* sleep following a reset pulse. Zero */
+ /* should work if your bus devices' recovery */
+ /* time respects the 1-wire spec, since the */
+ /* ds1wm implements the precise timings of */
+ /* a reset pulse/presence detect sequence. */
+ unsigned int reset_recover_delay;
};
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index c6927a4d157f..6ad43554ac05 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -64,6 +64,19 @@ struct mmc_ext_csd {
unsigned long long enhanced_area_offset; /* Units: Byte */
unsigned int enhanced_area_size; /* Units: KB */
unsigned int boot_size; /* in bytes */
+ u8 raw_partition_support; /* 160 */
+ u8 raw_erased_mem_count; /* 181 */
+ u8 raw_ext_csd_structure; /* 194 */
+ u8 raw_card_type; /* 196 */
+ u8 raw_s_a_timeout; /* 217 */
+ u8 raw_hc_erase_gap_size; /* 221 */
+ u8 raw_erase_timeout_mult; /* 223 */
+ u8 raw_hc_erase_grp_size; /* 224 */
+ u8 raw_sec_trim_mult; /* 229 */
+ u8 raw_sec_erase_mult; /* 230 */
+ u8 raw_sec_feature_support;/* 231 */
+ u8 raw_trim_mult; /* 232 */
+ u8 raw_sectors[4]; /* 212 - 4 bytes */
};
struct sd_scr {
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 5449945d589f..7020e9736fc5 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -94,12 +94,20 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table);
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table);
#else
static inline int opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
return -EINVAL;
}
+
+static inline
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+}
#endif /* CONFIG_CPU_FREQ */
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 98491ee35102..21097cb086fe 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -11,8 +11,12 @@
#include <linux/device.h>
-#define GPD_IN_SUSPEND 1
-#define GPD_POWER_OFF 2
+enum gpd_status {
+ GPD_STATE_ACTIVE = 0, /* PM domain is active */
+ GPD_STATE_BUSY, /* Something is happening to the PM domain */
+ GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
+ GPD_STATE_POWER_OFF, /* PM domain is off */
+};
struct dev_power_governor {
bool (*power_down_ok)(struct dev_pm_domain *domain);
@@ -20,6 +24,7 @@ struct dev_power_governor {
struct generic_pm_domain {
struct dev_pm_domain domain; /* PM domain operations */
+ struct list_head gpd_list_node; /* Node in the global PM domains list */
struct list_head sd_node; /* Node in the parent's subdomain list */
struct generic_pm_domain *parent; /* Parent PM domain */
struct list_head sd_list; /* List of subdomains */
@@ -29,7 +34,10 @@ struct generic_pm_domain {
struct work_struct power_off_work;
unsigned int in_progress; /* Number of devices being suspended now */
unsigned int sd_count; /* Number of subdomains with power "on" */
- bool power_is_off; /* Whether or not power has been removed */
+ enum gpd_status status; /* Current state of the domain */
+ wait_queue_head_t status_wait_queue;
+ struct task_struct *poweroff_task; /* Powering off task */
+ unsigned int resume_count; /* Number of devices being resumed */
unsigned int device_count; /* Number of devices */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
@@ -63,6 +71,9 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target);
extern void pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
+extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+extern void pm_genpd_poweroff_unused(void);
+extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
#else
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev)
@@ -86,6 +97,12 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
static inline void pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off) {}
+static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+ return -ENOSYS;
+}
+static inline void pm_genpd_poweroff_unused(void) {}
+static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
#endif
#endif /* _LINUX_PM_DOMAIN_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a837b20ba190..496770a96487 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -808,7 +808,7 @@ enum cpu_idle_type {
* when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
* increased costs.
*/
-#if BITS_PER_LONG > 32
+#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION 10
# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 083ffea7ba18..e1e3742733be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -92,6 +92,13 @@ typedef int __bitwise suspend_state_t;
* @enter() and @wake(), even if any of them fails. It is executed after
* a failing @prepare.
*
+ * @suspend_again: Returns whether the system should suspend again (true) or
+ * not (false). If the platform wants to poll sensors or execute some
+ * code while suspended, without waking userspace or most devices, the
+ * suspend_again callback is the place to do it, assuming that a periodic
+ * or alarm wakeup has already been set up. This allows code to run while
+ * the system remains suspended as far as userland and devices are concerned.
+ *
* @end: Called by the PM core right after resuming devices, to indicate to
* the platform that the system has returned to the working state or
* the transition to the sleep state has been aborted.
@@ -113,6 +120,7 @@ struct platform_suspend_ops {
int (*enter)(suspend_state_t state);
void (*wake)(void);
void (*finish)(void);
+ bool (*suspend_again)(void);
void (*end)(void);
void (*recover)(void);
};
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index 630e702c9511..168dd0b1bae2 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -9,7 +9,7 @@
#ifndef _LINUX_LIRC_DEV_H
#define _LINUX_LIRC_DEV_H
-#define MAX_IRCTL_DEVICES 4
+#define MAX_IRCTL_DEVICES 8
#define BUFLEN 16
#define mod(n, div) ((n) % (div))
diff --git a/include/media/m5mols.h b/include/media/m5mols.h
index 2d7e7ca2313d..aac2c0e06d5e 100644
--- a/include/media/m5mols.h
+++ b/include/media/m5mols.h
@@ -2,10 +2,10 @@
* Driver header for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 1562c4ff3a65..2884e3e69cb1 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -173,16 +173,20 @@ struct v4l2_subdev_core_ops {
struct v4l2_event_subscription *sub);
};
-/* s_mode: switch the tuner to a specific tuner mode. Replacement of s_radio.
+/* s_radio: v4l device was opened in radio mode.
- s_radio: v4l device was opened in Radio mode, to be replaced by s_mode.
+ g_frequency: freq->type must be filled in. Normally done by video_ioctl2
+ or the bridge driver.
+
+ g_tuner:
+ s_tuner: vt->type must be filled in. Normally done by video_ioctl2 or the
+ bridge driver.
s_type_addr: sets tuner type and its I2C addr.
s_config: sets tda9887 specific stuff, like port1, port2 and qss
*/
struct v4l2_subdev_tuner_ops {
- int (*s_mode)(struct v4l2_subdev *sd, enum v4l2_tuner_type);
int (*s_radio)(struct v4l2_subdev *sd);
int (*s_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq);
int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0589f554788a..396e8fc8910e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -2688,7 +2688,7 @@ void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
* @dev: network device
* @addr: The source MAC address of the frame
* @key_type: The key type that the received frame used
- * @key_id: Key identifier (0..3)
+ * @key_id: Key identifier (0..3). Can be -1 if missing.
* @tsc: The TSC value of the frame that generated the MIC failure (6 octets)
* @gfp: allocation flags
*
diff --git a/include/net/dst.h b/include/net/dst.h
index 7d15d238b6ec..e12ddfb9eb16 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -77,6 +77,7 @@ struct dst_entry {
#define DST_NOPOLICY 0x0004
#define DST_NOHASH 0x0008
#define DST_NOCACHE 0x0010
+#define DST_NOCOUNT 0x0020
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index dd6847e5d6e4..6506458ccd33 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -63,6 +63,7 @@ typedef enum {
SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */
SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */
SCTP_CMD_TIMER_START, /* Start a timer. */
+ SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */
SCTP_CMD_TIMER_RESTART, /* Restart a timer. */
SCTP_CMD_TIMER_STOP, /* Stop a timer. */
SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 99b027b2adce..ca4693b4e09e 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -80,7 +80,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
void sctp_ulpevent_free(struct sctp_ulpevent *);
int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
const struct sctp_association *asoc,
diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
index 736eac71d053..af1b49e982df 100644
--- a/include/sound/sb16_csp.h
+++ b/include/sound/sb16_csp.h
@@ -99,7 +99,14 @@ struct snd_sb_csp_info {
/* get CSP information */
#define SNDRV_SB_CSP_IOCTL_INFO _IOR('H', 0x10, struct snd_sb_csp_info)
/* load microcode to CSP */
-#define SNDRV_SB_CSP_IOCTL_LOAD_CODE _IOW('H', 0x11, struct snd_sb_csp_microcode)
+/* NOTE: struct snd_sb_csp_microcode overflows the max size (13 bits)
+ * defined for some architectures like MIPS, and it leads to build errors.
+ * (x86 and friends have a 14-bit size field, so the struct fits there.)
+ * As a workaround, we skip the size-limit check by using _IOC() with an
+ * explicit size argument instead of the normal _IOW() macro.
+ */
+#define SNDRV_SB_CSP_IOCTL_LOAD_CODE \
+ _IOC(_IOC_WRITE, 'H', 0x11, sizeof(struct snd_sb_csp_microcode))
/* unload microcode from CSP */
#define SNDRV_SB_CSP_IOCTL_UNLOAD_CODE _IO('H', 0x12)
/* start CSP */
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 31a9db711906..3a2cab407b93 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -101,10 +101,10 @@ void irq_gc_unmask_enable_reg(struct irq_data *d)
}
/**
- * irq_gc_ack - Ack pending interrupt
+ * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
* @d: irq_data
*/
-void irq_gc_ack(struct irq_data *d)
+void irq_gc_ack_set_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
@@ -115,6 +115,20 @@ void irq_gc_ack(struct irq_data *d)
}
/**
+ * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
+ * @d: irq_data
+ */
+void irq_gc_ack_clr_bit(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 mask = ~(1 << (d->irq - gc->irq_base));
+
+ irq_gc_lock(gc);
+ irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+ irq_gc_unlock(gc);
+}
+
+/**
* irq_gc_mask_disable_reg_and_ack- Mask and ack pending interrupt
* @d: irq_data
*/
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index fa27e750dbc0..a8ce45097f3d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -375,15 +375,19 @@ int jump_label_text_reserved(void *start, void *end)
static void jump_label_update(struct jump_label_key *key, int enable)
{
- struct jump_entry *entry = key->entries;
-
- /* if there are no users, entry can be NULL */
- if (entry)
- __jump_label_update(key, entry, __stop___jump_table, enable);
+ struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
#ifdef CONFIG_MODULES
+ struct module *mod = __module_address((jump_label_t)key);
+
__jump_label_mod_update(key, enable);
+
+ if (mod)
+ stop = mod->jump_entries + mod->num_jump_entries;
#endif
+ /* if there are no users, entry can be NULL */
+ if (entry)
+ __jump_label_update(key, entry, stop, enable);
}
#endif
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 2981af4ce7cb..6c601f871964 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -37,8 +37,9 @@ EXPORT_SYMBOL_GPL(unregister_pm_notifier);
int pm_notifier_call_chain(unsigned long val)
{
- return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
- == NOTIFY_BAD) ? -EINVAL : 0;
+ int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
+
+ return notifier_to_errno(ret);
}
/* If set, devices may be suspended and resumed asynchronously. */
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index ace55889f702..06efa54f93d6 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1211,7 +1211,11 @@ static void free_unnecessary_pages(void)
to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
- to_free_normal -= save - alloc_highmem;
+ save -= alloc_highmem;
+ if (to_free_normal > save)
+ to_free_normal -= save;
+ else
+ to_free_normal = 0;
}
memory_bm_position_reset(&copy_bm);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1c41ba215419..b6b71ad2208f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -44,6 +44,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
suspend_ops = ops;
mutex_unlock(&pm_mutex);
}
+EXPORT_SYMBOL_GPL(suspend_set_ops);
bool valid_state(suspend_state_t state)
{
@@ -65,6 +66,7 @@ int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
+EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static int suspend_test(int level)
{
@@ -126,12 +128,13 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
}
/**
- * suspend_enter - enter the desired system sleep state.
- * @state: state to enter
+ * suspend_enter - enter the desired system sleep state.
+ * @state: State to enter
+ * @wakeup: Returns information that suspend should not be entered again.
*
- * This function should be called after devices have been suspended.
+ * This function should be called after devices have been suspended.
*/
-static int suspend_enter(suspend_state_t state)
+static int suspend_enter(suspend_state_t state, bool *wakeup)
{
int error;
@@ -165,7 +168,8 @@ static int suspend_enter(suspend_state_t state)
error = syscore_suspend();
if (!error) {
- if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
+ *wakeup = pm_wakeup_pending();
+ if (!(suspend_test(TEST_CORE) || *wakeup)) {
error = suspend_ops->enter(state);
events_check_enabled = false;
}
@@ -199,6 +203,7 @@ static int suspend_enter(suspend_state_t state)
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
+ bool wakeup = false;
if (!suspend_ops)
return -ENOSYS;
@@ -220,7 +225,10 @@ int suspend_devices_and_enter(suspend_state_t state)
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
- error = suspend_enter(state);
+ do {
+ error = suspend_enter(state, &wakeup);
+ } while (!error && !wakeup
+ && suspend_ops->suspend_again && suspend_ops->suspend_again());
Resume_devices:
suspend_test_start();
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7e59ffb3d0ba..ba06207b1dd3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -84,9 +84,32 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
static struct rcu_state *rcu_state;
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned. So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier(). When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods. This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks. So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one. We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
#ifdef CONFIG_RCU_BOOST
/*
@@ -98,7 +121,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;
#endif /* #ifdef CONFIG_RCU_BOOST */
@@ -1467,6 +1489,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
*/
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
+ if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+ return;
if (likely(!rsp->boost)) {
rcu_do_batch(rsp, rdp);
return;
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 14dc7dd00902..75113cb7c4fb 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1532,7 +1532,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
struct sched_param sp;
struct task_struct *t;
- if (!rcu_kthreads_spawnable ||
+ if (!rcu_scheduler_fully_active ||
per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
return 0;
t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
@@ -1639,7 +1639,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
struct sched_param sp;
struct task_struct *t;
- if (!rcu_kthreads_spawnable ||
+ if (!rcu_scheduler_fully_active ||
rnp->qsmaskinit == 0)
return 0;
if (rnp->node_kthread_task == NULL) {
@@ -1665,7 +1665,7 @@ static int __init rcu_spawn_kthreads(void)
int cpu;
struct rcu_node *rnp;
- rcu_kthreads_spawnable = 1;
+ rcu_scheduler_fully_active = 1;
for_each_possible_cpu(cpu) {
per_cpu(rcu_cpu_has_work, cpu) = 0;
if (cpu_online(cpu))
@@ -1687,7 +1687,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
struct rcu_node *rnp = rdp->mynode;
/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
- if (rcu_kthreads_spawnable) {
+ if (rcu_scheduler_fully_active) {
(void)rcu_spawn_one_cpu_kthread(cpu);
if (rnp->node_kthread_task == NULL)
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,6 +1726,13 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}
+static int __init rcu_scheduler_really_started(void)
+{
+ rcu_scheduler_fully_active = 1;
+ return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}
diff --git a/kernel/resource.c b/kernel/resource.c
index 798e2fae2a06..3ff40178dce7 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -38,6 +38,14 @@ struct resource iomem_resource = {
};
EXPORT_SYMBOL(iomem_resource);
+/* constraints to be met while allocating resources */
+struct resource_constraint {
+ resource_size_t min, max, align;
+ resource_size_t (*alignf)(void *, const struct resource *,
+ resource_size_t, resource_size_t);
+ void *alignf_data;
+};
+
static DEFINE_RWLOCK(resource_lock);
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
@@ -384,16 +392,13 @@ static bool resource_contains(struct resource *res1, struct resource *res2)
}
/*
- * Find empty slot in the resource tree given range and alignment.
+ * Find empty slot in the resource tree with the given range and
+ * alignment constraints
*/
-static int find_resource(struct resource *root, struct resource *new,
- resource_size_t size, resource_size_t min,
- resource_size_t max, resource_size_t align,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
- void *alignf_data)
+static int __find_resource(struct resource *root, struct resource *old,
+ struct resource *new,
+ resource_size_t size,
+ struct resource_constraint *constraint)
{
struct resource *this = root->child;
struct resource tmp = *new, avail, alloc;
@@ -404,25 +409,26 @@ static int find_resource(struct resource *root, struct resource *new,
* Skip past an allocated resource that starts at 0, since the assignment
* of this->start - 1 to tmp->end below would cause an underflow.
*/
- if (this && this->start == 0) {
- tmp.start = this->end + 1;
+ if (this && this->start == root->start) {
+ tmp.start = (this == old) ? old->start : this->end + 1;
this = this->sibling;
}
for(;;) {
if (this)
- tmp.end = this->start - 1;
+ tmp.end = (this == old) ? this->end : this->start - 1;
else
tmp.end = root->end;
- resource_clip(&tmp, min, max);
+ resource_clip(&tmp, constraint->min, constraint->max);
arch_remove_reservations(&tmp);
/* Check for overflow after ALIGN() */
avail = *new;
- avail.start = ALIGN(tmp.start, align);
+ avail.start = ALIGN(tmp.start, constraint->align);
avail.end = tmp.end;
if (avail.start >= tmp.start) {
- alloc.start = alignf(alignf_data, &avail, size, align);
+ alloc.start = constraint->alignf(constraint->alignf_data, &avail,
+ size, constraint->align);
alloc.end = alloc.start + size - 1;
if (resource_contains(&avail, &alloc)) {
new->start = alloc.start;
@@ -432,14 +438,75 @@ static int find_resource(struct resource *root, struct resource *new,
}
if (!this)
break;
- tmp.start = this->end + 1;
+ if (this != old)
+ tmp.start = this->end + 1;
this = this->sibling;
}
return -EBUSY;
}
+/*
+ * Find empty slot in the resource tree given range and alignment.
+ */
+static int find_resource(struct resource *root, struct resource *new,
+ resource_size_t size,
+ struct resource_constraint *constraint)
+{
+ return __find_resource(root, NULL, new, size, constraint);
+}
+
/**
- * allocate_resource - allocate empty slot in the resource tree given range & alignment
+ * reallocate_resource - allocate a slot in the resource tree given range & alignment.
+ * The resource will be relocated if it cannot be grown to the new size at
+ * its current location.
+ *
+ * @root: root resource descriptor
+ * @old: resource descriptor desired by caller
+ * @newsize: new size of the resource descriptor
+ * @constraint: the size and alignment constraints to be met.
+ */
+int reallocate_resource(struct resource *root, struct resource *old,
+ resource_size_t newsize,
+ struct resource_constraint *constraint)
+{
+ int err=0;
+ struct resource new = *old;
+ struct resource *conflict;
+
+ write_lock(&resource_lock);
+
+ if ((err = __find_resource(root, old, &new, newsize, constraint)))
+ goto out;
+
+ if (resource_contains(&new, old)) {
+ old->start = new.start;
+ old->end = new.end;
+ goto out;
+ }
+
+ if (old->child) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (resource_contains(old, &new)) {
+ old->start = new.start;
+ old->end = new.end;
+ } else {
+ __release_resource(old);
+ *old = new;
+ conflict = __request_resource(root, old);
+ BUG_ON(conflict);
+ }
+out:
+ write_unlock(&resource_lock);
+ return err;
+}
+
+
+/**
+ * allocate_resource - allocate empty slot in the resource tree given range & alignment.
+ * The resource will be reallocated with a new size if it was already allocated
* @root: root resource descriptor
* @new: resource descriptor desired by caller
* @size: requested resource region size
@@ -459,12 +526,25 @@ int allocate_resource(struct resource *root, struct resource *new,
void *alignf_data)
{
int err;
+ struct resource_constraint constraint;
if (!alignf)
alignf = simple_align_resource;
+ constraint.min = min;
+ constraint.max = max;
+ constraint.align = align;
+ constraint.alignf = alignf;
+ constraint.alignf_data = alignf_data;
+
+ if ( new->parent ) {
+ /* resource is already allocated, try reallocating with
+ the new constraints */
+ return reallocate_resource(root, new, size, &constraint);
+ }
+
write_lock(&resource_lock);
- err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+ err = find_resource(root, new, size, &constraint);
if (err >= 0 && __request_resource(root, new))
err = -EBUSY;
write_unlock(&resource_lock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f2e502d609b..3dc716f6d8ad 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);
* (The default weight is 1024 - so there's no practical
* limitation from this.)
*/
-#define MIN_SHARES 2
-#define MAX_SHARES (1UL << (18 + SCHED_LOAD_RESOLUTION))
+#define MIN_SHARES (1UL << 1)
+#define MAX_SHARES (1UL << 18)
static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif
@@ -7757,6 +7757,9 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
#endif
#endif
cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+ cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
}
static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
@@ -8450,10 +8453,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
if (!tg->se[0])
return -EINVAL;
- if (shares < MIN_SHARES)
- shares = MIN_SHARES;
- else if (shares > MAX_SHARES)
- shares = MAX_SHARES;
+ shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
mutex_lock(&shares_mutex);
if (tg->shares == shares)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 9d86e45086f5..a78b7c6e042c 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -198,7 +198,7 @@ static void free_object(struct debug_obj *obj)
* initialized:
*/
if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
- sched = !work_pending(&debug_obj_work);
+ sched = keventd_up() && !work_pending(&debug_obj_work);
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ddffc74cdebe..e013b8e57d25 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -108,10 +108,12 @@ enum mem_cgroup_events_index {
enum mem_cgroup_events_target {
MEM_CGROUP_TARGET_THRESH,
MEM_CGROUP_TARGET_SOFTLIMIT,
+ MEM_CGROUP_TARGET_NUMAINFO,
MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
+#define NUMAINFO_EVENTS_TARGET (1024)
struct mem_cgroup_stat_cpu {
long count[MEM_CGROUP_STAT_NSTATS];
@@ -237,7 +239,8 @@ struct mem_cgroup {
int last_scanned_node;
#if MAX_NUMNODES > 1
nodemask_t scan_nodes;
- unsigned long next_scan_node_update;
+ atomic_t numainfo_events;
+ atomic_t numainfo_updating;
#endif
/*
* Should the accounting and control be hierarchical, per subtree?
@@ -577,15 +580,6 @@ static long mem_cgroup_read_stat(struct mem_cgroup *mem,
return val;
}
-static long mem_cgroup_local_usage(struct mem_cgroup *mem)
-{
- long ret;
-
- ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
- ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
- return ret;
-}
-
static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
bool charge)
{
@@ -689,6 +683,9 @@ static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
case MEM_CGROUP_TARGET_SOFTLIMIT:
next = val + SOFTLIMIT_EVENTS_TARGET;
break;
+ case MEM_CGROUP_TARGET_NUMAINFO:
+ next = val + NUMAINFO_EVENTS_TARGET;
+ break;
default:
return;
}
@@ -707,11 +704,19 @@ static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
mem_cgroup_threshold(mem);
__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
if (unlikely(__memcg_event_check(mem,
- MEM_CGROUP_TARGET_SOFTLIMIT))){
+ MEM_CGROUP_TARGET_SOFTLIMIT))) {
mem_cgroup_update_tree(mem, page);
__mem_cgroup_target_update(mem,
- MEM_CGROUP_TARGET_SOFTLIMIT);
+ MEM_CGROUP_TARGET_SOFTLIMIT);
+ }
+#if MAX_NUMNODES > 1
+ if (unlikely(__memcg_event_check(mem,
+ MEM_CGROUP_TARGET_NUMAINFO))) {
+ atomic_inc(&mem->numainfo_events);
+ __mem_cgroup_target_update(mem,
+ MEM_CGROUP_TARGET_NUMAINFO);
}
+#endif
}
}
@@ -1129,7 +1134,6 @@ unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
return MEM_CGROUP_ZSTAT(mz, lru);
}
-#ifdef CONFIG_NUMA
static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
int nid)
{
@@ -1141,6 +1145,17 @@ static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
return ret;
}
+static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
+ int nid)
+{
+ unsigned long ret;
+
+ ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
+ mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
+ return ret;
+}
+
+#if MAX_NUMNODES > 1
static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
{
u64 total = 0;
@@ -1152,17 +1167,6 @@ static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
return total;
}
-static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
- int nid)
-{
- unsigned long ret;
-
- ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
- mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
-
- return ret;
-}
-
static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg)
{
u64 total = 0;
@@ -1559,6 +1563,28 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
return ret;
}
+/**
+ * test_mem_cgroup_node_reclaimable
+ * @mem: the target memcg
+ * @nid: the node ID to be checked.
+ * @noswap: specify true here if the caller wants file-only information.
+ *
+ * This function returns whether the specified memcg contains any
+ * reclaimable pages on a node. Returns true if there are any reclaimable
+ * pages in the node.
+ */
+static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
+ int nid, bool noswap)
+{
+ if (mem_cgroup_node_nr_file_lru_pages(mem, nid))
+ return true;
+ if (noswap || !total_swap_pages)
+ return false;
+ if (mem_cgroup_node_nr_anon_lru_pages(mem, nid))
+ return true;
+ return false;
+
+}
#if MAX_NUMNODES > 1
/*
@@ -1570,26 +1596,26 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
{
int nid;
-
- if (time_after(mem->next_scan_node_update, jiffies))
+ /*
+ * numainfo_events > 0 means there have been at least NUMAINFO_EVENTS_TARGET
+ * pagein/pageout changes since the last update.
+ */
+ if (!atomic_read(&mem->numainfo_events))
+ return;
+ if (atomic_inc_return(&mem->numainfo_updating) > 1)
return;
- mem->next_scan_node_update = jiffies + 10*HZ;
/* make a nodemask where this memcg uses memory from */
mem->scan_nodes = node_states[N_HIGH_MEMORY];
for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
- if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
- mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
- continue;
-
- if (total_swap_pages &&
- (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
- mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
- continue;
- node_clear(nid, mem->scan_nodes);
+ if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
+ node_clear(nid, mem->scan_nodes);
}
+
+ atomic_set(&mem->numainfo_events, 0);
+ atomic_set(&mem->numainfo_updating, 0);
}
/*
@@ -1627,11 +1653,51 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
return node;
}
+/*
+ * Check whether any node contains reclaimable pages.
+ * For a quick scan, we make use of scan_nodes. This allows us to skip
+ * unused nodes. But scan_nodes is lazily updated and may not contain
+ * enough new information. We need to double check.
+ */
+bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
+{
+ int nid;
+
+ /*
+ * quick check... making use of scan_nodes.
+ * We can skip unused nodes.
+ */
+ if (!nodes_empty(mem->scan_nodes)) {
+ for (nid = first_node(mem->scan_nodes);
+ nid < MAX_NUMNODES;
+ nid = next_node(nid, mem->scan_nodes)) {
+
+ if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
+ return true;
+ }
+ }
+ /*
+ * Check rest of nodes.
+ */
+ for_each_node_state(nid, N_HIGH_MEMORY) {
+ if (node_isset(nid, mem->scan_nodes))
+ continue;
+ if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
+ return true;
+ }
+ return false;
+}
+
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
{
return 0;
}
+
+bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
+{
+ return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
+}
#endif
/*
@@ -1702,7 +1768,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
}
}
}
- if (!mem_cgroup_local_usage(victim)) {
+ if (!mem_cgroup_reclaimable(victim, noswap)) {
/* this cgroup's local usage == 0 */
css_put(&victim->css);
continue;
diff --git a/mm/memory.c b/mm/memory.c
index 40b7531ee8ba..9b8a01d941cb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -305,6 +305,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
return 0;
+ batch = tlb->active;
}
VM_BUG_ON(batch->nr > batch->max);
diff --git a/mm/nommu.c b/mm/nommu.c
index 1fd0c51b10a6..9edc897a3970 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1813,10 +1813,13 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
return NULL;
}
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long to, unsigned long size, pgprot_t prot)
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
{
- vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
+ if (addr != (pfn << PAGE_SHIFT))
+ return -EINVAL;
+
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
return 0;
}
EXPORT_SYMBOL(remap_pfn_range);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4f49535d4cd3..5ed24b94c5e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2326,7 +2326,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
return true;
/* Check the watermark levels */
- for (i = 0; i < pgdat->nr_zones; i++) {
+ for (i = 0; i <= classzone_idx; i++) {
struct zone *zone = pgdat->node_zones + i;
if (!populated_zone(zone))
@@ -2344,7 +2344,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
}
if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
- classzone_idx, 0))
+ i, 0))
all_zones_ok = false;
else
balanced += zone->present_pages;
@@ -2451,7 +2451,6 @@ loop_again:
if (!zone_watermark_ok_safe(zone, order,
high_wmark_pages(zone), 0, 0)) {
end_zone = i;
- *classzone_idx = i;
break;
}
}
@@ -2510,18 +2509,18 @@ loop_again:
KSWAPD_ZONE_BALANCE_GAP_RATIO);
if (!zone_watermark_ok_safe(zone, order,
high_wmark_pages(zone) + balance_gap,
- end_zone, 0))
+ end_zone, 0)) {
shrink_zone(priority, zone, &sc);
- reclaim_state->reclaimed_slab = 0;
- nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
- sc.nr_reclaimed += reclaim_state->reclaimed_slab;
- total_scanned += sc.nr_scanned;
- if (zone->all_unreclaimable)
- continue;
- if (nr_slab == 0 &&
- !zone_reclaimable(zone))
- zone->all_unreclaimable = 1;
+ reclaim_state->reclaimed_slab = 0;
+ nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
+ sc.nr_reclaimed += reclaim_state->reclaimed_slab;
+ total_scanned += sc.nr_scanned;
+
+ if (nr_slab == 0 && !zone_reclaimable(zone))
+ zone->all_unreclaimable = 1;
+ }
+
/*
* If we've done a decent amount of scanning and
* the reclaim ratio is low, start doing writepage
@@ -2531,6 +2530,12 @@ loop_again:
total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
sc.may_writepage = 1;
+ if (zone->all_unreclaimable) {
+ if (end_zone && end_zone == i)
+ end_zone--;
+ continue;
+ }
+
if (!zone_watermark_ok_safe(zone, order,
high_wmark_pages(zone), end_zone, 0)) {
all_zones_ok = 0;
@@ -2709,8 +2714,8 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
*/
static int kswapd(void *p)
{
- unsigned long order;
- int classzone_idx;
+ unsigned long order, new_order;
+ int classzone_idx, new_classzone_idx;
pg_data_t *pgdat = (pg_data_t*)p;
struct task_struct *tsk = current;
@@ -2740,17 +2745,23 @@ static int kswapd(void *p)
tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
set_freezable();
- order = 0;
- classzone_idx = MAX_NR_ZONES - 1;
+ order = new_order = 0;
+ classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
for ( ; ; ) {
- unsigned long new_order;
- int new_classzone_idx;
int ret;
- new_order = pgdat->kswapd_max_order;
- new_classzone_idx = pgdat->classzone_idx;
- pgdat->kswapd_max_order = 0;
- pgdat->classzone_idx = MAX_NR_ZONES - 1;
+ /*
+ * If the last balance_pgdat was unsuccessful it's unlikely a
+ * new request of a similar or harder type will succeed soon
+ * so consider going to sleep on the basis we reclaimed at
+ */
+ if (classzone_idx >= new_classzone_idx && order == new_order) {
+ new_order = pgdat->kswapd_max_order;
+ new_classzone_idx = pgdat->classzone_idx;
+ pgdat->kswapd_max_order = 0;
+ pgdat->classzone_idx = pgdat->nr_zones - 1;
+ }
+
if (order < new_order || classzone_idx > new_classzone_idx) {
/*
* Don't sleep if someone wants a larger 'order'
@@ -2763,7 +2774,7 @@ static int kswapd(void *p)
order = pgdat->kswapd_max_order;
classzone_idx = pgdat->classzone_idx;
pgdat->kswapd_max_order = 0;
- pgdat->classzone_idx = MAX_NR_ZONES - 1;
+ pgdat->classzone_idx = pgdat->nr_zones - 1;
}
ret = try_to_freeze();
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 7ea5cf9ea08a..86bff9b1ac47 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -586,9 +586,14 @@ static void vlan_dev_uninit(struct net_device *dev)
static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ u32 old_features = features;
features &= real_dev->features;
features &= real_dev->vlan_features;
+
+ if (old_features & NETIF_F_SOFT_FEATURES)
+ features |= old_features & NETIF_F_SOFT_FEATURES;
+
if (dev_ethtool_get_rx_csum(real_dev))
features |= NETIF_F_RXCSUM;
features |= NETIF_F_LLTX;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index d3a05b9ade7a..bcd158f40bb9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -393,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn)
hci_dev_put(hdev);
+ if (conn->handle == 0)
+ kfree(conn);
+
return 0;
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index c405a954a603..43b4c2deb7cc 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg)
{
struct hidp_session *session = (struct hidp_session *) arg;
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
}
static void hidp_set_timer(struct hidp_session *session)
@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(current);
}
}
@@ -706,9 +708,8 @@ static int hidp_session(void *arg)
add_wait_queue(sk_sleep(intr_sk), &intr_wait);
session->waiting_for_startup = 0;
wake_up_interruptible(&session->startup_queue);
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
-
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!atomic_read(&session->terminate)) {
if (ctrl_sk->sk_state != BT_CONNECTED ||
intr_sk->sk_state != BT_CONNECTED)
break;
@@ -726,6 +727,7 @@ static int hidp_session(void *arg)
hidp_process_transmit(session);
schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
err_add_device:
hid_destroy_device(session->hid);
session->hid = NULL;
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
unlink:
hidp_del_timer(session);
@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
}
} else
err = -ENOENT;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 19e95004b286..af1bcc823f26 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -142,6 +142,7 @@ struct hidp_session {
uint ctrl_mtu;
uint intr_mtu;
+ atomic_t terminate;
struct task_struct *task;
unsigned char keys[8];
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 56fdd9162da9..ebff14c69078 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2323,7 +2323,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
sk = chan->sk;
- if (sk->sk_state != BT_CONFIG) {
+ if ((bt_sk(sk)->defer_setup && sk->sk_state != BT_CONNECT2) ||
+ (!bt_sk(sk)->defer_setup && sk->sk_state != BT_CONFIG)) {
struct l2cap_cmd_rej rej;
rej.reason = cpu_to_le16(0x0002);
@@ -2334,7 +2335,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
- if (chan->conf_len + len > sizeof(chan->conf_req)) {
+ if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, rsp,
L2CAP_CONF_REJECT, flags), rsp);
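The added "len < 0" test above matters because cmd_len is peer-controlled and may be smaller than the fixed config-request header; the resulting negative length, if later used as an unsigned size, wraps to a huge value. A small sketch of that failure mode, with simplified, illustrative types rather than the l2cap structures:

#include <stdint.h>
#include <stdio.h>

struct conf_req_hdr { uint16_t dcid; uint16_t flags; };   /* illustrative */

int main(void)
{
    uint16_t cmd_len = 2;                       /* shorter than the header */
    int len = cmd_len - (int)sizeof(struct conf_req_hdr);

    printf("len = %d\n", len);                  /* -2 */
    printf("as size_t: %zu\n", (size_t)len);    /* wraps to an enormous value */

    /* The patched check rejects this before any copy is attempted. */
    if (len < 0 || (size_t)len > 64)
        printf("config request rejected\n");
    return 0;
}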
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index c188c803c09c..32b8f9f7f79e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -49,7 +49,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb_pull(skb, ETH_HLEN);
rcu_read_lock();
- if (is_multicast_ether_addr(dest)) {
+ if (is_broadcast_ether_addr(dest))
+ br_flood_deliver(br, skb);
+ else if (is_multicast_ether_addr(dest)) {
if (unlikely(netpoll_tx_running(dev))) {
br_flood_deliver(br, skb);
goto out;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f3ac1e858ee1..f06ee39c73fd 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -60,7 +60,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
br = p->br;
br_fdb_update(br, p, eth_hdr(skb)->h_source);
- if (is_multicast_ether_addr(dest) &&
+ if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
br_multicast_rcv(br, p, skb))
goto drop;
@@ -77,7 +77,9 @@ int br_handle_frame_finish(struct sk_buff *skb)
dst = NULL;
- if (is_multicast_ether_addr(dest)) {
+ if (is_broadcast_ether_addr(dest))
+ skb2 = skb;
+ else if (is_multicast_ether_addr(dest)) {
mdst = br_mdb_get(br, skb);
if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
if ((mdst && mdst->mglist) ||
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 9cb627a4073a..7330c2757c0c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -477,8 +477,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
calc_layout(osdc, vino, layout, off, plen, req, ops);
req->r_file_layout = *layout; /* keep a copy */
- /* in case it differs from natural alignment that calc_layout
- filled in for us */
+ /* in case it differs from natural (file) alignment that
+ calc_layout filled in for us */
+ req->r_num_pages = calc_pages_for(page_align, *plen);
req->r_page_alignment = page_align;
ceph_osdc_build_request(req, off, plen, ops,
@@ -2027,8 +2028,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
int want = calc_pages_for(req->r_page_alignment, data_len);
if (unlikely(req->r_num_pages < want)) {
- pr_warning("tid %lld reply %d > expected %d pages\n",
- tid, want, m->nr_pages);
+ pr_warning("tid %lld reply has %d bytes %d pages, we"
+ " had only %d pages ready\n", tid, data_len,
+ want, req->r_num_pages);
*skip = 1;
ceph_msg_put(m);
m = NULL;
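The osd_client fix recomputes r_num_pages from the caller-supplied page alignment so that the later sanity check in get_reply() compares like with like. A standalone sketch of that page-count arithmetic, assuming 4 KiB pages and illustrative values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* How many pages does a byte range of length len touch, starting at
 * offset `off` within its first page? */
static unsigned long pages_for(uint64_t off, uint64_t len)
{
    return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) - (off >> PAGE_SHIFT);
}

int main(void)
{
    /* 8 KiB of data starting 512 bytes into a page touches 3 pages. */
    printf("%lu\n", pages_for(512, 8192));   /* 3 */
    /* The same 8 KiB, page aligned, needs only 2. */
    printf("%lu\n", pages_for(0, 8192));     /* 2 */
    return 0;
}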
diff --git a/net/core/dst.c b/net/core/dst.c
index 9ccca038444f..6135f3671692 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -190,7 +190,8 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst->lastuse = jiffies;
dst->flags = flags;
dst->next = NULL;
- dst_entries_add(ops, 1);
+ if (!(flags & DST_NOCOUNT))
+ dst_entries_add(ops, 1);
return dst;
}
EXPORT_SYMBOL(dst_alloc);
@@ -243,7 +244,8 @@ again:
neigh_release(neigh);
}
- dst_entries_add(dst->ops, -1);
+ if (!(dst->flags & DST_NOCOUNT))
+ dst_entries_add(dst->ops, -1);
if (dst->ops->destroy)
dst->ops->destroy(dst);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index eae1f676f870..ef1528af7abf 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -465,8 +465,10 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_len < sizeof(struct sockaddr_in))
goto out;
- if (addr->sin_family != AF_INET)
+ if (addr->sin_family != AF_INET) {
+ err = -EAFNOSUPPORT;
goto out;
+ }
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4a7e16b5d3f3..84f26e8e6c60 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO)) {
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
mtu, flags);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 054a59d21eb0..46febcacb729 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3220,7 +3220,7 @@ __setup("thash_entries=", set_thash_entries);
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
- unsigned long nr_pages, limit;
+ unsigned long limit;
int i, max_share, cnt;
unsigned long jiffy = jiffies;
@@ -3277,13 +3277,7 @@ void __init tcp_init(void)
sysctl_tcp_max_orphans = cnt / 2;
sysctl_max_syn_backlog = max(128, cnt / 256);
- /* Set the pressure threshold to be a fraction of global memory that
- * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
- * memory, with a floor of 128 pages.
- */
- nr_pages = totalram_pages - totalhigh_pages;
- limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
- limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+ limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_tcp_mem[0] = limit / 4 * 3;
sysctl_tcp_mem[1] = limit;
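The tcp_init change above replaces the old two-step scaling with a much simpler rule: the pressure limit is 1/8 of the free buffer pages, floored at 128 pages, and the first two tcp_mem entries are derived from it (the third entry is set outside the lines shown here). A sketch of the arithmetic with an assumed page count, not a value read from a real system:

#include <stdio.h>

int main(void)
{
    unsigned long free_buffer_pages = 262144;   /* e.g. ~1 GiB of 4 KiB pages */
    unsigned long limit = free_buffer_pages / 8;

    if (limit < 128UL)
        limit = 128UL;

    printf("tcp_mem[0] (low)      = %lu pages\n", limit / 4 * 3);  /* 24576 */
    printf("tcp_mem[1] (pressure) = %lu pages\n", limit);          /* 32768 */
    return 0;
}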
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 48cd88e62553..198f75b7bdd3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2209,16 +2209,10 @@ void __init udp_table_init(struct udp_table *table, const char *name)
void __init udp_init(void)
{
- unsigned long nr_pages, limit;
+ unsigned long limit;
udp_table_init(&udp_table, "UDP");
- /* Set the pressure threshold up by the same strategy of TCP. It is a
- * fraction of global memory that is up to 1/2 at 256 MB, decreasing
- * toward zero with the amount of memory, with a floor of 128 pages.
- */
- nr_pages = totalram_pages - totalhigh_pages;
- limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
- limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+ limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_udp_mem[0] = limit / 4 * 3;
sysctl_udp_mem[1] = limit;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 2d51840e53a1..327a617d594c 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -32,7 +32,12 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
dst = skb_dst(skb);
mtu = dst_mtu(dst);
if (skb->len > mtu) {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ if (skb->sk)
+ ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
+ inet_sk(skb->sk)->inet_dport, mtu);
+ else
+ icmp_send(skb, ICMP_DEST_UNREACH,
+ ICMP_FRAG_NEEDED, htonl(mtu));
ret = -EMSGSIZE;
}
out:
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d450a2f9fc06..3b5669a2582d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -274,7 +274,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return -EINVAL;
if (addr->sin6_family != AF_INET6)
- return -EINVAL;
+ return -EAFNOSUPPORT;
addr_type = ipv6_addr_type(&addr->sin6_addr);
if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index de2b1decd786..0ef1f086feb8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -228,9 +228,10 @@ static struct rt6_info ip6_blk_hole_entry_template = {
/* allocate dst with ip6_dst_ops */
static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
- struct net_device *dev)
+ struct net_device *dev,
+ int flags)
{
- struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, 0);
+ struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
@@ -1042,7 +1043,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
if (unlikely(idev == NULL))
return NULL;
- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev);
+ rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
if (unlikely(rt == NULL)) {
in6_dev_put(idev);
goto out;
@@ -1062,14 +1063,6 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
rt->dst.output = ip6_output;
-#if 0 /* there's no chance to use these for ndisc */
- rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
- ? DST_HOST
- : 0;
- ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
- rt->rt6i_dst.plen = 128;
-#endif
-
spin_lock_bh(&icmp6_dst_lock);
rt->dst.next = icmp6_dst_gc_list;
icmp6_dst_gc_list = &rt->dst;
@@ -1214,7 +1207,7 @@ int ip6_route_add(struct fib6_config *cfg)
goto out;
}
- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL);
+ rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
if (rt == NULL) {
err = -ENOMEM;
@@ -1244,7 +1237,7 @@ int ip6_route_add(struct fib6_config *cfg)
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->rt6i_dst.plen = cfg->fc_dst_len;
if (rt->rt6i_dst.plen == 128)
- rt->dst.flags = DST_HOST;
+ rt->dst.flags |= DST_HOST;
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1734,7 +1727,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
{
struct net *net = dev_net(ort->rt6i_dev);
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
- ort->dst.dev);
+ ort->dst.dev, 0);
if (rt) {
rt->dst.input = ort->dst.input;
@@ -2013,7 +2006,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
{
struct net *net = dev_net(idev->dev);
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
- net->loopback_dev);
+ net->loopback_dev, 0);
struct neighbour *neigh;
if (rt == NULL) {
@@ -2025,7 +2018,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
in6_dev_hold(idev);
- rt->dst.flags = DST_HOST;
+ rt->dst.flags |= DST_HOST;
rt->dst.input = ip6_input;
rt->dst.output = ip6_output;
rt->rt6i_idev = idev;
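The switch from "=" to "|=" on dst.flags above keeps whatever flags dst_alloc() already set (DST_NOCOUNT in particular, now that ip6_route_add passes it) instead of silently clobbering them. A tiny sketch of the difference, with made-up flag values used purely for illustration:

#include <stdio.h>

#define DST_HOST    0x0001
#define DST_NOCOUNT 0x0020

int main(void)
{
    unsigned int flags = DST_NOCOUNT;      /* set at allocation time */

    unsigned int assigned = flags;
    assigned = DST_HOST;                   /* plain assignment: NOCOUNT lost */

    unsigned int ored = flags;
    ored |= DST_HOST;                      /* OR-in: both bits preserved */

    printf("assigned=0x%x ored=0x%x\n", assigned, ored);
    return 0;
}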
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 58ffa7d069c7..669d2e32efb6 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -877,7 +877,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
local->sched_scan_ies.ie[i] = kzalloc(2 +
IEEE80211_MAX_SSID_LEN +
- local->scan_ies_len,
+ local->scan_ies_len +
+ req->ie_len,
GFP_KERNEL);
if (!local->sched_scan_ies.ie[i]) {
ret = -ENOMEM;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9dc3b5f26e80..8f6a302d2ac3 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -86,6 +86,11 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int queue = rx->queue;
+
+ /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+ if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+ queue = 0;
/*
* it makes no sense to check for MIC errors on anything other
@@ -148,13 +153,19 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
update_iv:
/* update IV in key information to be able to detect replays */
- rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
- rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
+ rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
+ rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
return RX_CONTINUE;
mic_fail:
- mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
+ /*
+ * In some cases the key can be unset - e.g. a multicast packet, in
+ * a driver that supports HW encryption. Send up the key idx only if
+ * the key is set.
+ */
+ mac80211_ev_michael_mic_failure(rx->sdata,
+ rx->key ? rx->key->conf.keyidx : -1,
(void *) skb->data, NULL, GFP_ATOMIC);
return RX_DROP_UNUSABLE;
}
@@ -235,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ int queue = rx->queue;
+
+ /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+ if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+ queue = 0;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -255,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
key, skb->data + hdrlen,
skb->len - hdrlen, rx->sta->sta.addr,
- hdr->addr1, hwaccel, rx->queue,
+ hdr->addr1, hwaccel, queue,
&rx->tkip_iv32,
&rx->tkip_iv16);
if (res != TKIP_DECRYPT_OK)
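The wpa.c hunks fold the non-QoS RX queue index back to queue 0 before reading or updating the TKIP IV counters, so non-QoS frames and TID 0 share one replay context instead of offering two independent windows to replay into. A minimal sketch of that remapping; the queue count below is an assumed, illustrative value:

#include <stdio.h>

#define NUM_RX_DATA_QUEUES 17   /* illustrative: 16 TIDs + 1 non-QoS queue */

static int tkip_rx_queue(int rx_queue)
{
    if (rx_queue == NUM_RX_DATA_QUEUES - 1)
        return 0;    /* non-QoS frames share TID 0 replay counters */
    return rx_queue;
}

int main(void)
{
    printf("%d %d %d\n", tkip_rx_queue(0), tkip_rx_queue(5),
           tkip_rx_queue(NUM_RX_DATA_QUEUES - 1));   /* 0 5 0 */
    return 0;
}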
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 1c88c8911dc5..d03682109b7a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1582,6 +1582,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
#endif /* SCTP_DEBUG */
if (transport) {
if (bytes_acked) {
+ struct sctp_association *asoc = transport->asoc;
+
/* We may have counted DATA that was migrated
* to this transport due to DEL-IP operation.
* Subtract those bytes, since the were never
@@ -1600,6 +1602,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
transport->error_count = 0;
transport->asoc->overall_error_count = 0;
+ /*
+ * While in SHUTDOWN PENDING, we may have started
+ * the T5 shutdown guard timer after reaching the
+ * retransmission limit. Stop that timer as soon
+ * as the receiver acknowledged any data.
+ */
+ if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+ del_timer(&asoc->timers
+ [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+ sctp_association_put(asoc);
+
/* Mark the destination transport address as
* active if it is not so marked.
*/
@@ -1629,10 +1642,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* A sender is doing zero window probing when the
* receiver's advertised window is zero, and there is
* only one data chunk in flight to the receiver.
+ *
+ * Allow the association to timeout while in SHUTDOWN
+ * PENDING or SHUTDOWN RECEIVED in case the receiver
+ * stays in zero window mode forever.
*/
if (!q->asoc->peer.rwnd &&
!list_empty(&tlist) &&
- (sack_ctsn+2 == q->asoc->next_tsn)) {
+ (sack_ctsn+2 == q->asoc->next_tsn) &&
+ q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
SCTP_DEBUG_PRINTK("%s: SACK received for zero "
"window probe: %u\n",
__func__, sack_ctsn);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 67380a29e2e9..207175b2f40a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1058,7 +1058,6 @@ SCTP_STATIC __init int sctp_init(void)
int status = -EINVAL;
unsigned long goal;
unsigned long limit;
- unsigned long nr_pages;
int max_share;
int order;
@@ -1148,15 +1147,7 @@ SCTP_STATIC __init int sctp_init(void)
/* Initialize handle used for association ids. */
idr_init(&sctp_assocs_id);
- /* Set the pressure threshold to be a fraction of global memory that
- * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
- * memory, with a floor of 128 pages.
- * Note this initializes the data in sctpv6_prot too
- * Unabashedly stolen from tcp_init
- */
- nr_pages = totalram_pages - totalhigh_pages;
- limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
- limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+ limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_sctp_mem[0] = limit / 4 * 3;
sysctl_sctp_mem[1] = limit;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 534c2e5feb05..6e0f88295aaf 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -670,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
* HEARTBEAT should clear the error counter of the destination
* transport address to which the HEARTBEAT was sent.
- * The association's overall error count is also cleared.
*/
t->error_count = 0;
- t->asoc->overall_error_count = 0;
+
+ /*
+ * Although RFC4960 specifies that the overall error count must
+ * be cleared when a HEARTBEAT ACK is received, we make an
+ * exception while in SHUTDOWN PENDING. If the peer keeps its
+ * window shut forever, we may never be able to transmit our
+ * outstanding data and rely on the retransmission limit be reached
+ * to shutdown the association.
+ */
+ if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+ t->asoc->overall_error_count = 0;
/* Clear the hb_sent flag to signal that we had a good
* acknowledgement.
@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
break;
+ case SCTP_CMD_TIMER_START_ONCE:
+ timer = &asoc->timers[cmd->obj.to];
+
+ if (timer_pending(timer))
+ break;
+ /* fall through */
+
case SCTP_CMD_TIMER_START:
timer = &asoc->timers[cmd->obj.to];
timeout = asoc->timeouts[cmd->obj.to];
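The new SCTP_CMD_TIMER_START_ONCE case above arms the T5 timer only if it is not already pending, then falls through to the normal start path; a restart, by contrast, always pushes the expiry out. A sketch of that distinction with a trivial stand-in timer, not the kernel's struct timer_list:

#include <stdbool.h>
#include <stdio.h>

struct simple_timer {
    bool pending;
    unsigned long expires;
};

static void timer_restart(struct simple_timer *t, unsigned long now,
                          unsigned long timeout)
{
    t->pending = true;
    t->expires = now + timeout;      /* always re-armed */
}

static void timer_start_once(struct simple_timer *t, unsigned long now,
                             unsigned long timeout)
{
    if (t->pending)
        return;                      /* keep the original deadline */
    timer_restart(t, now, timeout);
}

int main(void)
{
    struct simple_timer t5 = { .pending = false, .expires = 0 };

    timer_start_once(&t5, 100, 300);   /* armed: expires at 400 */
    timer_start_once(&t5, 200, 300);   /* ignored: still 400 */
    printf("expires=%lu\n", t5.expires);

    timer_restart(&t5, 200, 300);      /* pushed out to 500 */
    printf("expires=%lu\n", t5.expires);
    return 0;
}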
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a297283154d5..246117142b5c 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5154,7 +5154,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
*/
- sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
if (asoc->autoclose)
@@ -5299,14 +5299,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
if (asoc->overall_error_count >= asoc->max_retrans) {
- sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
- SCTP_ERROR(ETIMEDOUT));
- /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
- sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
- SCTP_PERR(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
- return SCTP_DISPOSITION_DELETE_TCB;
+ if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+ /*
+ * We are here likely because the receiver had its rwnd
+ * closed for a while and we have not been able to
+ * transmit the locally queued data within the maximum
+ * retransmission attempts limit. Start the T5
+ * shutdown guard timer to give the receiver one last
+ * chance and some additional time to recover before
+ * aborting.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+ } else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
}
/* E1) For the destination address for which the timer
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 0338dc6fdc9d..7c211a7f90f4 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
/* SCTP_STATE_ESTABLISHED */ \
TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
/* SCTP_STATE_SHUTDOWN_PENDING */ \
- TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
/* SCTP_STATE_SHUTDOWN_SENT */ \
TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6766913a53e6..d3ccf7973c59 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1384,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct list_head *pos, *temp;
+ unsigned int data_was_unread;
SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
@@ -1393,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
ep = sctp_sk(sk)->ep;
+ /* Clean up any skbs sitting on the receive queue. */
+ data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+ data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
/* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1410,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
}
}
- if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+ if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+ !skb_queue_empty(&asoc->ulpq.reasm) ||
+ (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
struct sctp_chunk *chunk;
chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1420,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
sctp_primitive_SHUTDOWN(asoc, NULL);
}
- /* Clean up any skbs sitting on the receive queue. */
- sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
- sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
/* On a TCP-style socket, block for at most linger_time if set. */
if (sctp_style(sk, TCP) && timeout)
sctp_wait_for_close(sk, timeout);
@@ -2073,10 +2076,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
unsigned int optlen)
{
+ struct sctp_association *asoc;
+ struct sctp_ulpevent *event;
+
if (optlen > sizeof(struct sctp_event_subscribe))
return -EINVAL;
if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
return -EFAULT;
+
+ /*
+ * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+ * if there is no data to be sent or retransmit, the stack will
+ * immediately send up this notification.
+ */
+ if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
+ &sctp_sk(sk)->subscribe)) {
+ asoc = sctp_id2assoc(sk, 0);
+
+ if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
+ event = sctp_ulpevent_make_sender_dry_event(asoc,
+ GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ sctp_ulpq_tail_event(&asoc->ulpq, event);
+ }
+ }
+
return 0;
}
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index e70e5fc87890..8a84017834c2 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
}
/* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(list)) != NULL)
- sctp_ulpevent_free(sctp_skb2event(skb));
+ unsigned int data_unread = 0;
+
+ while ((skb = skb_dequeue(list)) != NULL) {
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+ if (!sctp_ulpevent_is_notification(event))
+ data_unread += skb->len;
+
+ sctp_ulpevent_free(event);
+ }
+
+ return data_unread;
}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 9a80a922c527..e45d2fbbe5a8 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -597,7 +597,7 @@ void rpcb_getport_async(struct rpc_task *task)
u32 bind_version;
struct rpc_xprt *xprt;
struct rpc_clnt *rpcb_clnt;
- static struct rpcbind_args *map;
+ struct rpcbind_args *map;
struct rpc_task *child;
struct sockaddr_storage addr;
struct sockaddr *sap = (struct sockaddr *)&addr;
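Dropping the stray "static" above turns a single shared object back into per-call storage: a function-local static is one instance for every concurrent caller, so parallel rpcbind lookups would race on the same map. A small sketch of the bug class, with illustrative structures rather than the rpcbind code:

#include <stdio.h>

struct bind_args { int prog; int port; };

static struct bind_args *get_map_static(int prog)
{
    static struct bind_args map;     /* one instance shared by all callers */
    map.prog = prog;
    return &map;
}

static struct bind_args *get_map_auto(struct bind_args *map, int prog)
{
    map->prog = prog;                /* caller-owned storage, no sharing */
    return map;
}

int main(void)
{
    struct bind_args *a = get_map_static(100000);
    struct bind_args *b = get_map_static(100003);
    printf("static: same object? %s (a->prog=%d)\n",
           a == b ? "yes" : "no", a->prog);   /* yes; second call clobbers a */

    struct bind_args ma, mb;
    printf("auto: same object? %s\n",
           get_map_auto(&ma, 100000) == get_map_auto(&mb, 100003) ? "yes" : "no");
    return 0;
}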
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a27406b1654f..4814e246a874 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -616,30 +616,25 @@ static void __rpc_execute(struct rpc_task *task)
BUG_ON(RPC_IS_QUEUED(task));
for (;;) {
+ void (*do_action)(struct rpc_task *);
/*
- * Execute any pending callback.
+ * Execute any pending callback first.
*/
- if (task->tk_callback) {
- void (*save_callback)(struct rpc_task *);
-
- /*
- * We set tk_callback to NULL before calling it,
- * in case it sets the tk_callback field itself:
- */
- save_callback = task->tk_callback;
- task->tk_callback = NULL;
- save_callback(task);
- } else {
+ do_action = task->tk_callback;
+ task->tk_callback = NULL;
+ if (do_action == NULL) {
/*
* Perform the next FSM step.
- * tk_action may be NULL when the task has been killed
- * by someone else.
+ * tk_action may be NULL if the task has been killed.
+ * In particular, note that rpc_killall_tasks may
+ * do this at any time, so beware when dereferencing.
*/
- if (task->tk_action == NULL)
+ do_action = task->tk_action;
+ if (do_action == NULL)
break;
- task->tk_action(task);
}
+ do_action(task);
/*
* Lockless check for whether task is sleeping or not.
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c22ef3492ee6..880dbe2e6f94 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
mutex_init(&rdev->mtx);
mutex_init(&rdev->devlist_mtx);
+ mutex_init(&rdev->sched_scan_mtx);
INIT_LIST_HEAD(&rdev->netdev_list);
spin_lock_init(&rdev->bss_lock);
INIT_LIST_HEAD(&rdev->bss_list);
@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
rfkill_destroy(rdev->rfkill);
mutex_destroy(&rdev->mtx);
mutex_destroy(&rdev->devlist_mtx);
+ mutex_destroy(&rdev->sched_scan_mtx);
list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
cfg80211_put_bss(&scan->pub);
cfg80211_rdev_free_wowlan(rdev);
@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work)
___cfg80211_scan_done(rdev, true);
}
+ cfg80211_unlock_rdev(rdev);
+
+ mutex_lock(&rdev->sched_scan_mtx);
+
if (WARN_ON(rdev->sched_scan_req &&
rdev->sched_scan_req->dev == wdev->netdev)) {
__cfg80211_stop_sched_scan(rdev, false);
}
- cfg80211_unlock_rdev(rdev);
+ mutex_unlock(&rdev->sched_scan_mtx);
mutex_lock(&rdev->devlist_mtx);
rdev->opencount--;
@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
- cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->sched_scan_mtx);
__cfg80211_stop_sched_scan(rdev, false);
- cfg80211_unlock_rdev(rdev);
+ mutex_unlock(&rdev->sched_scan_mtx);
wdev_lock(wdev);
#ifdef CONFIG_CFG80211_WEXT
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3dce1f167eba..a570ff9214ec 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -65,6 +65,8 @@ struct cfg80211_registered_device {
struct work_struct scan_done_wk;
struct work_struct sched_scan_results_wk;
+ struct mutex sched_scan_mtx;
+
#ifdef CONFIG_NL80211_TESTMODE
struct genl_info *testmode_info;
#endif
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98fa8eb6cc4b..cea338150d05 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3461,9 +3461,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
- if (rdev->sched_scan_req)
- return -EINPROGRESS;
-
if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
return -EINVAL;
@@ -3502,12 +3499,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (ie_len > wiphy->max_scan_ie_len)
return -EINVAL;
+ mutex_lock(&rdev->sched_scan_mtx);
+
+ if (rdev->sched_scan_req) {
+ err = -EINPROGRESS;
+ goto out;
+ }
+
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
- if (!request)
- return -ENOMEM;
+ if (!request) {
+ err = -ENOMEM;
+ goto out;
+ }
if (n_ssids)
request->ssids = (void *)&request->channels[n_channels];
@@ -3605,6 +3611,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
out_free:
kfree(request);
out:
+ mutex_unlock(&rdev->sched_scan_mtx);
return err;
}
@@ -3612,12 +3619,17 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ int err;
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
!rdev->ops->sched_scan_stop)
return -EOPNOTSUPP;
- return __cfg80211_stop_sched_scan(rdev, false);
+ mutex_lock(&rdev->sched_scan_mtx);
+ err = __cfg80211_stop_sched_scan(rdev, false);
+ mutex_unlock(&rdev->sched_scan_mtx);
+
+ return err;
}
static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
@@ -6463,7 +6475,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
if (addr)
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
- NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
+ if (key_id != -1)
+ NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
if (tsc)
NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 7a6c67667d70..ae0c2256ba3b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
rdev = container_of(wk, struct cfg80211_registered_device,
sched_scan_results_wk);
- cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->sched_scan_mtx);
/* we don't have sched_scan_req anymore if the scan is stopping */
if (rdev->sched_scan_req)
nl80211_send_sched_scan_results(rdev,
rdev->sched_scan_req->dev);
- cfg80211_unlock_rdev(rdev);
+ mutex_unlock(&rdev->sched_scan_mtx);
}
void cfg80211_sched_scan_results(struct wiphy *wiphy)
@@ -123,9 +123,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
{
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->sched_scan_mtx);
__cfg80211_stop_sched_scan(rdev, true);
- cfg80211_unlock_rdev(rdev);
+ mutex_unlock(&rdev->sched_scan_mtx);
}
EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
@@ -135,7 +135,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
int err;
struct net_device *dev;
- ASSERT_RDEV_LOCK(rdev);
+ lockdep_assert_held(&rdev->sched_scan_mtx);
if (!rdev->sched_scan_req)
return 0;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9bec2e8a838c..5ce74a385525 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -50,7 +50,7 @@ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
-static int xfrm_bundle_ok(struct xfrm_dst *xdst, int family);
+static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
@@ -2241,7 +2241,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
static int stale_bundle(struct dst_entry *dst)
{
- return !xfrm_bundle_ok((struct xfrm_dst *)dst, AF_UNSPEC);
+ return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -2313,7 +2313,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
* still valid.
*/
-static int xfrm_bundle_ok(struct xfrm_dst *first, int family)
+static int xfrm_bundle_ok(struct xfrm_dst *first)
{
struct dst_entry *dst = &first->u.dst;
struct xfrm_dst *last;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d70f85eb7864..9414b9c5b1e4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1345,6 +1345,8 @@ out:
xfrm_state_check_expire(x1);
err = 0;
+ x->km.state = XFRM_STATE_DEAD;
+ __xfrm_state_put(x);
}
spin_unlock_bh(&x1->lock);
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 3b029cba2baf..a27235685949 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -21,13 +21,15 @@ fi
# older versions of depmod require the version string to start with three
# numbers, so we cheat with a symlink here
depmod_hack_needed=true
-mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE
-if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then
- if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \
- -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then
+tmp_dir=$(mktemp -d ${TMPDIR:-/tmp}/depmod.XXXXXX)
+mkdir -p "$tmp_dir/lib/modules/$KERNELRELEASE"
+if "$DEPMOD" -b "$tmp_dir" $KERNELRELEASE 2>/dev/null; then
+ if test -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep" -o \
+ -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep.bin"; then
depmod_hack_needed=false
fi
fi
+rm -rf "$tmp_dir"
if $depmod_hack_needed; then
symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
ln -s "$KERNELRELEASE" "$symlink"
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 6e2409181895..bfee60c4d4c0 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -599,4 +599,4 @@ module_exit(atmel_abdac_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for Atmel Audio Bitstream DAC (ABDAC)");
-MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index b310702c646e..ac35222ad0dd 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -1199,4 +1199,4 @@ module_exit(atmel_ac97c_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for Atmel AC97 controller");
-MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index f16bc8aad6ed..e083122ca55a 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -149,7 +149,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
&((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
desc->addr = cpu_to_le32(addr);
desc->size = cpu_to_le32(period_bytes);
- desc->ctlreserved = cpu_to_le32(PRD_EOP);
+ desc->ctlreserved = cpu_to_le16(PRD_EOP);
desc_addr += sizeof(struct cs5535audio_dma_desc);
addr += period_bytes;
}
@@ -157,7 +157,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
lastdesc = &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[periods];
lastdesc->addr = cpu_to_le32((u32) dma->desc_buf.addr);
lastdesc->size = 0;
- lastdesc->ctlreserved = cpu_to_le32(PRD_JMP);
+ lastdesc->ctlreserved = cpu_to_le16(PRD_JMP);
jmpprd_addr = cpu_to_le32(lastdesc->addr +
(sizeof(struct cs5535audio_dma_desc)*periods));
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index b05f7be9dc1b..e3e853153d14 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -294,7 +294,7 @@ static int hdmi_update_eld(struct hdmi_eld *e,
snd_printd(KERN_INFO "HDMI: out of range MNL %d\n", mnl);
goto out_fail;
} else
- strlcpy(e->monitor_name, buf + ELD_FIXED_BYTES, mnl);
+ strlcpy(e->monitor_name, buf + ELD_FIXED_BYTES, mnl + 1);
for (i = 0; i < e->sad_count; i++) {
if (ELD_FIXED_BYTES + mnl + 3 * (i + 1) > size) {
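The hda_eld fix corrects an off-by-one in the strlcpy() size argument: strlcpy copies at most size-1 characters and always NUL-terminates, so keeping all mnl characters of the monitor name needs a size of mnl + 1. A sketch using a local copy of the usual BSD strlcpy semantics, since glibc does not provide one:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t n = len < size - 1 ? len : size - 1;
        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return len;
}

int main(void)
{
    const char *name = "DELL U2410";        /* pretend mnl == 10 */
    size_t mnl = strlen(name);
    char buf[32];

    my_strlcpy(buf, name, mnl);             /* old call: last char lost */
    printf("size=mnl   -> \"%s\"\n", buf);  /* "DELL U241" */

    my_strlcpy(buf, name, mnl + 1);         /* fixed call: full name kept */
    printf("size=mnl+1 -> \"%s\"\n", buf);  /* "DELL U2410" */
    return 0;
}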
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 694b9daf691f..7bbc5f237a5e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3074,6 +3074,7 @@ static const char * const cxt5066_models[CXT5066_MODELS] = {
};
static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO),
SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
@@ -4389,6 +4390,8 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
.patch = patch_cxt5066 },
{ .id = 0x14f15069, .name = "CX20585",
.patch = patch_cxt5066 },
+ { .id = 0x14f1506c, .name = "CX20588",
+ .patch = patch_cxt5066 },
{ .id = 0x14f1506e, .name = "CX20590",
.patch = patch_cxt5066 },
{ .id = 0x14f15097, .name = "CX20631",
@@ -4417,6 +4420,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
MODULE_ALIAS("snd-hda-codec-id:14f15067");
MODULE_ALIAS("snd-hda-codec-id:14f15068");
MODULE_ALIAS("snd-hda-codec-id:14f15069");
+MODULE_ALIAS("snd-hda-codec-id:14f1506c");
MODULE_ALIAS("snd-hda-codec-id:14f1506e");
MODULE_ALIAS("snd-hda-codec-id:14f15097");
MODULE_ALIAS("snd-hda-codec-id:14f15098");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index d21191dcfe88..b48fb43b5448 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2715,17 +2715,30 @@ typedef int (*getput_call_t)(struct snd_kcontrol *kcontrol,
static int alc_cap_getput_caller(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol,
- getput_call_t func)
+ getput_call_t func, bool check_adc_switch)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct alc_spec *spec = codec->spec;
- unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
- int err;
+ int i, err = 0;
mutex_lock(&codec->control_mutex);
- kcontrol->private_value = HDA_COMPOSE_AMP_VAL(spec->adc_nids[adc_idx],
- 3, 0, HDA_INPUT);
- err = func(kcontrol, ucontrol);
+ if (check_adc_switch && spec->dual_adc_switch) {
+ for (i = 0; i < spec->num_adc_nids; i++) {
+ kcontrol->private_value =
+ HDA_COMPOSE_AMP_VAL(spec->adc_nids[i],
+ 3, 0, HDA_INPUT);
+ err = func(kcontrol, ucontrol);
+ if (err < 0)
+ goto error;
+ }
+ } else {
+ i = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+ kcontrol->private_value =
+ HDA_COMPOSE_AMP_VAL(spec->adc_nids[i],
+ 3, 0, HDA_INPUT);
+ err = func(kcontrol, ucontrol);
+ }
+ error:
mutex_unlock(&codec->control_mutex);
return err;
}
@@ -2734,14 +2747,14 @@ static int alc_cap_vol_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return alc_cap_getput_caller(kcontrol, ucontrol,
- snd_hda_mixer_amp_volume_get);
+ snd_hda_mixer_amp_volume_get, false);
}
static int alc_cap_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return alc_cap_getput_caller(kcontrol, ucontrol,
- snd_hda_mixer_amp_volume_put);
+ snd_hda_mixer_amp_volume_put, true);
}
/* capture mixer elements */
@@ -2751,14 +2764,14 @@ static int alc_cap_sw_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return alc_cap_getput_caller(kcontrol, ucontrol,
- snd_hda_mixer_amp_switch_get);
+ snd_hda_mixer_amp_switch_get, false);
}
static int alc_cap_sw_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return alc_cap_getput_caller(kcontrol, ucontrol,
- snd_hda_mixer_amp_switch_put);
+ snd_hda_mixer_amp_switch_put, true);
}
#define _DEFINE_CAPMIX(num) \
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 3f08afc0f0d3..c8e402fc3782 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -896,11 +896,11 @@ struct hdspm {
unsigned char max_channels_in;
unsigned char max_channels_out;
- char *channel_map_in;
- char *channel_map_out;
+ signed char *channel_map_in;
+ signed char *channel_map_out;
- char *channel_map_in_ss, *channel_map_in_ds, *channel_map_in_qs;
- char *channel_map_out_ss, *channel_map_out_ds, *channel_map_out_qs;
+ signed char *channel_map_in_ss, *channel_map_in_ds, *channel_map_in_qs;
+ signed char *channel_map_out_ss, *channel_map_out_ds, *channel_map_out_qs;
char **port_names_in;
char **port_names_out;
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index b5101efd1c87..f1fd95bb6416 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -138,11 +138,20 @@ static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream)
pr_debug("%s enter\n", __func__);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
diff = sport_curr_offset_tx(sport);
- frames = bytes_to_frames(substream->runtime, diff);
} else {
diff = sport_curr_offset_rx(sport);
- frames = bytes_to_frames(substream->runtime, diff);
}
+
+ /*
+ * TX at least can report one frame beyond the end of the
+ * buffer if we hit the wraparound case - clamp to within the
+ * buffer as the ALSA APIs require.
+ */
+ if (diff == snd_pcm_lib_buffer_bytes(substream))
+ diff = 0;
+
+ frames = bytes_to_frames(substream->runtime, diff);
+
return frames;
}
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 4be0570e3f1f..65f46047b1cb 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -357,7 +357,7 @@ static int ak4642_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
default:
return -EINVAL;
}
- snd_soc_update_bits(codec, PW_MGMT2, MS, data);
+ snd_soc_update_bits(codec, PW_MGMT2, MS | MCKO | PMPLL, data);
snd_soc_update_bits(codec, MD_CTL1, BCKO_MASK, bcko);
/* format type */
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index e2a7608d3944..7859bdcc93db 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -161,10 +161,18 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
dev_dbg(&aic26->spi->dev, "bad format\n"); return -EINVAL;
}
- /* Configure PLL */
+ /**
+ * Configure PLL
+ * fsref = (mclk * PLLM) / 2048
+ * where PLLM = J.DDDD (DDDD register ranges from 0 to 9999, decimal)
+ */
pval = 1;
- jval = (fsref == 44100) ? 7 : 8;
- dval = (fsref == 44100) ? 5264 : 1920;
+ /* compute J portion of multiplier */
+ jval = fsref / (aic26->mclk / 2048);
+ /* compute fractional DDDD component of multiplier */
+ dval = fsref - (jval * (aic26->mclk / 2048));
+ dval = (10000 * dval) / (aic26->mclk / 2048);
+ dev_dbg(&aic26->spi->dev, "Setting PLLM to %d.%04d\n", jval, dval);
qval = 0;
reg = 0x8000 | qval << 11 | pval << 8 | jval << 2;
aic26_reg_write(codec, AIC26_REG_PLL_PROG1, reg);
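The aic26 change derives the PLL multiplier from the actual master clock instead of hard-coding values: with fsref = (mclk * J.DDDD) / 2048, J is the integer part of fsref / (mclk / 2048) and DDDD the remainder scaled to four decimal digits. A sketch of that integer arithmetic, assuming an illustrative 12.288 MHz master clock:

#include <stdio.h>

int main(void)
{
    unsigned int mclk = 12288000;            /* assumed codec master clock */
    unsigned int fsrefs[] = { 44100, 48000 };

    for (int i = 0; i < 2; i++) {
        unsigned int fsref = fsrefs[i];
        unsigned int base = mclk / 2048;         /* 6000 Hz per PLL step */
        unsigned int jval = fsref / base;        /* integer part J       */
        unsigned int dval = fsref - jval * base; /* remainder in Hz      */

        dval = (10000 * dval) / base;            /* scale to .DDDD       */
        printf("fsref=%u -> PLLM = %u.%04u\n", fsref, jval, dval);
        /* prints 7.3500 for 44.1 kHz and 8.0000 for 48 kHz */
    }
    return 0;
}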
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index c3d96fc8c267..789453d44ec5 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1114,12 +1114,19 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
/* Sync reg_cache with the hardware */
codec->cache_only = 0;
- for (i = 0; i < ARRAY_SIZE(aic3x_reg); i++)
+ for (i = AIC3X_SAMPLE_RATE_SEL_REG; i < ARRAY_SIZE(aic3x_reg); i++)
snd_soc_write(codec, i, cache[i]);
if (aic3x->model == AIC3X_MODEL_3007)
aic3x_init_3007(codec);
codec->cache_sync = 0;
} else {
+ /*
+ * Do soft reset to this codec instance in order to clear
+ * possible VDD leakage currents in case the supply regulators
+ * remain on
+ */
+ snd_soc_write(codec, AIC3X_RESET, SOFT_RESET);
+ codec->cache_sync = 1;
aic3x->power = 0;
/* HW writes are needless when bias is off */
codec->cache_only = 1;
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 2dc964b55e4f..76b4361e9b80 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -175,6 +175,7 @@ static const struct snd_kcontrol_new wm8731_input_mux_controls =
SOC_DAPM_ENUM("Input Select", wm8731_insel_enum);
static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = {
+SND_SOC_DAPM_SUPPLY("ACTIVE",WM8731_ACTIVE, 0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("OSC", WM8731_PWR, 5, 1, NULL, 0),
SND_SOC_DAPM_MIXER("Output Mixer", WM8731_PWR, 4, 1,
&wm8731_output_mixer_controls[0],
@@ -204,6 +205,8 @@ static int wm8731_check_osc(struct snd_soc_dapm_widget *source,
static const struct snd_soc_dapm_route wm8731_intercon[] = {
{"DAC", NULL, "OSC", wm8731_check_osc},
{"ADC", NULL, "OSC", wm8731_check_osc},
+ {"DAC", NULL, "ACTIVE"},
+ {"ADC", NULL, "ACTIVE"},
/* output mixer */
{"Output Mixer", "Line Bypass Switch", "Line Input"},
@@ -315,29 +318,6 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int wm8731_pcm_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_codec *codec = dai->codec;
-
- /* set active */
- snd_soc_write(codec, WM8731_ACTIVE, 0x0001);
-
- return 0;
-}
-
-static void wm8731_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_codec *codec = dai->codec;
-
- /* deactivate */
- if (!codec->active) {
- udelay(50);
- snd_soc_write(codec, WM8731_ACTIVE, 0x0);
- }
-}
-
static int wm8731_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
@@ -480,7 +460,6 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
snd_soc_write(codec, WM8731_PWR, reg | 0x0040);
break;
case SND_SOC_BIAS_OFF:
- snd_soc_write(codec, WM8731_ACTIVE, 0x0);
snd_soc_write(codec, WM8731_PWR, 0xffff);
regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
wm8731->supplies);
@@ -496,9 +475,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
SNDRV_PCM_FMTBIT_S24_LE)
static struct snd_soc_dai_ops wm8731_dai_ops = {
- .prepare = wm8731_pcm_prepare,
.hw_params = wm8731_hw_params,
- .shutdown = wm8731_shutdown,
.digital_mute = wm8731_mute,
.set_sysclk = wm8731_set_dai_sysclk,
.set_fmt = wm8731_set_dai_fmt,
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 970a95c5360b..c2fc0356c2a4 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1713,6 +1713,8 @@ static int _wm8994_set_fll(struct snd_soc_codec *codec, int id, int src,
snd_soc_update_bits(codec, WM8994_FLL1_CONTROL_1 + reg_offset,
WM8994_FLL1_ENA | WM8994_FLL1_FRAC,
reg);
+
+ msleep(5);
}
wm8994->fll[id].in = freq_in;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index d75043ed7fc0..b194be09e74d 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1929,8 +1929,9 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
"%s", card->name);
snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
"%s", card->long_name ? card->long_name : card->name);
- snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
- "%s", card->driver_name ? card->driver_name : card->name);
+ if (card->driver_name)
+ strlcpy(card->snd_card->driver, card->driver_name,
+ sizeof(card->snd_card->driver));
if (card->late_probe) {
ret = card->late_probe(card);
diff --git a/sound/soc/tegra/tegra_i2s.c b/sound/soc/tegra/tegra_i2s.c
index 6b817e20548c..95f03c10b4f7 100644
--- a/sound/soc/tegra/tegra_i2s.c
+++ b/sound/soc/tegra/tegra_i2s.c
@@ -222,12 +222,18 @@ static int tegra_i2s_hw_params(struct snd_pcm_substream *substream,
if (i2sclock % (2 * srate))
reg |= TEGRA_I2S_TIMING_NON_SYM_ENABLE;
+ if (!i2s->clk_refs)
+ clk_enable(i2s->clk_i2s);
+
tegra_i2s_write(i2s, TEGRA_I2S_TIMING, reg);
tegra_i2s_write(i2s, TEGRA_I2S_FIFO_SCR,
TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS |
TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS);
+ if (!i2s->clk_refs)
+ clk_disable(i2s->clk_i2s);
+
return 0;
}
diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c
index 337a00241a1f..4dd051bdf4fd 100644
--- a/sound/spi/at73c213.c
+++ b/sound/spi/at73c213.c
@@ -1124,6 +1124,6 @@ static void __exit at73c213_exit(void)
}
module_exit(at73c213_exit);
-MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("Sound driver for AT73C213 with Atmel SSC");
MODULE_LICENSE("GPL");