Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/cppc_acpi.c12
-rw-r--r--drivers/block/cciss_scsi.c72
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/atmel-rng.c26
-rw-r--r--drivers/char/hw_random/core.c3
-rw-r--r--drivers/char/hw_random/meson-rng.c2
-rw-r--r--drivers/char/hw_random/msm-rng.c4
-rw-r--r--drivers/char/hw_random/omap-rng.c162
-rw-r--r--drivers/char/hw_random/pic32-rng.c3
-rw-r--r--drivers/char/hw_random/pseries-rng.c5
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/Makefile14
-rw-r--r--drivers/char/tpm/tpm-chip.c38
-rw-r--r--drivers/char/tpm/tpm-interface.c110
-rw-r--r--drivers/char/tpm/tpm-sysfs.c7
-rw-r--r--drivers/char/tpm/tpm.h41
-rw-r--r--drivers/char/tpm/tpm2-cmd.c2
-rw-r--r--drivers/char/tpm/tpm_acpi.c46
-rw-r--r--drivers/char/tpm/tpm_crb.c173
-rw-r--r--drivers/char/tpm/tpm_eventlog.c230
-rw-r--r--drivers/char/tpm/tpm_eventlog.h22
-rw-r--r--drivers/char/tpm/tpm_of.c48
-rw-r--r--drivers/char/tpm/tpm_tis.c11
-rw-r--r--drivers/char/tpm/tpm_tis_core.c64
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c85
-rw-r--r--drivers/char/tpm/xen-tpmfront.c1
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c3
-rw-r--r--drivers/crypto/atmel-aes-regs.h4
-rw-r--r--drivers/crypto/atmel-aes.c189
-rw-r--r--drivers/crypto/caam/Kconfig11
-rw-r--r--drivers/crypto/caam/Makefile1
-rw-r--r--drivers/crypto/caam/caamalg.c1505
-rw-r--r--drivers/crypto/caam/caamalg_desc.c1306
-rw-r--r--drivers/crypto/caam/caamalg_desc.h97
-rw-r--r--drivers/crypto/caam/caamhash.c227
-rw-r--r--drivers/crypto/caam/caampkc.c4
-rw-r--r--drivers/crypto/caam/caamrng.c10
-rw-r--r--drivers/crypto/caam/ctrl.c75
-rw-r--r--drivers/crypto/caam/desc.h22
-rw-r--r--drivers/crypto/caam/desc_constr.h133
-rw-r--r--drivers/crypto/caam/error.c5
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/caam/jr.c27
-rw-r--r--drivers/crypto/caam/key_gen.c62
-rw-r--r--drivers/crypto/caam/key_gen.h6
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h6
-rw-r--r--drivers/crypto/ccp/ccp-dev-v3.c4
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c30
-rw-r--r--drivers/crypto/ccp/ccp-dev.c6
-rw-r--r--drivers/crypto/ccp/ccp-dev.h45
-rw-r--r--drivers/crypto/chelsio/Kconfig1
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c2001
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h103
-rw-r--r--drivers/crypto/chelsio/chcr_core.c8
-rw-r--r--drivers/crypto/chelsio/chcr_core.h18
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h115
-rw-r--r--drivers/crypto/marvell/cesa.c4
-rw-r--r--drivers/crypto/marvell/cesa.h5
-rw-r--r--drivers/crypto/marvell/cipher.c8
-rw-r--r--drivers/crypto/marvell/hash.c65
-rw-r--r--drivers/crypto/marvell/tdma.c33
-rw-r--r--drivers/crypto/mv_cesa.c4
-rw-r--r--drivers/crypto/nx/nx.c1
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/crypto/talitos.c2
-rw-r--r--drivers/crypto/vmx/Makefile12
-rw-r--r--drivers/extcon/extcon-arizona.c8
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-asus.c299
-rw-r--r--drivers/hid/hid-core.c14
-rw-r--r--drivers/hid/hid-cp2112.c205
-rw-r--r--drivers/hid/hid-ids.h19
-rw-r--r--drivers/hid/hid-input.c96
-rw-r--r--drivers/hid/hid-mf.c166
-rw-r--r--drivers/hid/hid-microsoft.c6
-rw-r--r--drivers/hid/hid-multitouch.c88
-rw-r--r--drivers/hid/hid-sensor-hub.c6
-rw-r--r--drivers/hid/hid-sony.c445
-rw-r--r--drivers/hid/hid-udraw-ps3.c474
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c146
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c67
-rw-r--r--drivers/hid/intel-ish-hid/ipc/utils.h64
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c9
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c6
-rw-r--r--drivers/hid/usbhid/hid-quirks.c5
-rw-r--r--drivers/hid/wacom.h2
-rw-r--r--drivers/hid/wacom_sys.c92
-rw-r--r--drivers/hid/wacom_wac.c601
-rw-r--r--drivers/hid/wacom_wac.h76
-rw-r--r--drivers/input/misc/arizona-haptics.c13
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/md/Kconfig10
-rw-r--r--drivers/md/bitmap.c166
-rw-r--r--drivers/md/dm-bufio.c28
-rw-r--r--drivers/md/dm-cache-metadata.c3
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-cache-target.c3
-rw-r--r--drivers/md/dm-crypt.c214
-rw-r--r--drivers/md/dm-flakey.c53
-rw-r--r--drivers/md/dm-io.c34
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-mpath.c42
-rw-r--r--drivers/md/dm-raid.c86
-rw-r--r--drivers/md/dm-rq.c18
-rw-r--r--drivers/md/dm-table.c43
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/linear.c31
-rw-r--r--drivers/md/md.c701
-rw-r--r--drivers/md/md.h108
-rw-r--r--drivers/md/multipath.c92
-rw-r--r--drivers/md/persistent-data/dm-array.c2
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c19
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c4
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c14
-rw-r--r--drivers/md/raid0.c107
-rw-r--r--drivers/md/raid1.c247
-rw-r--r--drivers/md/raid1.h19
-rw-r--r--drivers/md/raid10.c295
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-cache.c1833
-rw-r--r--drivers/md/raid5.c623
-rw-r--r--drivers/md/raid5.h172
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/message/fusion/mptbase.c28
-rw-r--r--drivers/message/fusion/mptscsih.c11
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/mmc/Kconfig2
-rw-r--r--drivers/mmc/Makefile1
-rw-r--r--drivers/mmc/card/Kconfig70
-rw-r--r--drivers/mmc/card/Makefile10
-rw-r--r--drivers/mmc/core/Kconfig66
-rw-r--r--drivers/mmc/core/Makefile4
-rw-r--r--drivers/mmc/core/block.c (renamed from drivers/mmc/card/block.c)0
-rw-r--r--drivers/mmc/core/block.h (renamed from drivers/mmc/card/block.h)0
-rw-r--r--drivers/mmc/core/mmc_test.c (renamed from drivers/mmc/card/mmc_test.c)2
-rw-r--r--drivers/mmc/core/queue.c (renamed from drivers/mmc/card/queue.c)2
-rw-r--r--drivers/mmc/core/queue.h (renamed from drivers/mmc/card/queue.h)0
-rw-r--r--drivers/mmc/core/sdio_uart.c (renamed from drivers/mmc/card/sdio_uart.c)2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c2
-rw-r--r--drivers/nvme/host/core.c21
-rw-r--r--drivers/nvme/target/configfs.c10
-rw-r--r--drivers/phy/phy-qcom-ufs-i.h7
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-14nm.c72
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-20nm.c65
-rw-r--r--drivers/phy/phy-qcom-ufs.c273
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-poweroff.c1
-rw-r--r--drivers/power/reset/at91-reset.c2
-rw-r--r--drivers/power/reset/piix4-poweroff.c113
-rw-r--r--drivers/power/reset/syscon-reboot-mode.c1
-rw-r--r--drivers/power/reset/zx-reboot.c1
-rw-r--r--drivers/power/supply/ab8500_fg.c8
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c1
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c44
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c4
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c2
-rw-r--r--drivers/power/supply/lp8788-charger.c3
-rw-r--r--drivers/power/supply/max17040_battery.c52
-rw-r--r--drivers/power/supply/max8997_charger.c1
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/wm97xx_battery.c25
-rw-r--r--drivers/regulator/rk808-regulator.c9
-rw-r--r--drivers/s390/scsi/zfcp_ext.h4
-rw-r--r--drivers/s390/scsi/zfcp_fc.c36
-rw-r--r--drivers/scsi/Kconfig35
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/NCR5380.c137
-rw-r--r--drivers/scsi/NCR5380.h87
-rw-r--r--drivers/scsi/aacraid/aacraid.h1
-rw-r--r--drivers/scsi/aacraid/comminit.c10
-rw-r--r--drivers/scsi/aacraid/commsup.c25
-rw-r--r--drivers/scsi/aacraid/linit.c20
-rw-r--r--drivers/scsi/advansys.c3
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c5
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h5
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c82
-rw-r--r--drivers/scsi/arm/cumana_1.c98
-rw-r--r--drivers/scsi/arm/oak.c34
-rw-r--r--drivers/scsi/atari_scsi.c77
-rw-r--r--drivers/scsi/be2iscsi/be_main.c8
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h30
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c62
-rw-r--r--drivers/scsi/bfa/bfad_im.h4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxlflash/common.h39
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c6
-rw-r--r--drivers/scsi/cxlflash/main.c410
-rw-r--r--drivers/scsi/cxlflash/sislite.h2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c16
-rw-r--r--drivers/scsi/dmx3191d.c33
-rw-r--r--drivers/scsi/dpt_i2o.c7
-rw-r--r--drivers/scsi/fcoe/fcoe.c25
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c157
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c83
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c30
-rw-r--r--drivers/scsi/fnic/fnic_trace.c4
-rw-r--r--drivers/scsi/fnic/fnic_trace.h2
-rw-r--r--drivers/scsi/fnic/vnic_dev.c10
-rw-r--r--drivers/scsi/g_NCR5380.c296
-rw-r--r--drivers/scsi/g_NCR5380.h32
-rw-r--r--drivers/scsi/g_NCR5380_mmio.c10
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c67
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c79
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c556
-rw-r--r--drivers/scsi/hpsa.c252
-rw-r--r--drivers/scsi/hpsa.h6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c40
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c900
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h5
-rw-r--r--drivers/scsi/ipr.c174
-rw-r--r--drivers/scsi/ipr.h7
-rw-r--r--drivers/scsi/ips.c13
-rw-r--r--drivers/scsi/isci/host.h1
-rw-r--r--drivers/scsi/isci/init.c23
-rw-r--r--drivers/scsi/isci/probe_roms.c1
-rw-r--r--drivers/scsi/isci/remote_node_context.c7
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c61
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c256
-rw-r--r--drivers/scsi/libfc/fc_fcp.c235
-rw-r--r--drivers/scsi/libfc/fc_libfc.c2
-rw-r--r--drivers/scsi/libfc/fc_lport.c126
-rw-r--r--drivers/scsi/libfc/fc_rport.c561
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c160
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c422
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_scsi.c83
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c136
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c23
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c186
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h39
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c69
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c129
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c2
-rw-r--r--drivers/scsi/pmcraid.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c449
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c15
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h18
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c27
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c97
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c15
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_lib.c51
-rw-r--r--drivers/scsi/scsi_transport_fc.c455
-rw-r--r--drivers/scsi/scsi_transport_srp.c52
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c102
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/sun3_scsi.c80
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c39
-rw-r--r--drivers/scsi/ufs/ufs.h5
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h9
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c5
-rw-r--r--drivers/scsi/ufs/ufshcd.c482
-rw-r--r--drivers/scsi/ufs/ufshcd.h46
-rw-r--r--drivers/scsi/ufs/ufshci.h3
-rw-r--r--drivers/scsi/ufs/unipro.h4
-rw-r--r--drivers/target/target_core_fabric_configfs.c7
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c20
-rw-r--r--drivers/target/tcm_fc/tfc_io.c4
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/gadget/configfs.c8
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c25
305 files changed, 16386 insertions, 8813 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c5f9cbe0ae21..83e5f7e1a20d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -104,7 +104,7 @@ config ACPI_PROCFS_POWER
Say N to delete power /proc/acpi/ directories that have moved to /sys/
config ACPI_REV_OVERRIDE_POSSIBLE
- bool "Allow supported ACPI revision to be overriden"
+ bool "Allow supported ACPI revision to be overridden"
depends on X86
default y
help
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index e0ea8f56d2bf..3ca0729f7e0e 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -776,9 +776,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
init_waitqueue_head(&pcc_data.pcc_write_wait_q);
}
- /* Plug PSD data into this CPUs CPC descriptor. */
- per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
-
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
@@ -789,10 +786,15 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
+ /* Plug PSD data into this CPUs CPC descriptor. */
+ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
+
ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
"acpi_cppc");
- if (ret)
+ if (ret) {
+ per_cpu(cpc_desc_ptr, pr->id) = NULL;
goto out_free;
+ }
kfree(output.pointer);
return 0;
@@ -826,6 +828,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
void __iomem *addr;
cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
+ if (!cpc_ptr)
+ return;
/* Free all the mapped sys mem areas for this CPU */
for (i = 2; i < cpc_ptr->num_entries; i++) {
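
The reordering above follows a publish-last pattern: the per-CPU descriptor pointer is made visible only after every step that can fail has succeeded, it is cleared again if kobject registration fails, and acpi_cppc_processor_exit() now tolerates a NULL pointer for CPUs whose probe never completed. A minimal user-space sketch of the same idea (names are illustrative, not taken from the kernel):

#include <stdlib.h>

#define MAX_CPUS 8

struct desc { int data; };

/* Stand-in for the per-CPU cpc_desc_ptr slots. */
static struct desc *registry[MAX_CPUS];

static int probe_cpu(int cpu)
{
	struct desc *d = calloc(1, sizeof(*d));

	if (!d)
		return -1;

	/* ... every step that can fail happens before publication ... */

	registry[cpu] = d;		/* publish only after full success */
	return 0;
}

static void remove_cpu(int cpu)
{
	struct desc *d = registry[cpu];

	if (!d)				/* probe never completed for this CPU */
		return;

	registry[cpu] = NULL;
	free(d);
}
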
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 1537302e56e3..a18de9d727b0 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -260,43 +260,6 @@ scsi_cmd_stack_free(ctlr_info_t *h)
}
#if 0
-static int xmargin=8;
-static int amargin=60;
-
-static void
-print_bytes (unsigned char *c, int len, int hex, int ascii)
-{
-
- int i;
- unsigned char *x;
-
- if (hex)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % xmargin) == 0 && i>0) printk("\n");
- if ((i % xmargin) == 0) printk("0x%04x:", i);
- printk(" %02x", *x);
- x++;
- }
- printk("\n");
- }
- if (ascii)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % amargin) == 0 && i>0) printk("\n");
- if ((i % amargin) == 0) printk("0x%04x:", i);
- if (*x > 26 && *x < 128) printk("%c", *x);
- else printk(".");
- x++;
- }
- printk("\n");
- }
-}
-
static void
print_cmd(CommandList_struct *cp)
{
@@ -305,30 +268,13 @@ print_cmd(CommandList_struct *cp)
printk("sgtot:%d\n", cp->Header.SGTotal);
printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
cp->Header.Tag.lower);
- printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- cp->Header.LUN.LunAddrBytes[0],
- cp->Header.LUN.LunAddrBytes[1],
- cp->Header.LUN.LunAddrBytes[2],
- cp->Header.LUN.LunAddrBytes[3],
- cp->Header.LUN.LunAddrBytes[4],
- cp->Header.LUN.LunAddrBytes[5],
- cp->Header.LUN.LunAddrBytes[6],
- cp->Header.LUN.LunAddrBytes[7]);
+ printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes);
printk("CDBLen:%d\n", cp->Request.CDBLen);
printk("Type:%d\n",cp->Request.Type.Type);
printk("Attr:%d\n",cp->Request.Type.Attribute);
printk(" Dir:%d\n",cp->Request.Type.Direction);
printk("Timeout:%d\n",cp->Request.Timeout);
- printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
- " %02x %02x %02x %02x %02x %02x %02x %02x\n",
- cp->Request.CDB[0], cp->Request.CDB[1],
- cp->Request.CDB[2], cp->Request.CDB[3],
- cp->Request.CDB[4], cp->Request.CDB[5],
- cp->Request.CDB[6], cp->Request.CDB[7],
- cp->Request.CDB[8], cp->Request.CDB[9],
- cp->Request.CDB[10], cp->Request.CDB[11],
- cp->Request.CDB[12], cp->Request.CDB[13],
- cp->Request.CDB[14], cp->Request.CDB[15]),
+ printk("CDB: %16ph\n", cp->Request.CDB);
printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",
cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
cp->ErrDesc.Len);
@@ -340,9 +286,7 @@ print_cmd(CommandList_struct *cp)
printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-
}
-
#endif
static int
@@ -782,8 +726,10 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
"reported\n", c);
break;
case CMD_INVALID: {
- /* print_bytes(c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
/* We get CMD_INVALID if you address a non-existent tape drive instead
of a selection timeout (no response). You will see this if you yank
out a tape drive, then try to access it. This is kind of a shame
@@ -985,8 +931,10 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
dev_warn(&h->pdev->dev,
"%p is reported invalid (probably means "
"target device no longer present)\n", c);
- /* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
}
break;
case CMD_PROTOCOL_ERR:
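
The replacements above lean on the kernel's %*ph printk extensions: given a pointer, %ph hex-dumps up to 64 bytes separated by spaces, while %phN prints the same bytes with no separator, so a fixed-size ID such as a LUN or CDB can be logged in one statement. A tiny, hypothetical helper showing the idiom (the function and parameter names are made up):

#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative only: log an 8-byte LUN and a 16-byte CDB. */
static void dump_request(const u8 *lun, const u8 *cdb)
{
	pr_info("LUN:0x%8phN\n", lun);	/* contiguous hex: 0001020304050607 */
	pr_info("CDB: %16ph\n", cdb);	/* space-separated: 00 01 02 ... 0f */
}
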
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 200dab5136a7..ceff2fc524b1 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -168,7 +168,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 0fcc9e69a346..661c82cde0f2 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -48,6 +48,16 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
return 0;
}
+static void atmel_trng_enable(struct atmel_trng *trng)
+{
+ writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+}
+
+static void atmel_trng_disable(struct atmel_trng *trng)
+{
+ writel(TRNG_KEY, trng->base + TRNG_CR);
+}
+
static int atmel_trng_probe(struct platform_device *pdev)
{
struct atmel_trng *trng;
@@ -71,7 +81,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
if (ret)
return ret;
- writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+ atmel_trng_enable(trng);
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
@@ -84,7 +94,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
return 0;
err_register:
- clk_disable(trng->clk);
+ clk_disable_unprepare(trng->clk);
return ret;
}
@@ -94,7 +104,7 @@ static int atmel_trng_remove(struct platform_device *pdev)
hwrng_unregister(&trng->rng);
- writel(TRNG_KEY, trng->base + TRNG_CR);
+ atmel_trng_disable(trng);
clk_disable_unprepare(trng->clk);
return 0;
@@ -105,6 +115,7 @@ static int atmel_trng_suspend(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
+ atmel_trng_disable(trng);
clk_disable_unprepare(trng->clk);
return 0;
@@ -113,8 +124,15 @@ static int atmel_trng_suspend(struct device *dev)
static int atmel_trng_resume(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
+ int ret;
- return clk_prepare_enable(trng->clk);
+ ret = clk_prepare_enable(trng->clk);
+ if (ret)
+ return ret;
+
+ atmel_trng_enable(trng);
+
+ return 0;
}
static const struct dev_pm_ops atmel_trng_pm_ops = {
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index d2d2c89de5b4..f9766415ff10 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -92,6 +92,7 @@ static void add_early_randomness(struct hwrng *rng)
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(rng_buffer, bytes_read);
+ memset(rng_buffer, 0, size);
}
static inline void cleanup_rng(struct kref *kref)
@@ -287,6 +288,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
}
out:
+ memset(rng_buffer, 0, rng_buffer_size());
return ret ? : err;
out_unlock_reading:
@@ -425,6 +427,7 @@ static int hwrng_fillfn(void *unused)
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
rc * current_quality * 8 >> 10);
+ memset(rng_fillbuf, 0, rng_buffer_size());
}
hwrng_fill = NULL;
return 0;
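
The added memset() calls scrub the shared staging buffers as soon as their contents have been handed off, so previously generated random bytes do not linger in kernel memory. As a hedged illustration of the same hygiene in driver-style code (the patch itself uses plain memset() on buffers that stay live; memzero_explicit() is the variant that cannot be optimised away):

#include <linux/string.h>
#include <linux/types.h>

/* Copy entropy out of a reusable staging buffer, then wipe the staging
 * copy so it cannot be recovered later.
 */
static void consume_and_scrub(u8 *staging, size_t len, u8 *dst)
{
	memcpy(dst, staging, len);
	memzero_explicit(staging, len);
}
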
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 58bef39f7286..119d698439ae 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -110,6 +110,7 @@ static const struct of_device_id meson_rng_of_match[] = {
{ .compatible = "amlogic,meson-rng", },
{},
};
+MODULE_DEVICE_TABLE(of, meson_rng_of_match);
static struct platform_driver meson_rng_driver = {
.probe = meson_rng_probe,
@@ -121,7 +122,6 @@ static struct platform_driver meson_rng_driver = {
module_platform_driver(meson_rng_driver);
-MODULE_ALIAS("platform:meson-rng");
MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 96fb986402eb..841fee845ec9 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -90,10 +90,6 @@ static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
/* calculate max size bytes to transfer back to caller */
maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
- /* no room for word data */
- if (maxsize < WORD_SZ)
- return 0;
-
ret = clk_prepare_enable(rng->clk);
if (ret)
return ret;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index f5c26a5f6875..3ad86fdf954e 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -28,6 +28,7 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
+#include <linux/clk.h>
#include <asm/io.h>
@@ -63,10 +64,13 @@
#define OMAP2_RNG_OUTPUT_SIZE 0x4
#define OMAP4_RNG_OUTPUT_SIZE 0x8
+#define EIP76_RNG_OUTPUT_SIZE 0x10
enum {
- RNG_OUTPUT_L_REG = 0,
- RNG_OUTPUT_H_REG,
+ RNG_OUTPUT_0_REG = 0,
+ RNG_OUTPUT_1_REG,
+ RNG_OUTPUT_2_REG,
+ RNG_OUTPUT_3_REG,
RNG_STATUS_REG,
RNG_INTMASK_REG,
RNG_INTACK_REG,
@@ -82,7 +86,7 @@ enum {
};
static const u16 reg_map_omap2[] = {
- [RNG_OUTPUT_L_REG] = 0x0,
+ [RNG_OUTPUT_0_REG] = 0x0,
[RNG_STATUS_REG] = 0x4,
[RNG_CONFIG_REG] = 0x28,
[RNG_REV_REG] = 0x3c,
@@ -90,8 +94,8 @@ static const u16 reg_map_omap2[] = {
};
static const u16 reg_map_omap4[] = {
- [RNG_OUTPUT_L_REG] = 0x0,
- [RNG_OUTPUT_H_REG] = 0x4,
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
[RNG_STATUS_REG] = 0x8,
[RNG_INTMASK_REG] = 0xc,
[RNG_INTACK_REG] = 0x10,
@@ -106,6 +110,23 @@ static const u16 reg_map_omap4[] = {
[RNG_SYSCONFIG_REG] = 0x1FE4,
};
+static const u16 reg_map_eip76[] = {
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
+ [RNG_OUTPUT_2_REG] = 0x8,
+ [RNG_OUTPUT_3_REG] = 0xc,
+ [RNG_STATUS_REG] = 0x10,
+ [RNG_INTACK_REG] = 0x10,
+ [RNG_CONTROL_REG] = 0x14,
+ [RNG_CONFIG_REG] = 0x18,
+ [RNG_ALARMCNT_REG] = 0x1c,
+ [RNG_FROENABLE_REG] = 0x20,
+ [RNG_FRODETUNE_REG] = 0x24,
+ [RNG_ALARMMASK_REG] = 0x28,
+ [RNG_ALARMSTOP_REG] = 0x2c,
+ [RNG_REV_REG] = 0x7c,
+};
+
struct omap_rng_dev;
/**
* struct omap_rng_pdata - RNG IP block-specific data
@@ -127,6 +148,8 @@ struct omap_rng_dev {
void __iomem *base;
struct device *dev;
const struct omap_rng_pdata *pdata;
+ struct hwrng rng;
+ struct clk *clk;
};
static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
@@ -140,41 +163,35 @@ static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
__raw_writel(val, priv->base + priv->pdata->regs[reg]);
}
-static int omap_rng_data_present(struct hwrng *rng, int wait)
+
+static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
{
struct omap_rng_dev *priv;
- int data, i;
+ int i, present;
priv = (struct omap_rng_dev *)rng->priv;
+ if (max < priv->pdata->data_size)
+ return 0;
+
for (i = 0; i < 20; i++) {
- data = priv->pdata->data_present(priv);
- if (data || !wait)
+ present = priv->pdata->data_present(priv);
+ if (present || !wait)
break;
- /* RNG produces data fast enough (2+ MBit/sec, even
- * during "rngtest" loads, that these delays don't
- * seem to trigger. We *could* use the RNG IRQ, but
- * that'd be higher overhead ... so why bother?
- */
+
udelay(10);
}
- return data;
-}
-
-static int omap_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct omap_rng_dev *priv;
- u32 data_size, i;
-
- priv = (struct omap_rng_dev *)rng->priv;
- data_size = priv->pdata->data_size;
+ if (!present)
+ return 0;
- for (i = 0; i < data_size / sizeof(u32); i++)
- data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i);
+ memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG],
+ priv->pdata->data_size);
if (priv->pdata->regs[RNG_INTACK_REG])
omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
- return data_size;
+
+ return priv->pdata->data_size;
}
static int omap_rng_init(struct hwrng *rng)
@@ -193,13 +210,6 @@ static void omap_rng_cleanup(struct hwrng *rng)
priv->pdata->cleanup(priv);
}
-static struct hwrng omap_rng_ops = {
- .name = "omap",
- .data_present = omap_rng_data_present,
- .data_read = omap_rng_data_read,
- .init = omap_rng_init,
- .cleanup = omap_rng_cleanup,
-};
static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
{
@@ -231,6 +241,38 @@ static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
}
+static int eip76_rng_init(struct omap_rng_dev *priv)
+{
+ u32 val;
+
+ /* Return if RNG is already running. */
+ if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+ return 0;
+
+ /* Number of 512 bit blocks of raw Noise Source output data that must
+ * be processed by either the Conditioning Function or the
+ * SP 800-90 DRBG ‘BC_DF’ functionality to yield a ‘full entropy’
+ * output value.
+ */
+ val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+
+ /* Number of FRO samples that are XOR-ed together into one bit to be
+ * shifted into the main shift register
+ */
+ val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+ omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+ /* Enable all available FROs */
+ omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+ omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+
+ /* Enable TRNG */
+ val = RNG_CONTROL_ENABLE_TRNG_MASK;
+ omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+ return 0;
+}
+
static int omap4_rng_init(struct omap_rng_dev *priv)
{
u32 val;
@@ -300,6 +342,14 @@ static struct omap_rng_pdata omap4_rng_pdata = {
.cleanup = omap4_rng_cleanup,
};
+static struct omap_rng_pdata eip76_rng_pdata = {
+ .regs = (u16 *)reg_map_eip76,
+ .data_size = EIP76_RNG_OUTPUT_SIZE,
+ .data_present = omap4_rng_data_present,
+ .init = eip76_rng_init,
+ .cleanup = omap4_rng_cleanup,
+};
+
static const struct of_device_id omap_rng_of_match[] = {
{
.compatible = "ti,omap2-rng",
@@ -309,6 +359,10 @@ static const struct of_device_id omap_rng_of_match[] = {
.compatible = "ti,omap4-rng",
.data = &omap4_rng_pdata,
},
+ {
+ .compatible = "inside-secure,safexcel-eip76",
+ .data = &eip76_rng_pdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_rng_of_match);
@@ -327,7 +381,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
}
priv->pdata = match->data;
- if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) {
+ if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
+ of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "%s: error getting IRQ resource - %d\n",
@@ -343,6 +398,16 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
return err;
}
omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
+
+ priv->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk)) {
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ dev_err(&pdev->dev, "unable to enable the clk, "
+ "err = %d\n", err);
+ }
}
return 0;
}
@@ -372,7 +437,11 @@ static int omap_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- omap_rng_ops.priv = (unsigned long)priv;
+ priv->rng.read = omap_rng_do_read;
+ priv->rng.init = omap_rng_init;
+ priv->rng.cleanup = omap_rng_cleanup;
+
+ priv->rng.priv = (unsigned long)priv;
platform_set_drvdata(pdev, priv);
priv->dev = dev;
@@ -383,6 +452,12 @@ static int omap_rng_probe(struct platform_device *pdev)
goto err_ioremap;
}
+ priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!priv->rng.name) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
@@ -394,20 +469,24 @@ static int omap_rng_probe(struct platform_device *pdev)
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
if (ret)
- goto err_ioremap;
+ goto err_register;
- ret = hwrng_register(&omap_rng_ops);
+ ret = hwrng_register(&priv->rng);
if (ret)
goto err_register;
- dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
+ dev_info(&pdev->dev, "Random Number Generator ver. %02x\n",
omap_rng_read(priv, RNG_REV_REG));
return 0;
err_register:
priv->base = NULL;
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
err_ioremap:
dev_err(dev, "initialization failed.\n");
return ret;
@@ -417,13 +496,16 @@ static int omap_rng_remove(struct platform_device *pdev)
{
struct omap_rng_dev *priv = platform_get_drvdata(pdev);
- hwrng_unregister(&omap_rng_ops);
+ hwrng_unregister(&priv->rng);
priv->pdata->cleanup(priv);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
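
The omap-rng rework above also illustrates the modern hwrng interface: the legacy data_present()/data_read() pair is replaced by a single read() hook that receives a destination buffer, a maximum byte count (the core's staging buffer is at least 32 bytes, which is why the small-max checks in msm-rng and pic32-rng could simply be dropped) and a wait flag. A minimal, hypothetical driver skeleton using that callback — the hardware access is stubbed out:

#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

struct demo_rng {
	struct hwrng rng;
	/* MMIO base, clocks, etc. would live here. */
};

static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct demo_rng *priv = container_of(rng, struct demo_rng, rng);
	size_t n = min_t(size_t, max, 4);

	/* Real hardware would poll a status register (honouring 'wait')
	 * and then copy from the output FIFO, e.g. via memcpy_fromio().
	 */
	(void)priv;
	memset(data, 0, n);		/* placeholder "entropy" */

	return n;			/* bytes produced; 0 or -errno on failure */
}

static struct demo_rng demo = {
	.rng = {
		.name	= "demo-rng",
		.read	= demo_rng_read,
	},
};

static int __init demo_rng_init(void)
{
	return hwrng_register(&demo.rng);
}

static void __exit demo_rng_exit(void)
{
	hwrng_unregister(&demo.rng);
}

module_init(demo_rng_init);
module_exit(demo_rng_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hypothetical hwrng example");
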
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 11dc9b7c09ce..9b5e68a71d01 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -62,9 +62,6 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
u32 t;
unsigned int timeout = RNG_TIMEOUT;
- if (max < 8)
- return 0;
-
do {
t = readl(priv->base + RNGRCNT) & RCNT_MASK;
if (t == 64) {
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 63ce51d09af1..d9f46b437cc2 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -28,7 +28,6 @@
static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
u64 buffer[PLPAR_HCALL_BUFSIZE];
- size_t size = max < 8 ? max : 8;
int rc;
rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
@@ -36,10 +35,10 @@ static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
return -EIO;
}
- memcpy(data, buffer, size);
+ memcpy(data, buffer, 8);
/* The hypervisor interface returns 64 bits */
- return size;
+ return 8;
}
/**
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 9faa0b1e7766..277186d3b668 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -32,7 +32,7 @@ config TCG_TIS_CORE
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
- depends on X86
+ depends on X86 || OF
select TCG_TIS_CORE
---help---
If you have a TPM security chip that is compliant with the
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index a385fb8c17de..a05b1ebd0b26 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,16 +2,10 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
-tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o
-
-ifdef CONFIG_ACPI
- tpm-y += tpm_eventlog.o tpm_acpi.o
-else
-ifdef CONFIG_TCG_IBMVTPM
- tpm-y += tpm_eventlog.o tpm_of.o
-endif
-endif
+tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
+ tpm_eventlog.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
+tpm-$(CONFIG_OF) += tpm_of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e5950131bd90..7a4869151d3b 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -127,6 +127,7 @@ static void tpm_dev_release(struct device *dev)
idr_remove(&dev_nums_idr, chip->dev_num);
mutex_unlock(&idr_lock);
+ kfree(chip->log.bios_event_log);
kfree(chip);
}
@@ -276,27 +277,6 @@ static void tpm_del_char_device(struct tpm_chip *chip)
up_write(&chip->ops_sem);
}
-static int tpm1_chip_register(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return 0;
-
- tpm_sysfs_add_device(chip);
-
- chip->bios_dir = tpm_bios_log_setup(dev_name(&chip->dev));
-
- return 0;
-}
-
-static void tpm1_chip_unregister(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return;
-
- if (chip->bios_dir)
- tpm_bios_log_teardown(chip->bios_dir);
-}
-
static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
{
struct attribute **i;
@@ -363,20 +343,20 @@ int tpm_chip_register(struct tpm_chip *chip)
return rc;
}
- rc = tpm1_chip_register(chip);
- if (rc)
+ tpm_sysfs_add_device(chip);
+
+ rc = tpm_bios_log_setup(chip);
+ if (rc != 0 && rc != -ENODEV)
return rc;
tpm_add_ppi(chip);
rc = tpm_add_char_device(chip);
if (rc) {
- tpm1_chip_unregister(chip);
+ tpm_bios_log_teardown(chip);
return rc;
}
- chip->flags |= TPM_CHIP_FLAG_REGISTERED;
-
rc = tpm_add_legacy_sysfs(chip);
if (rc) {
tpm_chip_unregister(chip);
@@ -402,12 +382,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
- if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
- return;
-
tpm_del_legacy_sysfs(chip);
-
- tpm1_chip_unregister(chip);
+ tpm_bios_log_teardown(chip);
tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 3a9149cf0110..a2688ac2b48f 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
+#include <linux/pm_runtime.h>
#include "tpm.h"
#include "tpm_eventlog.h"
@@ -356,6 +357,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
if (!(flags & TPM_TRANSMIT_UNLOCKED))
mutex_lock(&chip->tpm_mutex);
+ if (chip->dev.parent)
+ pm_runtime_get_sync(chip->dev.parent);
+
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
dev_err(&chip->dev,
@@ -397,6 +401,9 @@ out_recv:
dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
+ if (chip->dev.parent)
+ pm_runtime_put_sync(chip->dev.parent);
+
if (!(flags & TPM_TRANSMIT_UNLOCKED))
mutex_unlock(&chip->tpm_mutex);
return rc;
@@ -437,26 +444,29 @@ static const struct tpm_input_header tpm_getcap_header = {
.ordinal = TPM_ORD_GET_CAP
};
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc)
{
struct tpm_cmd_t tpm_cmd;
int rc;
tpm_cmd.header.in = tpm_getcap_header;
- if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
- tpm_cmd.params.getcap_in.cap = subcap_id;
+ if (subcap_id == TPM_CAP_VERSION_1_1 ||
+ subcap_id == TPM_CAP_VERSION_1_2) {
+ tpm_cmd.params.getcap_in.cap = cpu_to_be32(subcap_id);
/*subcap field not necessary */
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
} else {
if (subcap_id == TPM_CAP_FLAG_PERM ||
subcap_id == TPM_CAP_FLAG_VOL)
- tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
+ tpm_cmd.params.getcap_in.cap =
+ cpu_to_be32(TPM_CAP_FLAG);
else
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.cap =
+ cpu_to_be32(TPM_CAP_PROP);
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = subcap_id;
+ tpm_cmd.params.getcap_in.subcap = cpu_to_be32(subcap_id);
}
rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
desc);
@@ -488,12 +498,14 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
int tpm_get_timeouts(struct tpm_chip *chip)
{
- struct tpm_cmd_t tpm_cmd;
+ cap_t cap;
unsigned long new_timeout[4];
unsigned long old_timeout[4];
- struct duration_t *duration_cap;
ssize_t rc;
+ if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS)
+ return 0;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
/* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
@@ -506,46 +518,30 @@ int tpm_get_timeouts(struct tpm_chip *chip)
msecs_to_jiffies(TPM2_DURATION_MEDIUM);
chip->duration[TPM_LONG] =
msecs_to_jiffies(TPM2_DURATION_LONG);
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
return 0;
}
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- NULL);
-
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts");
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
- dev_info(&chip->dev, "Issuing TPM_STARTUP");
+ dev_info(&chip->dev, "Issuing TPM_STARTUP\n");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- 0, NULL);
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts");
}
- if (rc) {
- dev_err(&chip->dev,
- "A TPM error (%zd) occurred attempting to determine the timeouts\n",
- rc);
- goto duration;
- }
-
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
- return -EINVAL;
+ if (rc)
+ return rc;
- old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
- old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
- old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
- old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+ old_timeout[0] = be32_to_cpu(cap.timeout.a);
+ old_timeout[1] = be32_to_cpu(cap.timeout.b);
+ old_timeout[2] = be32_to_cpu(cap.timeout.c);
+ old_timeout[3] = be32_to_cpu(cap.timeout.d);
memcpy(new_timeout, old_timeout, sizeof(new_timeout));
/*
@@ -583,29 +579,17 @@ int tpm_get_timeouts(struct tpm_chip *chip)
chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
-duration:
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
-
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- "attempting to determine the durations");
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap,
+ "attempting to determine the durations");
if (rc)
return rc;
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
- return -EINVAL;
-
- duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->duration[TPM_SHORT] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short));
chip->duration[TPM_MEDIUM] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium));
chip->duration[TPM_LONG] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
@@ -619,6 +603,8 @@ duration:
chip->duration_adjusted = true;
dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
}
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
@@ -726,6 +712,14 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
+#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
+#define EXTEND_PCR_RESULT_SIZE 34
+static const struct tpm_input_header pcrextend_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(34),
+ .ordinal = TPM_ORD_PCR_EXTEND
+};
+
/**
* tpm_pcr_extend - extend pcr value with hash
* @chip_num: tpm idx # or AN&
@@ -736,14 +730,6 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
* isn't, protect against the chip disappearing, by incrementing
* the module usage count.
*/
-#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
-#define EXTEND_PCR_RESULT_SIZE 34
-static const struct tpm_input_header pcrextend_header = {
- .tag = TPM_TAG_RQU_COMMAND,
- .length = cpu_to_be32(34),
- .ordinal = TPM_ORD_PCR_EXTEND
-};
-
int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
{
struct tpm_cmd_t cmd;
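
The hunks above bracket every TPM command with runtime-PM calls so that a runtime-suspended parent (for instance the CRB device handled later in this series) is resumed before the send/recv hooks touch the hardware. The shape of the pattern, reduced to a hypothetical helper:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Resume the parent before doing I/O and release it afterwards,
 * mirroring what tpm_transmit() does in the patch above.
 */
static int transmit_with_runtime_pm(struct device *dev)
{
	int ret = 0;

	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	/* ... send the command and collect the response here ... */

	if (dev->parent)
		pm_runtime_put_sync(dev->parent);

	return ret;
}
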
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index a76ab4af9fb2..848ad6580b46 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -193,7 +193,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
be32_to_cpu(cap.manufacturer_id));
/* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
- rc = tpm_getcap(chip, CAP_VERSION_1_2, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
if (!rc) {
str += sprintf(str,
@@ -204,7 +204,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
cap.tpm_version_1_2.revMinor);
} else {
/* Otherwise just use TPM_STRUCT_VER */
- rc = tpm_getcap(chip, CAP_VERSION_1_1, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version");
if (rc)
return 0;
@@ -284,6 +284,9 @@ static const struct attribute_group tpm_dev_group = {
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return;
+
/* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
* is called before ops is null'd and the sysfs core synchronizes this
* removal so that no callbacks are running or can run again
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4d183c97f6a6..1ae976894257 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -35,11 +35,14 @@
#include <linux/cdev.h>
#include <linux/highmem.h>
+#include "tpm_eventlog.h"
+
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
TPM_NUM_DEVICES = 65536,
TPM_RETRY = 50, /* 5 seconds */
+ TPM_NUM_EVENT_LOG_FILES = 3,
};
enum tpm_timeout {
@@ -139,10 +142,15 @@ enum tpm2_startup_types {
#define TPM_PPI_VERSION_LEN 3
enum tpm_chip_flags {
- TPM_CHIP_FLAG_REGISTERED = BIT(0),
TPM_CHIP_FLAG_TPM2 = BIT(1),
TPM_CHIP_FLAG_IRQ = BIT(2),
TPM_CHIP_FLAG_VIRTUAL = BIT(3),
+ TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
+};
+
+struct tpm_chip_seqops {
+ struct tpm_chip *chip;
+ const struct seq_operations *seqops;
};
struct tpm_chip {
@@ -156,6 +164,10 @@ struct tpm_chip {
struct rw_semaphore ops_sem;
const struct tpm_class_ops *ops;
+ struct tpm_bios_log log;
+ struct tpm_chip_seqops bin_log_seqops;
+ struct tpm_chip_seqops ascii_log_seqops;
+
unsigned int flags;
int dev_num; /* /dev/tpm# */
@@ -171,7 +183,7 @@ struct tpm_chip {
unsigned long duration[3]; /* jiffies */
bool duration_adjusted;
- struct dentry **bios_dir;
+ struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
const struct attribute_group *groups[3];
unsigned int groups_cnt;
@@ -282,21 +294,20 @@ typedef union {
} cap_t;
enum tpm_capabilities {
- TPM_CAP_FLAG = cpu_to_be32(4),
- TPM_CAP_PROP = cpu_to_be32(5),
- CAP_VERSION_1_1 = cpu_to_be32(0x06),
- CAP_VERSION_1_2 = cpu_to_be32(0x1A)
+ TPM_CAP_FLAG = 4,
+ TPM_CAP_PROP = 5,
+ TPM_CAP_VERSION_1_1 = 0x06,
+ TPM_CAP_VERSION_1_2 = 0x1A,
};
enum tpm_sub_capabilities {
- TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
- TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
- TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
- TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
- TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
- TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
- TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
-
+ TPM_CAP_PROP_PCR = 0x101,
+ TPM_CAP_PROP_MANUFACTURER = 0x103,
+ TPM_CAP_FLAG_PERM = 0x108,
+ TPM_CAP_FLAG_VOL = 0x109,
+ TPM_CAP_PROP_OWNER = 0x111,
+ TPM_CAP_PROP_TIS_TIMEOUT = 0x115,
+ TPM_CAP_PROP_TIS_DURATION = 0x120,
};
struct tpm_getcap_params_in {
@@ -484,7 +495,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
unsigned int flags);
ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, int len,
unsigned int flags, const char *desc);
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc);
int tpm_get_timeouts(struct tpm_chip *);
int tpm1_auto_startup(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 7df55d58c939..da5b782a9731 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -680,7 +680,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
}
/**
- * tpm_unseal_trusted() - unseal the payload of a trusted key
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
* @chip_num: TPM chip to use
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index 565a9478cb94..b7718c95fd0b 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -6,10 +6,11 @@
* Stefan Berger <stefanb@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
- * Access to the eventlog extended by the TCG BIOS of PC platform
+ * Access to the event log extended by the TCG BIOS of PC platform
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -45,29 +46,28 @@ struct acpi_tcpa {
};
/* read binary bios log */
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_acpi(struct tpm_chip *chip)
{
struct acpi_tcpa *buff;
acpi_status status;
void __iomem *virt;
u64 len, start;
+ struct tpm_bios_log *log;
- if (log->bios_event_log != NULL) {
- printk(KERN_ERR
- "%s: ERROR - Eventlog already initialized\n",
- __func__);
- return -EFAULT;
- }
+ log = &chip->log;
+
+ /* Unfortuntely ACPI does not associate the event log with a specific
+ * TPM, like PPI. Thus all ACPI TPMs will read the same log.
+ */
+ if (!chip->acpi_dev_handle)
+ return -ENODEV;
/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
status = acpi_get_table(ACPI_SIG_TCPA, 1,
(struct acpi_table_header **)&buff);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
- __func__);
- return -EIO;
- }
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
switch(buff->platform_class) {
case BIOS_SERVER:
@@ -81,29 +81,29 @@ int read_log(struct tpm_bios_log *log)
break;
}
if (!len) {
- printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
+ dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
return -EIO;
}
/* malloc EventLog space */
log->bios_event_log = kmalloc(len, GFP_KERNEL);
- if (!log->bios_event_log) {
- printk("%s: ERROR - Not enough Memory for BIOS measurements\n",
- __func__);
+ if (!log->bios_event_log)
return -ENOMEM;
- }
log->bios_event_log_end = log->bios_event_log + len;
virt = acpi_os_map_iomem(start, len);
- if (!virt) {
- kfree(log->bios_event_log);
- printk("%s: ERROR - Unable to map memory\n", __func__);
- return -EIO;
- }
+ if (!virt)
+ goto err;
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
return 0;
+
+err:
+ kfree(log->bios_event_log);
+ log->bios_event_log = NULL;
+ return -EIO;
+
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a7c870af916c..717b6b47c042 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/rculist.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
@@ -83,7 +84,71 @@ struct crb_priv {
u32 cmd_size;
};
-static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume);
+/**
+ * crb_go_idle - request tpm crb device to go the idle state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
+ * The device should respond within TIMEOUT_C by clearing the bit.
+ * Anyhow, we do not wait here as a consequent CMD_READY request
+ * will be handled correctly even if idle was not completed.
+ *
+ * The function does nothing for devices with ACPI-start method.
+ *
+ * Return: 0 always
+ */
+static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
+{
+ if (priv->flags & CRB_FL_ACPI_START)
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->cca->req);
+ /* we don't really care when this settles */
+
+ return 0;
+}
+
+/**
+ * crb_cmd_ready - request tpm crb device to enter ready state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
+ * and poll till the device acknowledge it by clearing the bit.
+ * The device should respond within TIMEOUT_C.
+ *
+ * The function does nothing for devices with ACPI-start method
+ *
+ * Return: 0 on success -ETIME on timeout;
+ */
+static int __maybe_unused crb_cmd_ready(struct device *dev,
+ struct crb_priv *priv)
+{
+ ktime_t stop, start;
+
+ if (priv->flags & CRB_FL_ACPI_START)
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->cca->req);
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(TPM2_TIMEOUT_C));
+ do {
+ if (!(ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY))
+ return 0;
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), stop));
+
+ if (ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY) {
+ dev_warn(dev, "cmdReady timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
static u8 crb_status(struct tpm_chip *chip)
{
@@ -196,21 +261,6 @@ static const struct tpm_class_ops tpm_crb = {
.req_complete_val = CRB_DRV_STS_COMPLETE,
};
-static int crb_init(struct acpi_device *device, struct crb_priv *priv)
-{
- struct tpm_chip *chip;
-
- chip = tpmm_chip_alloc(&device->dev, &tpm_crb);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
-
- dev_set_drvdata(&chip->dev, priv);
- chip->acpi_dev_handle = device->handle;
- chip->flags = TPM_CHIP_FLAG_TPM2;
-
- return tpm_chip_register(chip);
-}
-
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
struct resource *io_res = data;
@@ -249,6 +299,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct list_head resources;
struct resource io_res;
struct device *dev = &device->dev;
+ u32 pa_high, pa_low;
u64 cmd_pa;
u32 cmd_size;
u64 rsp_pa;
@@ -276,12 +327,27 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (IS_ERR(priv->cca))
return PTR_ERR(priv->cca);
- cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
- (u64) ioread32(&priv->cca->cmd_pa_low);
+ /*
+ * PTT HW bug w/a: wake up the device to access
+ * possibly not retained registers.
+ */
+ ret = crb_cmd_ready(dev, priv);
+ if (ret)
+ return ret;
+
+ pa_high = ioread32(&priv->cca->cmd_pa_high);
+ pa_low = ioread32(&priv->cca->cmd_pa_low);
+ cmd_pa = ((u64)pa_high << 32) | pa_low;
cmd_size = ioread32(&priv->cca->cmd_size);
+
+ dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+ pa_high, pa_low, cmd_size);
+
priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
- if (IS_ERR(priv->cmd))
- return PTR_ERR(priv->cmd);
+ if (IS_ERR(priv->cmd)) {
+ ret = PTR_ERR(priv->cmd);
+ goto out;
+ }
memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8);
rsp_pa = le64_to_cpu(rsp_pa);
@@ -289,7 +355,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (cmd_pa != rsp_pa) {
priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
- return PTR_ERR_OR_ZERO(priv->rsp);
+ ret = PTR_ERR_OR_ZERO(priv->rsp);
+ goto out;
}
/* According to the PTP specification, overlapping command and response
@@ -297,18 +364,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
if (cmd_size != rsp_size) {
dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
+
priv->cmd_size = cmd_size;
priv->rsp = priv->cmd;
- return 0;
+
+out:
+ crb_go_idle(dev, priv);
+
+ return ret;
}
static int crb_acpi_add(struct acpi_device *device)
{
struct acpi_table_tpm2 *buf;
struct crb_priv *priv;
+ struct tpm_chip *chip;
struct device *dev = &device->dev;
acpi_status status;
u32 sm;
@@ -346,7 +420,33 @@ static int crb_acpi_add(struct acpi_device *device)
if (rc)
return rc;
- return crb_init(device, priv);
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dev_set_drvdata(&chip->dev, priv);
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+ rc = crb_cmd_ready(dev, priv);
+ if (rc)
+ return rc;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ rc = tpm_chip_register(chip);
+ if (rc) {
+ crb_go_idle(dev, priv);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return rc;
+ }
+
+ pm_runtime_put(dev);
+
+ return 0;
}
static int crb_acpi_remove(struct acpi_device *device)
@@ -356,9 +456,34 @@ static int crb_acpi_remove(struct acpi_device *device)
tpm_chip_unregister(chip);
+ pm_runtime_disable(dev);
+
return 0;
}
+#ifdef CONFIG_PM
+static int crb_pm_runtime_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return crb_go_idle(dev, priv);
+}
+
+static int crb_pm_runtime_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return crb_cmd_ready(dev, priv);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops crb_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
+ SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+};
+
static struct acpi_device_id crb_device_ids[] = {
{"MSFT0101", 0},
{"", 0},
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index e7228863290e..11bb1138a828 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -7,10 +7,11 @@
* Stefan Berger <stefanb@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
- * Access to the eventlog created by a system's firmware / BIOS
+ * Access to the event log created by a system's firmware / BIOS
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -72,7 +73,8 @@ static const char* tcpa_pc_event_id_strings[] = {
static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
{
loff_t i;
- struct tpm_bios_log *log = m->private;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
void *addr = log->bios_event_log;
void *limit = log->bios_event_log_end;
struct tcpa_event *event;
@@ -119,7 +121,8 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
loff_t *pos)
{
struct tcpa_event *event = v;
- struct tpm_bios_log *log = m->private;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
void *limit = log->bios_event_log_end;
u32 converted_event_size;
u32 converted_event_type;
@@ -260,13 +263,10 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
static int tpm_bios_measurements_release(struct inode *inode,
struct file *file)
{
- struct seq_file *seq = file->private_data;
- struct tpm_bios_log *log = seq->private;
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct tpm_chip *chip = (struct tpm_chip *)seq->private;
- if (log) {
- kfree(log->bios_event_log);
- kfree(log);
- }
+ put_device(&chip->dev);
return seq_release(inode, file);
}
@@ -304,151 +304,159 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static const struct seq_operations tpm_ascii_b_measurments_seqops = {
+static const struct seq_operations tpm_ascii_b_measurements_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_ascii_bios_measurements_show,
};
-static const struct seq_operations tpm_binary_b_measurments_seqops = {
+static const struct seq_operations tpm_binary_b_measurements_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_binary_bios_measurements_show,
};
-static int tpm_ascii_bios_measurements_open(struct inode *inode,
+static int tpm_bios_measurements_open(struct inode *inode,
struct file *file)
{
int err;
- struct tpm_bios_log *log;
struct seq_file *seq;
-
- log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
- if (!log)
- return -ENOMEM;
-
- if ((err = read_log(log)))
- goto out_free;
+ struct tpm_chip_seqops *chip_seqops;
+ const struct seq_operations *seqops;
+ struct tpm_chip *chip;
+
+ inode_lock(inode);
+ if (!inode->i_private) {
+ inode_unlock(inode);
+ return -ENODEV;
+ }
+ chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
+ seqops = chip_seqops->seqops;
+ chip = chip_seqops->chip;
+ get_device(&chip->dev);
+ inode_unlock(inode);
/* now register seq file */
- err = seq_open(file, &tpm_ascii_b_measurments_seqops);
+ err = seq_open(file, seqops);
if (!err) {
seq = file->private_data;
- seq->private = log;
- } else {
- goto out_free;
+ seq->private = chip;
}
-out:
return err;
-out_free:
- kfree(log->bios_event_log);
- kfree(log);
- goto out;
}
-static const struct file_operations tpm_ascii_bios_measurements_ops = {
- .open = tpm_ascii_bios_measurements_open,
+static const struct file_operations tpm_bios_measurements_ops = {
+ .owner = THIS_MODULE,
+ .open = tpm_bios_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
.release = tpm_bios_measurements_release,
};
-static int tpm_binary_bios_measurements_open(struct inode *inode,
- struct file *file)
+static int tpm_read_log(struct tpm_chip *chip)
{
- int err;
- struct tpm_bios_log *log;
- struct seq_file *seq;
-
- log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
- if (!log)
- return -ENOMEM;
+ int rc;
- if ((err = read_log(log)))
- goto out_free;
-
- /* now register seq file */
- err = seq_open(file, &tpm_binary_b_measurments_seqops);
- if (!err) {
- seq = file->private_data;
- seq->private = log;
- } else {
- goto out_free;
+ if (chip->log.bios_event_log != NULL) {
+ dev_dbg(&chip->dev,
+ "%s: ERROR - event log already initialized\n",
+ __func__);
+ return -EFAULT;
}
-out:
- return err;
-out_free:
- kfree(log->bios_event_log);
- kfree(log);
- goto out;
-}
-
-static const struct file_operations tpm_binary_bios_measurements_ops = {
- .open = tpm_binary_bios_measurements_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tpm_bios_measurements_release,
-};
+ rc = tpm_read_log_acpi(chip);
+ if (rc != -ENODEV)
+ return rc;
-static int is_bad(void *p)
-{
- if (!p)
- return 1;
- if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV))
- return 1;
- return 0;
+ return tpm_read_log_of(chip);
}
-struct dentry **tpm_bios_log_setup(const char *name)
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are set up to
+ * export it to userspace, otherwise nothing is done.
+ *
+ * Returns -ENODEV if the firmware has no event log or securityfs is not
+ * supported.
+ */
+int tpm_bios_log_setup(struct tpm_chip *chip)
{
- struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
-
- tpm_dir = securityfs_create_dir(name, NULL);
- if (is_bad(tpm_dir))
- goto out;
-
- bin_file =
+ const char *name = dev_name(&chip->dev);
+ unsigned int cnt;
+ int rc = 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return 0;
+
+ rc = tpm_read_log(chip);
+ if (rc)
+ return rc;
+
+ cnt = 0;
+ chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+ * compiled out. The caller should ignore the ENODEV return code.
+ */
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->bin_log_seqops.chip = chip;
+ chip->bin_log_seqops.seqops = &tpm_binary_b_measurements_seqops;
+
+ chip->bios_dir[cnt] =
securityfs_create_file("binary_bios_measurements",
- S_IRUSR | S_IRGRP, tpm_dir, NULL,
- &tpm_binary_bios_measurements_ops);
- if (is_bad(bin_file))
- goto out_tpm;
+ 0440, chip->bios_dir[0],
+ (void *)&chip->bin_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->ascii_log_seqops.chip = chip;
+ chip->ascii_log_seqops.seqops = &tpm_ascii_b_measurements_seqops;
- ascii_file =
+ chip->bios_dir[cnt] =
securityfs_create_file("ascii_bios_measurements",
- S_IRUSR | S_IRGRP, tpm_dir, NULL,
- &tpm_ascii_bios_measurements_ops);
- if (is_bad(ascii_file))
- goto out_bin;
-
- ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
- if (!ret)
- goto out_ascii;
-
- ret[0] = ascii_file;
- ret[1] = bin_file;
- ret[2] = tpm_dir;
-
- return ret;
-
-out_ascii:
- securityfs_remove(ascii_file);
-out_bin:
- securityfs_remove(bin_file);
-out_tpm:
- securityfs_remove(tpm_dir);
-out:
- return NULL;
+ 0440, chip->bios_dir[0],
+ (void *)&chip->ascii_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ return 0;
+
+err:
+ rc = PTR_ERR(chip->bios_dir[cnt]);
+ chip->bios_dir[cnt] = NULL;
+ tpm_bios_log_teardown(chip);
+ return rc;
}
-void tpm_bios_log_teardown(struct dentry **lst)
+void tpm_bios_log_teardown(struct tpm_chip *chip)
{
int i;
-
- for (i = 0; i < 3; i++)
- securityfs_remove(lst[i]);
+ struct inode *inode;
+
+ /* securityfs_remove currently doesn't take care of handling sync
+ * between removal and opening of pseudo files. To handle this, a
+ * workaround is added by making i_private = NULL here during removal
+ * and checking it during open(), both within inode_lock()/unlock().
+ * This design ensures that open() either safely gets a kref or fails.
+ */
+ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+ if (chip->bios_dir[i]) {
+ inode = d_inode(chip->bios_dir[i]);
+ inode_lock(inode);
+ inode->i_private = NULL;
+ inode_unlock(inode);
+ securityfs_remove(chip->bios_dir[i]);
+ }
+ }
}
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
index 8de62b09be51..1660d74ea79a 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/drivers/char/tpm/tpm_eventlog.h
@@ -73,20 +73,24 @@ enum tcpa_pc_event_ids {
HOST_TABLE_OF_DEVICES,
};
-int read_log(struct tpm_bios_log *log);
-
-#if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \
- defined(CONFIG_ACPI)
-extern struct dentry **tpm_bios_log_setup(const char *);
-extern void tpm_bios_log_teardown(struct dentry **);
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
#else
-static inline struct dentry **tpm_bios_log_setup(const char *name)
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
{
- return NULL;
+ return -ENODEV;
}
-static inline void tpm_bios_log_teardown(struct dentry **dir)
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
{
+ return -ENODEV;
}
#endif
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
+
#endif
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
index 570f30c5c5f4..7dee42d7b5e0 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_of.c
@@ -2,6 +2,7 @@
* Copyright 2012 IBM Corporation
*
* Author: Ashley Lai <ashleydlai@gmail.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
@@ -20,55 +21,38 @@
#include "tpm.h"
#include "tpm_eventlog.h"
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_of(struct tpm_chip *chip)
{
struct device_node *np;
const u32 *sizep;
const u64 *basep;
+ struct tpm_bios_log *log;
- if (log->bios_event_log != NULL) {
- pr_err("%s: ERROR - Eventlog already initialized\n", __func__);
- return -EFAULT;
- }
-
- np = of_find_node_by_name(NULL, "vtpm");
- if (!np) {
- pr_err("%s: ERROR - IBMVTPM not supported\n", __func__);
+ log = &chip->log;
+ if (chip->dev.parent && chip->dev.parent->of_node)
+ np = chip->dev.parent->of_node;
+ else
return -ENODEV;
- }
sizep = of_get_property(np, "linux,sml-size", NULL);
- if (sizep == NULL) {
- pr_err("%s: ERROR - SML size not found\n", __func__);
- goto cleanup_eio;
- }
- if (*sizep == 0) {
- pr_err("%s: ERROR - event log area empty\n", __func__);
- goto cleanup_eio;
- }
-
basep = of_get_property(np, "linux,sml-base", NULL);
- if (basep == NULL) {
- pr_err("%s: ERROR - SML not found\n", __func__);
- goto cleanup_eio;
+ if (sizep == NULL && basep == NULL)
+ return -ENODEV;
+ if (sizep == NULL || basep == NULL)
+ return -EIO;
+
+ if (*sizep == 0) {
+ dev_warn(&chip->dev, "%s: Event log area empty\n", __func__);
+ return -EIO;
}
log->bios_event_log = kmalloc(*sizep, GFP_KERNEL);
- if (!log->bios_event_log) {
- pr_err("%s: ERROR - Not enough memory for BIOS measurements\n",
- __func__);
- of_node_put(np);
+ if (!log->bios_event_log)
return -ENOMEM;
- }
log->bios_event_log_end = log->bios_event_log + *sizep;
memcpy(log->bios_event_log, __va(*basep), *sizep);
- of_node_put(np);
return 0;
-
-cleanup_eio:
- of_node_put(np);
- return -EIO;
}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index eaf5730d79eb..0127af130cb1 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -28,6 +28,8 @@
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -354,12 +356,21 @@ static int tpm_tis_plat_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id tis_of_platform_match[] = {
+ {.compatible = "tcg,tpm-tis-mmio"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tis_of_platform_match);
+#endif
+
static struct platform_driver tis_drv = {
.probe = tpm_tis_plat_probe,
.remove = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(tis_of_platform_match),
},
};
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index e3bf31b37138..7993678954a2 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -180,12 +180,19 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int size = 0, burstcnt, rc;
- while (size < count &&
- wait_for_tpm_stat(chip,
+ while (size < count) {
+ rc = wait_for_tpm_stat(chip,
TPM_STS_DATA_AVAIL | TPM_STS_VALID,
chip->timeout_c,
- &priv->read_queue, true) == 0) {
- burstcnt = min_t(int, get_burstcount(chip), count - size);
+ &priv->read_queue, true);
+ if (rc < 0)
+ return rc;
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ return burstcnt;
+ }
+ burstcnt = min_t(int, burstcnt, count - size);
rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + size);
@@ -229,8 +236,11 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ size = -ETIME;
+ goto out;
+ }
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(&chip->dev, "Error left over data\n");
@@ -271,7 +281,13 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
}
while (count < len - 1) {
- burstcnt = min_t(int, get_burstcount(chip), len - count - 1);
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ rc = burstcnt;
+ goto out_err;
+ }
+ burstcnt = min_t(int, burstcnt, len - count - 1);
rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + count);
if (rc < 0)
@@ -279,8 +295,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
count += burstcnt;
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
@@ -293,8 +312,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
if (rc < 0)
goto out_err;
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
@@ -755,20 +777,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
- /* Very early on issue a command to the TPM in polling mode to make
- * sure it works. May as well use that command to set the proper
- * timeouts for the driver.
- */
- if (tpm_get_timeouts(chip)) {
- dev_err(dev, "Could not get TPM timeouts and durations\n");
- rc = -ENODEV;
- goto out_err;
- }
-
/* INTERRUPT Setup */
init_waitqueue_head(&priv->read_queue);
init_waitqueue_head(&priv->int_queue);
if (irq != -1) {
+ /* Before doing irq testing issue a command to the TPM in polling mode
+ * to make sure it works. May as well use that command to set the
+ * proper timeouts for the driver.
+ */
+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 9a940332c157..5463b58af26e 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2015, 2016 IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
*
* Author: Stefan Berger <stefanb@us.ibm.com>
*
@@ -41,6 +42,7 @@ struct proxy_dev {
long state; /* internal state */
#define STATE_OPENED_FLAG BIT(0)
#define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */
+#define STATE_REGISTERED_FLAG BIT(2)
size_t req_len; /* length of queued TPM request */
size_t resp_len; /* length of queued TPM response */
@@ -369,12 +371,9 @@ static void vtpm_proxy_work(struct work_struct *work)
rc = tpm_chip_register(proxy_dev->chip);
if (rc)
- goto err;
-
- return;
-
-err:
- vtpm_proxy_fops_undo_open(proxy_dev);
+ vtpm_proxy_fops_undo_open(proxy_dev);
+ else
+ proxy_dev->state |= STATE_REGISTERED_FLAG;
}
/*
@@ -515,7 +514,8 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
*/
vtpm_proxy_fops_undo_open(proxy_dev);
- tpm_chip_unregister(proxy_dev->chip);
+ if (proxy_dev->state & STATE_REGISTERED_FLAG)
+ tpm_chip_unregister(proxy_dev->chip);
vtpm_proxy_delete_proxy_dev(proxy_dev);
}
@@ -524,6 +524,50 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
* Code related to the control device /dev/vtpmx
*/
+/**
+ * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @file: /dev/vtpmx
+ * @ioctl: the ioctl number
+ * @arg: pointer to the struct vtpmx_proxy_new_dev
+ *
+ * Creates an anonymous file that is used by the process acting as a TPM to
+ * communicate with the client processes. The function will also add a new TPM
+ * device through which data is proxied to this TPM-acting process. The caller
+ * will be provided with a file descriptor to communicate with the clients and
+ * major and minor numbers for the TPM device.
+ */
+static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
+ struct vtpm_proxy_new_dev vtpm_new_dev;
+ struct file *vtpm_file;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ vtpm_new_dev_p = argp;
+
+ if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
+ sizeof(vtpm_new_dev)))
+ return -EFAULT;
+
+ vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
+ if (IS_ERR(vtpm_file))
+ return PTR_ERR(vtpm_file);
+
+ if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
+ sizeof(vtpm_new_dev))) {
+ put_unused_fd(vtpm_new_dev.fd);
+ fput(vtpm_file);
+ return -EFAULT;
+ }
+
+ fd_install(vtpm_new_dev.fd, vtpm_file);
+ return 0;
+}
+
/*
* vtpmx_fops_ioctl: ioctl on /dev/vtpmx
*
@@ -531,34 +575,11 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
* Returns 0 on success, a negative error code otherwise.
*/
static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
+ unsigned long arg)
{
- void __user *argp = (void __user *)arg;
- struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
- struct vtpm_proxy_new_dev vtpm_new_dev;
- struct file *file;
-
switch (ioctl) {
case VTPM_PROXY_IOC_NEW_DEV:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- vtpm_new_dev_p = argp;
- if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
- sizeof(vtpm_new_dev)))
- return -EFAULT;
- file = vtpm_proxy_create_device(&vtpm_new_dev);
- if (IS_ERR(file))
- return PTR_ERR(file);
- if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
- sizeof(vtpm_new_dev))) {
- put_unused_fd(vtpm_new_dev.fd);
- fput(file);
- return -EFAULT;
- }
-
- fd_install(vtpm_new_dev.fd, file);
- return 0;
-
+ return vtpmx_ioc_new_dev(f, ioctl, arg);
default:
return -ENOIOCTLCMD;
}
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 50072cc4fe5c..5aaa268f3a78 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -307,7 +307,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
rv = setup_ring(dev, priv);
if (rv) {
chip = dev_get_drvdata(&dev->dev);
- tpm_chip_unregister(chip);
ring_free(priv);
return rv;
}
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index dae1e39139e9..d10b4ae5e0da 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -135,8 +135,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
&ctx->sa_out_dma_addr, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
- dma_free_coherent(ctx->dev->core_dev->device,
- ctx->sa_len * 4,
+ dma_free_coherent(ctx->dev->core_dev->device, size * 4,
ctx->sa_in, ctx->sa_in_dma_addr);
return -ENOMEM;
}
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
index 6c2951bb70b1..0ec04407b533 100644
--- a/drivers/crypto/atmel-aes-regs.h
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -28,6 +28,7 @@
#define AES_MR_OPMOD_CFB (0x3 << 12)
#define AES_MR_OPMOD_CTR (0x4 << 12)
#define AES_MR_OPMOD_GCM (0x5 << 12)
+#define AES_MR_OPMOD_XTS (0x6 << 12)
#define AES_MR_LOD (0x1 << 15)
#define AES_MR_CFBS_MASK (0x7 << 16)
#define AES_MR_CFBS_128b (0x0 << 16)
@@ -67,6 +68,9 @@
#define AES_CTRR 0x98
#define AES_GCMHR(x) (0x9c + ((x) * 0x04))
+#define AES_TWR(x) (0xc0 + ((x) * 0x04))
+#define AES_ALPHAR(x) (0xd0 + ((x) * 0x04))
+
#define AES_HW_VERSION 0xFC
#endif /* __ATMEL_AES_REGS_H__ */
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index e3d40a8dfffb..0e3d0d655b96 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
@@ -68,6 +69,7 @@
#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
+#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
#define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \
AES_FLAGS_ENCRYPT | \
@@ -89,6 +91,7 @@ struct atmel_aes_caps {
bool has_cfb64;
bool has_ctr32;
bool has_gcm;
+ bool has_xts;
u32 max_burst_size;
};
@@ -135,6 +138,12 @@ struct atmel_aes_gcm_ctx {
atmel_aes_fn_t ghash_resume;
};
+struct atmel_aes_xts_ctx {
+ struct atmel_aes_base_ctx base;
+
+ u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
struct atmel_aes_reqctx {
unsigned long mode;
};
@@ -282,6 +291,20 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
break;
+ case AES_TWR(0):
+ case AES_TWR(1):
+ case AES_TWR(2):
+ case AES_TWR(3):
+ snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
+ break;
+
+ case AES_ALPHAR(0):
+ case AES_ALPHAR(1):
+ case AES_ALPHAR(2):
+ case AES_ALPHAR(3):
+ snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
+ break;
+
default:
snprintf(tmp, sz, "0x%02x", offset);
break;
@@ -317,7 +340,7 @@ static inline void atmel_aes_write(struct atmel_aes_dev *dd,
char tmp[16];
dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
- atmel_aes_reg_name(offset, tmp));
+ atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
}
#endif /* VERBOSE_DEBUG */
@@ -453,15 +476,15 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
return err;
}
-static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv)
+static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv, const u32 *key, int keylen)
{
u32 valmr = 0;
/* MR register must be set before IV registers */
- if (dd->ctx->keylen == AES_KEYSIZE_128)
+ if (keylen == AES_KEYSIZE_128)
valmr |= AES_MR_KEYSIZE_128;
- else if (dd->ctx->keylen == AES_KEYSIZE_192)
+ else if (keylen == AES_KEYSIZE_192)
valmr |= AES_MR_KEYSIZE_192;
else
valmr |= AES_MR_KEYSIZE_256;
@@ -478,13 +501,19 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
atmel_aes_write(dd, AES_MR, valmr);
- atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
- SIZE_IN_WORDS(dd->ctx->keylen));
+ atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
atmel_aes_write_block(dd, AES_IVR(0), iv);
}
+static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv)
+
+{
+ atmel_aes_write_ctrl_key(dd, use_dma, iv,
+ dd->ctx->key, dd->ctx->keylen);
+}
/* CPU transfer */
@@ -1769,6 +1798,137 @@ static struct aead_alg aes_gcm_alg = {
};
+/* xts functions */
+
+static inline struct atmel_aes_xts_ctx *
+atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_xts_ctx, base);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
+
+static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ unsigned long flags;
+ int err;
+
+ atmel_aes_set_mode(dd, rctx);
+
+ err = atmel_aes_hw_init(dd);
+ if (err)
+ return atmel_aes_complete(dd, err);
+
+ /* Compute the tweak value from req->info with ecb(aes). */
+ flags = dd->flags;
+ dd->flags &= ~AES_FLAGS_MODE_MASK;
+ dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+ atmel_aes_write_ctrl_key(dd, false, NULL,
+ ctx->key2, ctx->base.keylen);
+ dd->flags = flags;
+
+ atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+ return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+ u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
+ static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+ u8 *tweak_bytes = (u8 *)tweak;
+ int i;
+
+ /* Read the computed ciphered tweak value. */
+ atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
+ /*
+ * Hardware quirk:
+ * the order of the ciphered tweak bytes needs to be reversed before
+ * writing them into the ODATARx registers.
+ */
+ for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
+ u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
+
+ tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
+ tweak_bytes[i] = tmp;
+ }
+
+ /* Process the data. */
+ atmel_aes_write_ctrl(dd, use_dma, NULL);
+ atmel_aes_write_block(dd, AES_TWR(0), tweak);
+ atmel_aes_write_block(dd, AES_ALPHAR(0), one);
+ if (use_dma)
+ return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+}
+
+static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int err;
+
+ err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+ if (err)
+ return err;
+
+ memcpy(ctx->base.key, key, keylen/2);
+ memcpy(ctx->key2, key + keylen/2, keylen/2);
+ ctx->base.keylen = keylen/2;
+
+ return 0;
+}
+
+static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS);
+}
+
+static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ ctx->base.start = atmel_aes_xts_start;
+
+ return 0;
+}
+
+static struct crypto_alg aes_xts_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "atmel-xts-aes",
+ .cra_priority = ATMEL_AES_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_xts_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_xts_setkey,
+ .encrypt = atmel_aes_xts_encrypt,
+ .decrypt = atmel_aes_xts_decrypt,
+ }
+};
+
+
/* Probe functions */
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
@@ -1877,6 +2037,9 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
int i;
+ if (dd->caps.has_xts)
+ crypto_unregister_alg(&aes_xts_alg);
+
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
@@ -1909,8 +2072,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_gcm_alg;
}
+ if (dd->caps.has_xts) {
+ err = crypto_register_alg(&aes_xts_alg);
+ if (err)
+ goto err_aes_xts_alg;
+ }
+
return 0;
+err_aes_xts_alg:
+ crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
crypto_unregister_alg(&aes_cfb64_alg);
err_aes_cfb64_alg:
@@ -1928,6 +2099,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 0;
dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
+ dd->caps.has_xts = 0;
dd->caps.max_burst_size = 1;
/* keep only major version number */
@@ -1937,6 +2109,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
+ dd->caps.has_xts = 1;
dd->caps.max_burst_size = 4;
break;
case 0x200:
@@ -2138,7 +2311,7 @@ aes_dd_err:
static int atmel_aes_remove(struct platform_device *pdev)
{
- static struct atmel_aes_dev *aes_dd;
+ struct atmel_aes_dev *aes_dd;
aes_dd = platform_get_drvdata(pdev);
if (!aes_dd)
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 64bf3024b680..bc0d3569f8d9 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -74,7 +74,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -89,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -101,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -113,7 +113,7 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -134,3 +134,6 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
help
Selecting this will enable printing of various debug
information in the CAAM driver.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 08bf5515ae8a..6554742f357e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -8,6 +8,7 @@ endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 954a64c7757b..662fe94cb2f8 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,6 +2,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
*
* Based on talitos crypto API driver.
*
@@ -53,6 +54,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamalg_desc.h"
/*
* crypto alg
@@ -62,8 +64,6 @@
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define CAAM_MAX_IV_LENGTH 16
#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
@@ -71,37 +71,6 @@
#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 5)
-/* length of descriptors text */
-#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
-
-/* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
-
-#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
- 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
- 15 * CAAM_CMD_SZ)
-
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -117,8 +86,7 @@
static void dbg_dump_sg(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
- struct scatterlist *sg, size_t tlen, bool ascii,
- bool may_sleep)
+ struct scatterlist *sg, size_t tlen, bool ascii)
{
struct scatterlist *it;
void *it_page;
@@ -152,7 +120,6 @@ static struct list_head alg_list;
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
- int alg_op;
bool rfc3686;
bool geniv;
};
@@ -163,52 +130,6 @@ struct caam_aead_alg {
bool registered;
};
-/* Set DK bit in class 1 operation if shared */
-static inline void append_dec_op1(u32 *desc, u32 type)
-{
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* DK bit is valid only for AES */
- if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- return;
- }
-
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
-}
-
-/*
- * For aead functions, read payload and write payload,
- * both of which are specified in req->src and req->dst
- */
-static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
-{
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
- KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-}
-
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
-{
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
- KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-}
-
/*
* per-session context
*/
@@ -220,147 +141,36 @@ struct caam_ctx {
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
- u32 class1_alg_type;
- u32 class2_alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
- unsigned int enckeylen;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
+ struct alginfo cdata;
unsigned int authsize;
};
-static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *nonce;
- unsigned int enckeylen = ctx->enckeylen;
-
- /*
- * RFC3686 specific:
- * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
- * | enckeylen = encryption key size + nonce size
- */
- if (is_rfc3686)
- enckeylen -= CTR_RFC3686_NONCE_SIZE;
-
- if (keys_fit_inline) {
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key_as_imm(desc, (void *)ctx->key +
- ctx->split_key_pad_len, enckeylen,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
- enckeylen);
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc,
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-}
-
-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *key_jump_cmd;
-
- /* Note: Context registers are saved. */
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-}
-
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+ ctx->adata.keylen_pad;
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -368,84 +178,22 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- desc = ctx->sh_desc_dec;
+ if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH2 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /*
- * Insert a NOP here, since we need at least 4 instructions between
- * code patching the descriptor buffer and the location being patched.
- */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -453,12 +201,6 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -470,11 +212,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline;
- u32 geniv, moveiv;
u32 ctx1_iv_off = 0;
- u32 *desc;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ u32 *desc, *nonce = NULL;
+ u32 inl_mask;
+ unsigned int data_len[2];
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -482,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return 0;
/* NULL encryption / decryption */
- if (!ctx->enckeylen)
+ if (!ctx->cdata.keylen)
return aead_null_set_sh_desc(aead);
/*
@@ -497,8 +239,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* RFC3686 specific:
* CONTEXT1[255:128] = {NONCE, IV, COUNTER}
*/
- if (is_rfc3686)
+ if (is_rfc3686) {
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
if (alg->caam.geniv)
goto skip_enc;
@@ -507,54 +255,29 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_encrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- FIFOLDST_VLF);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ if (desc_inline_query(DESC_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
+ is_rfc3686, nonce, ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -562,79 +285,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_enc:
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (desc_inline_query(DESC_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (alg->caam.geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
+ ctx->adata.key_dma = ctx->key_dma;
- if (alg->caam.geniv) {
- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, alg->caam.geniv, is_rfc3686,
+ nonce, ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -642,11 +322,6 @@ skip_enc:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
if (!alg->caam.geniv)
goto skip_givenc;
@@ -655,93 +330,30 @@ skip_enc:
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- if (is_rfc3686)
- goto copy_iv;
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-copy_iv:
- /* Copy IV to class 1 context */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
-
- /* Return to encryption */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- /* Copy iv from outfifo to class 2 fifo */
- moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
- NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Will write ivsize + cryptlen */
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Not need to reload iv */
- append_seq_fifo_load(desc, ivsize,
- FIFOLD_CLASS_SKIP);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Will read cryptlen */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -749,11 +361,6 @@ copy_iv:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_givenc:
return 0;
@@ -774,12 +381,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *zero_payload_jump_cmd,
- *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -787,82 +393,16 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* if assoclen + cryptlen is ZERO, skip to ICV write */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqinlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* if cryptlen is ZERO jump to zero-payload commands */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* zero-payload commands */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
-
- /* There is no input data */
- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
- /* write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -870,80 +410,21 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD |
- JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* jump to zero-payload command if cryptlen is zero */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* store encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* zero-payload command */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -951,11 +432,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -974,11 +450,11 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -986,62 +462,16 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1049,73 +479,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* Will write cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read encrypted data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1123,11 +501,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -1147,12 +520,11 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
- u32 *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -1160,61 +532,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read and write assoclen + cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1222,77 +549,21 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* In-snoop assoclen + cryptlen data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1300,11 +571,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -1320,19 +586,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
return 0;
}
-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
- u32 authkeylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, authkeylen,
- ctx->alg_op);
-}
-
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
struct crypto_authenc_keys keys;
@@ -1341,33 +597,25 @@ static int aead_setkey(struct crypto_aead *aead,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
- if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- goto badkey;
-
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
if (ret) {
goto badkey;
}
/* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1376,14 +624,14 @@ static int aead_setkey(struct crypto_aead *aead,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + keys.enckeylen, 1);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
- ctx->enckeylen = keys.enckeylen;
+ ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
}
@@ -1412,11 +660,11 @@ static int gcm_setkey(struct crypto_aead *aead,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
ret = gcm_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1444,9 +692,9 @@ static int rfc4106_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1455,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
ret = rfc4106_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1483,9 +731,9 @@ static int rfc4543_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1494,7 +742,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
ret = rfc4543_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1505,21 +753,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
const char *alg_name = crypto_tfm_alg_name(tfm);
struct device *jrdev = ctx->jrdev;
- int ret = 0;
- u32 *key_jump_cmd;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc;
- u8 *nonce;
- u32 geniv;
u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
+ memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -1542,60 +787,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- memcpy(ctx->key, key, keylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Load iv */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1603,61 +808,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* load IV */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
- else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1666,76 +821,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX |
- (crt->ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, crt->ivsize,
- LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1743,14 +832,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
- return ret;
+ return 0;
}
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -1758,8 +841,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- u32 *key_jump_cmd, *desc;
- __be64 sector_size = cpu_to_be64(512);
+ u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
@@ -1774,88 +856,23 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 keys only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
@@ -1864,31 +881,22 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
return 0;
}
/*
* aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct aead_edesc {
- int assoc_nents;
int src_nents;
int dst_nents;
- dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -1900,9 +908,9 @@ struct aead_edesc {
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
@@ -2019,8 +1027,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2031,7 +1038,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc->src_nents > 1 ? 100 : ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2052,8 +1059,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2063,7 +1069,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2157,7 +1163,7 @@ static void init_gcm_job(struct aead_request *req,
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
/* Append Salt */
if (!generic_gcm)
- append_data(desc, ctx->key + ctx->enckeylen, 4);
+ append_data(desc, ctx->key + ctx->cdata.keylen, 4);
/* Append IV */
append_data(desc, req->iv, ivsize);
/* End of blank commands */
@@ -2172,7 +1178,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
@@ -2218,15 +1224,13 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2278,14 +1282,12 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2344,10 +1346,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/* Check if data are contiguous. */
all_contig = !src_nents;
- if (!all_contig) {
- src_nents = src_nents ? : 1;
+ if (!all_contig)
sec4_sg_len = src_nents;
- }
sec4_sg_len += dst_nents;
@@ -2556,11 +1556,9 @@ static int aead_decrypt(struct aead_request *req)
int ret = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1, may_sleep);
+ req->assoclen + req->cryptlen, 1);
#endif
/* allocate extended descriptor */
@@ -2618,16 +1616,33 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2647,6 +1662,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2673,6 +1690,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -2794,11 +1814,26 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
/*
@@ -2808,6 +1843,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2823,6 +1860,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2850,6 +1889,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->iv_dma = iv_dma;
@@ -2916,7 +1958,6 @@ struct caam_alg_template {
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
- u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
@@ -3101,7 +2142,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3123,7 +2163,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3145,7 +2184,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3167,7 +2205,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3189,7 +2226,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3211,7 +2247,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3233,7 +2268,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3256,7 +2290,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3279,7 +2312,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3302,7 +2334,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3325,7 +2356,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3348,7 +2378,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3371,7 +2400,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3394,7 +2422,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3417,7 +2444,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3440,7 +2466,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3463,7 +2488,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3486,7 +2510,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3509,7 +2532,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
}
},
{
@@ -3532,7 +2554,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
}
},
@@ -3556,7 +2577,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3580,7 +2600,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3604,7 +2623,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3628,7 +2646,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3652,7 +2669,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3676,7 +2692,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3700,7 +2715,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3724,7 +2738,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3748,7 +2761,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3772,7 +2784,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3795,7 +2806,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3818,7 +2828,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3841,7 +2850,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3864,7 +2872,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3887,7 +2894,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3910,7 +2916,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3933,7 +2938,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3956,7 +2960,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3979,7 +2982,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4002,7 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4025,7 +3026,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4048,7 +3048,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4073,7 +3072,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4098,7 +3096,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4124,7 +3121,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4149,7 +3145,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4175,7 +3170,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4200,7 +3194,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4226,7 +3219,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4251,7 +3243,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4277,7 +3268,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4302,7 +3292,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4328,7 +3317,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4353,7 +3341,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4375,9 +3362,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
}
/* copy descriptor header template value */
- ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
- ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
return 0;
}
@@ -4420,7 +3406,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
if (ctx->key_dma &&
!dma_mapping_error(ctx->jrdev, ctx->key_dma))
dma_unmap_single(ctx->jrdev, ctx->key_dma,
- ctx->enckeylen + ctx->split_key_pad_len,
+ ctx->cdata.keylen + ctx->adata.keylen_pad,
DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
@@ -4498,7 +3484,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
t_alg->caam.class1_alg_type = template->class1_alg_type;
t_alg->caam.class2_alg_type = template->class2_alg_type;
- t_alg->caam.alg_op = template->alg_op;
return t_alg;
}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 000000000000..f3f48c10b9d6
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1306 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamalg_desc.h"
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ /* DK bit is valid only for AES */
+ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ return;
+ }
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
+
+/**
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
+
+static void init_sh_desc_key_aead(u32 * const desc,
+ struct alginfo * const cdata,
+ struct alginfo * const adata,
+ const bool is_rfc3686, u32 *nonce)
+{
+ u32 *key_jump_cmd;
+ unsigned int enckeylen = cdata->keylen;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /*
+ * RFC3686 specific:
+ * | key = {AUTH_KEY, ENC_KEY, NONCE}
+ * | enckeylen = encryption key size + nonce size
+ */
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, enckeylen,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686) {
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc,
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ FIFOLDST_VLF);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
+
+/**
+ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ if (geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
+
+/**
+ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with HW-generated initialization
+ * vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off)
+{
+ u32 geniv, moveiv;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ if (is_rfc3686)
+ goto copy_iv;
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+copy_iv:
+ /* Copy IV to class 1 context */
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+
+ /* Return to encryption */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Copy iv from outfifo to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead givenc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
+
+/**
+ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
+ *zero_assoc_jump_cmd2;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD | JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* jump the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ /* There is no input data */
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+ /* write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
+
+/**
+ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* store encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* zero-payload command */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
+
+/**
+ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
+
+/**
+ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read encrypted data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
+
+/**
+ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read and write assoclen + cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
+
+/**
+ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* In-snoop assoclen + cryptlen data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/**
+ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Load iv */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* load IV */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+
+/**
+ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
+ * with HW-generated initialization vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd, geniv;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
+ (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to memory */
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ if (ctx1_iv_off)
+ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 keys only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM descriptor support");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 000000000000..95551737333a
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,97 @@
+/*
+ * Shared descriptors for aead and ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+
+#endif /* _CAAMALG_DESC_H_ */
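The DESC_*_LEN macros above give callers the exact command footprint of each constructed descriptor, which is what makes an inline-vs-referenced key decision possible before construction. A small sketch of that decision follows; the 64-word budget is the CAAM descriptor limit, while ignoring the job-descriptor overhead and the helper name itself are simplifications for illustration (desc_constr.h is assumed to be included for CAAM_CMD_SZ).

/*
 * Sketch: can both keys be inlined into the AEAD encrypt shared descriptor?
 * Real callers also reserve room for the job descriptor's header and
 * pointer commands; that term is omitted here for brevity.
 */
static inline bool example_aead_enc_keys_fit_inline(unsigned int enc_keylen,
                                                    unsigned int auth_keylen_pad)
{
        const unsigned int budget = 64 * CAAM_CMD_SZ;   /* max descriptor size */

        return DESC_AEAD_ENC_LEN + enc_keylen + auth_keylen_pad <= budget;
}
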
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 660dc206969f..e58639ea53b1 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,7 +72,7 @@
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
/* length of descriptors text */
-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
@@ -103,20 +103,15 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
- dma_addr_t sh_desc_finup_dma;
struct device *jrdev;
- u32 alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
dma_addr_t key_dma;
int ctx_len;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
};
/* ahash state */
@@ -222,89 +217,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
return 0;
}
-/* Common shared descriptor commands */
-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-}
-
-/* Append key if it has been set */
-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- u32 *key_jump_cmd;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- if (ctx->split_key_len) {
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_ahash(desc, ctx);
-
- set_jump_tgt_here(desc, key_jump_cmd);
- }
-
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
+ * For ahash update, final and finup (import_ctx = true)
+ * import context, read and write to seqout
+ * For ahash firsts and digest (import_ctx = false)
+ * read and write to seqout
*/
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+ struct caam_hash_ctx *ctx, bool import_ctx)
{
- /* Calculate remaining bytes to read */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
+ u32 op = ctx->adata.algtype;
+ u32 *skip_key_load;
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize,
- struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* Append key if it has been set; ahash update excluded */
+ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
+ set_jump_tgt_here(desc, skip_key_load);
- /*
- * Load from buf and/or src and write to req->result or state->context
- */
- ahash_append_load_str(desc, digestsize);
-}
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize, struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
/* Class 2 operation */
append_operation(desc, op | state | OP_ALG_ENCRYPT);
/*
* Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
*/
- ahash_append_load_str(desc, digestsize);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
@@ -312,28 +272,11 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
- u32 have_key = 0;
u32 *desc;
- if (ctx->split_key_len)
- have_key = OP_ALG_AAI_HMAC_PRECOMP;
-
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
-
- /* Class 2 operation */
- append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
- OP_ALG_ENCRYPT);
-
- /* Load data and write to result or context */
- ahash_append_load_str(desc, ctx->ctx_len);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
@@ -348,10 +291,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
- ctx->ctx_len, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -367,10 +307,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
@@ -383,30 +320,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
desc_bytes(desc), 1);
#endif
- /* ahash_finup shared descriptor */
- desc = ctx->sh_desc_finup;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
- ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
- digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -424,14 +340,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 keylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, keylen,
- ctx->alg_op);
-}
-
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
@@ -467,7 +375,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Job descriptor to perform unkeyed hash on key_in */
- append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
@@ -511,8 +419,6 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
@@ -537,23 +443,12 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
-#ifdef DEBUG
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
-
- ret = gen_split_hash_key(ctx, key, keylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
+ CAAM_MAX_HASH_KEY_SIZE);
if (ret)
goto bad_free_key;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -563,14 +458,15 @@ static int ahash_setkey(struct crypto_ahash *ahash,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
+ ctx->adata.keylen_pad, 1);
#endif
ret = ahash_set_sh_desc(ahash);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
}
+
error_free_key:
kfree(hashed_key);
return ret;
@@ -639,8 +535,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -674,8 +569,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -709,8 +603,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -744,8 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -1078,7 +970,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
/* allocate space for base edesc and hw desc commands, link tables */
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -1683,7 +1575,6 @@ struct caam_hash_template {
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
- u32 alg_op;
};
/* ahash descriptors */
@@ -1709,7 +1600,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
@@ -1731,7 +1621,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
@@ -1753,7 +1642,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
@@ -1775,7 +1663,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
@@ -1797,7 +1684,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
}, {
.name = "md5",
.driver_name = "md5-caam",
@@ -1819,14 +1705,12 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
};
struct caam_hash_alg {
struct list_head entry;
int alg_type;
- int alg_op;
struct ahash_alg ahash_alg;
};
@@ -1859,10 +1743,10 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
/* copy descriptor header template value */
- ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -1893,10 +1777,6 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
desc_bytes(ctx->sh_desc_digest),
DMA_TO_DEVICE);
- if (ctx->sh_desc_finup_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
- desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
}
@@ -1956,7 +1836,6 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
- t_alg->alg_op = template->alg_op;
return t_alg;
}
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 851015e652b8..32100c4851dd 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -395,7 +395,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -441,7 +441,7 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 9b92af2c7241..41398da3edf4 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -52,7 +52,7 @@
/* length of descriptors */
#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
+#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
struct buf_data {
@@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
struct buf_data *bd;
- bd = (struct buf_data *)((char *)desc -
- offsetof(struct buf_data, hw_desc));
+ bd = container_of(desc, struct buf_data, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
init_sh_desc(desc, HDR_SHARE_SERIAL);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
/* Generate random bytes */
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
@@ -351,7 +347,7 @@ static int __init caam_rng_init(void)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
err = -ENOMEM;
goto free_caam_alloc;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e483b78c6343..755109841cfd 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -330,8 +330,8 @@ static int caam_remove(struct platform_device *pdev)
clk_disable_unprepare(ctrlpriv->caam_ipg);
clk_disable_unprepare(ctrlpriv->caam_mem);
clk_disable_unprepare(ctrlpriv->caam_aclk);
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
return 0;
}
@@ -365,11 +365,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
*/
val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
>> RTSDCTL_ENT_DLY_SHIFT;
- if (ent_delay <= val) {
- /* put RNG4 into run mode */
- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
- return;
- }
+ if (ent_delay <= val)
+ goto start_rng;
val = rd_reg32(&r4tst->rtsdctl);
val = (val & ~RTSDCTL_ENT_DLY_MASK) |
@@ -381,15 +378,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
/* read the control register */
val = rd_reg32(&r4tst->rtmctl);
+start_rng:
/*
* select raw sampling in both entropy shifter
- * and statistical checker
+ * and statistical checker; put RNG4 into run mode
*/
- clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
- /* put RNG4 into run mode */
- clrsetbits_32(&val, RTMCTL_PRGM, 0);
- /* write back the control register */
- wr_reg32(&r4tst->rtmctl, val);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
/**
@@ -482,14 +476,16 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_aclk = clk;
- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
- return ret;
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_emi_slow = clk;
}
- ctrlpriv->caam_emi_slow = clk;
ret = clk_prepare_enable(ctrlpriv->caam_ipg);
if (ret < 0) {
@@ -510,11 +506,13 @@ static int caam_probe(struct platform_device *pdev)
goto disable_caam_mem;
}
- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
- ret);
- goto disable_caam_aclk;
+ if (ctrlpriv->caam_emi_slow) {
+ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+ ret);
+ goto disable_caam_aclk;
+ }
}
/* Get configuration properties from device tree */
@@ -541,13 +539,13 @@ static int caam_probe(struct platform_device *pdev)
else
BLOCK_OFFSET = PG_SIZE_64K;
- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
- ctrlpriv->assure = (struct caam_assurance __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+ ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
);
- ctrlpriv->deco = (struct caam_deco __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->deco = (struct caam_deco __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * DECO_BLOCK_NUMBER
);
@@ -627,8 +625,8 @@ static int caam_probe(struct platform_device *pdev)
ring);
continue;
}
- ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
@@ -641,8 +639,8 @@ static int caam_probe(struct platform_device *pdev)
!!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
- ctrlpriv->qi = (struct caam_queue_if __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
);
/* This is all that's required to physically enable QI */
@@ -800,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
&caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
- ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IRUSR |
@@ -808,7 +806,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
- ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IRUSR |
@@ -816,7 +814,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
- ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IRUSR |
@@ -833,7 +831,8 @@ caam_remove:
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 513b6646bb36..2e6766a1573f 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -22,12 +22,6 @@
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
#define SEC4_SG_OFFSET_MASK 0x00001fff
-struct sec4_sg_entry {
- u64 ptr;
- u32 len;
- u32 bpid_offset;
-};
-
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
#define MAX_CAAM_DESCSIZE 64
@@ -90,8 +84,8 @@ struct sec4_sg_entry {
#define HDR_ZRO 0x00008000
/* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK 0x3f
#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
/* If shared descriptor header, 6-bit length */
#define HDR_DESCLEN_SHR_MASK 0x3f
@@ -121,10 +115,10 @@ struct sec4_sg_entry {
#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK 0x03
#define HDR_SD_SHARE_SHIFT 8
-#define HDR_JD_SHARE_MASK 0x07
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
@@ -235,7 +229,7 @@ struct sec4_sg_entry {
#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
@@ -400,7 +394,7 @@ struct sec4_sg_entry {
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
@@ -1107,8 +1101,8 @@ struct sec4_sg_entry {
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
-#define OP_ALG_TYPE_CLASS1 2
-#define OP_ALG_TYPE_CLASS2 4
+#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
@@ -1249,7 +1243,7 @@ struct sec4_sg_entry {
#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
#define OP_ALG_PKMODE_DST_REG_SHIFT 10
#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index a8cd8a78ec1f..b9c8d98ef826 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -33,38 +33,39 @@
extern bool caam_little_end;
-static inline int desc_len(u32 *desc)
+static inline int desc_len(u32 * const desc)
{
return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
}
-static inline int desc_bytes(void *desc)
+static inline int desc_bytes(void * const desc)
{
return desc_len(desc) * CAAM_CMD_SZ;
}
-static inline u32 *desc_end(u32 *desc)
+static inline u32 *desc_end(u32 * const desc)
{
return desc + desc_len(desc);
}
-static inline void *sh_desc_pdb(u32 *desc)
+static inline void *sh_desc_pdb(u32 * const desc)
{
return desc + 1;
}
-static inline void init_desc(u32 *desc, u32 options)
+static inline void init_desc(u32 * const desc, u32 options)
{
*desc = cpu_to_caam32((options | HDR_ONE) + 1);
}
-static inline void init_sh_desc(u32 *desc, u32 options)
+static inline void init_sh_desc(u32 * const desc, u32 options)
{
PRINT_POS;
init_desc(desc, CMD_SHARED_DESC_HDR | options);
}
-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
@@ -72,19 +73,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
options);
}
-static inline void init_job_desc(u32 *desc, u32 options)
+static inline void init_job_desc(u32 * const desc, u32 options)
{
init_desc(desc, CMD_DESC_HDR | options);
}
-static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_job_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
}
-static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
@@ -94,8 +96,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
CAAM_PTR_SZ / CAAM_CMD_SZ);
}
-static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
- u32 options)
+static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
+ int len, u32 options)
{
PRINT_POS;
init_job_desc(desc, HDR_SHARED | options |
@@ -103,7 +105,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 *desc, void *data, int len)
+static inline void append_data(u32 * const desc, void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -114,7 +116,7 @@ static inline void append_data(u32 *desc, void *data, int len)
(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
}
-static inline void append_cmd(u32 *desc, u32 command)
+static inline void append_cmd(u32 * const desc, u32 command)
{
u32 *cmd = desc_end(desc);
@@ -125,7 +127,7 @@ static inline void append_cmd(u32 *desc, u32 command)
#define append_u32 append_cmd
-static inline void append_u64(u32 *desc, u64 data)
+static inline void append_u64(u32 * const desc, u64 data)
{
u32 *offset = desc_end(desc);
@@ -142,14 +144,14 @@ static inline void append_u64(u32 *desc, u64 data)
}
/* Write command without affecting header, and return pointer to next word */
-static inline u32 *write_cmd(u32 *desc, u32 command)
+static inline u32 *write_cmd(u32 * const desc, u32 command)
{
*desc = cpu_to_caam32(command);
return desc + 1;
}
-static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
u32 command)
{
append_cmd(desc, command | len);
@@ -157,7 +159,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
}
/* Write length after pointer, rather than inside command */
-static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
unsigned int len, u32 command)
{
append_cmd(desc, command);
@@ -166,7 +168,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 *desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -174,7 +176,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
}
#define APPEND_CMD_RET(cmd, op) \
-static inline u32 *append_##cmd(u32 *desc, u32 options) \
+static inline u32 *append_##cmd(u32 * const desc, u32 options) \
{ \
u32 *cmd = desc_end(desc); \
PRINT_POS; \
@@ -184,13 +186,13 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
-static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
(desc_len(desc) - (jump_cmd - desc)));
}
-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
{
u32 val = caam32_to_cpu(*move_cmd);
@@ -200,7 +202,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
}
#define APPEND_CMD(cmd, op) \
-static inline void append_##cmd(u32 *desc, u32 options) \
+static inline void append_##cmd(u32 * const desc, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
@@ -208,7 +210,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
APPEND_CMD(operation, OPERATION)
#define APPEND_CMD_LEN(cmd, op) \
-static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+static inline void append_##cmd(u32 * const desc, unsigned int len, \
+ u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
@@ -220,8 +223,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
#define APPEND_CMD_PTR(cmd, op) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
- u32 options) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
@@ -231,8 +234,8 @@ APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
- u32 options)
+static inline void append_store(u32 * const desc, dma_addr_t ptr,
+ unsigned int len, u32 options)
{
u32 cmd_src;
@@ -249,7 +252,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
}
#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
-static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
+ dma_addr_t ptr, \
unsigned int len, \
u32 options) \
{ \
@@ -263,7 +267,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -273,7 +277,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
-static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -287,7 +291,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
* the size of its type
*/
#define APPEND_CMD_PTR_LEN(cmd, op, type) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
type len, u32 options) \
{ \
PRINT_POS; \
@@ -304,7 +308,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -315,7 +319,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
APPEND_CMD_PTR_TO_IMM2(key, KEY);
#define APPEND_CMD_RAW_IMM(cmd, op, type) \
-static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
@@ -426,3 +430,64 @@ do { \
APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_inline
+ * is true, dma (bus) address if key_inline is false.
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ * referenced by the descriptor
+ */
+struct alginfo {
+ u32 algtype;
+ unsigned int keylen;
+ unsigned int keylen_pad;
+ union {
+ dma_addr_t key_dma;
+ void *key_virt;
+ };
+ bool key_inline;
+};
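/*
 * Illustrative sketch (hypothetical helper, not taken from the driver):
 * descriptor-construction code is expected to pick between the two union
 * members based on @key_inline.  The option flags shown are the ones
 * commonly used for an MDHA split key and are given here only as an
 * example.
 */
static inline void example_append_split_key(u32 * const desc,
					    const struct alginfo *adata)
{
	if (adata->key_inline)
		append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
				  adata->keylen,
				  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, adata->key_dma, adata->keylen,
			   CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
}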
+
+/**
+ * desc_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define mentioning
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items trying to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int desc_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len, unsigned int *data_len,
+ u32 *inl_mask, unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
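/*
 * Illustrative usage sketch: deciding whether the split authentication key
 * and the cipher key of an authenc-style shared descriptor can be inlined.
 * The helper name and parameters are hypothetical; only desc_inline_query()
 * and struct alginfo above are real.
 */
static inline void example_inline_decision(struct alginfo *adata,
					   struct alginfo *cdata,
					   unsigned int sd_base_len,
					   unsigned int jd_len)
{
	unsigned int data_len[2] = { adata->keylen_pad, cdata->keylen };
	u32 inl_mask;

	if (desc_inline_query(sd_base_len, jd_len, data_len, &inl_mask, 2) < 0)
		return;	/* not even the key references fit in the descriptor */

	adata->key_inline = !!(inl_mask & 1);	/* bit 0: first data item */
	cdata->key_inline = !!(inl_mask & 2);	/* bit 1: second data item */
}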
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 33e41ea83fcc..79a0cc70717f 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -146,10 +146,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
strlen(rng_err_id_list[err_id])) {
/* RNG-only error */
err_str = rng_err_id_list[err_id];
- } else if (err_id < ARRAY_SIZE(err_id_list))
+ } else {
err_str = err_id_list[err_id];
- else
- snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+ }
/*
* CCB ICV check failures are part of normal operation life;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c05074a5c..e2bcacc1a921 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
+ struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f9953d..c8604dfadbf5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
ret = caam_reset_hw_jr(dev);
+ tasklet_kill(&jrp->irqtask);
+
/* Release interrupt */
free_irq(jrp->irq, dev);
@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/*
* Check the output ring for ready responses, kick
- * the threaded irq if jobs done.
+ * tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
- return IRQ_WAKE_THREAD;
+ preempt_disable();
+ tasklet_schedule(&jrp->irqtask);
+ preempt_enable();
+
+ return IRQ_HANDLED;
}
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = st_dev;
+ struct device *dev = (struct device *)devarg;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
- return IRQ_HANDLED;
}
/**
@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
/* Connect job ring interrupt handler. */
- error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
- caam_jr_threadirq, IRQF_SHARED,
- dev_name(dev), dev);
+ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@ out_free_inpring:
out_free_irq:
free_irq(jrp->irq, dev);
out_kill_deq:
+ tasklet_kill(&jrp->irqtask);
return error;
}
@@ -489,7 +496,7 @@ static int caam_jr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
+ jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4ff9762..1bb2816a9b4d 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -10,6 +10,36 @@
#include "desc_constr.h"
#include "key_gen.h"
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
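/*
 * Worked example: for OP_ALG_ALGSEL_SHA256 the MDHA pad size is 32 bytes,
 * so split_key_len() returns 2 * 32 = 64 and split_key_pad_len() returns
 * ALIGN(64, 16) = 64.  For OP_ALG_ALGSEL_SHA1 the pad is 20 bytes, giving
 * a 40-byte split key padded up to 48 bytes.
 */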
+
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
@@ -41,15 +71,29 @@ Split key generation-----------------------------------------------
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op)
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen)
{
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
int ret = -ENOMEM;
+ adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
+ adata->keylen_pad = split_key_pad_len(adata->algtype &
+ OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ adata->keylen, adata->keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+#endif
+
+ if (adata->keylen_pad > max_keylen)
+ return -EINVAL;
+
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
@@ -63,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
goto out_free;
}
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_out)) {
dev_err(jrdev, "unable to map key output memory\n");
@@ -74,7 +118,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
- append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+ OP_ALG_AS_INIT);
/*
* do a FIFO_LOAD of zero, this will trigger the internal key expansion
@@ -87,7 +133,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- append_fifo_store(desc, dma_addr_out, split_key_len,
+ append_fifo_store(desc, dma_addr_out, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
#ifdef DEBUG
@@ -108,11 +154,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_out,
- split_key_pad_len, 1);
+ adata->keylen_pad, 1);
#endif
}
- dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
DMA_FROM_DEVICE);
out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index c5588f6d8109..4628f389eb64 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@ struct split_key_result {
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op);
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen);
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 41cd5a356d05..6afa20c4a013 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -7,7 +7,11 @@
#include "regs.h"
-struct sec4_sg_entry;
+struct sec4_sg_entry {
+ u64 ptr;
+ u32 len;
+ u32 bpid_offset;
+};
/*
* convert single dma address to h/w link table format
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbacc6161..7bc09989e18a 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -404,10 +404,6 @@ static int ccp_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->sb_queue);
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Starting threads...\n");
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index faf3cb3ddce2..e2ce8190ecc9 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -21,6 +21,12 @@
#include "ccp-dev.h"
+/* Allocate the requested number of contiguous LSB slots
+ * from the LSB bitmap. Look in the private range for this
+ * queue first; failing that, check the public area.
+ * If no space is available, wait around.
+ * Return: first slot number
+ */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
struct ccp_device *ccp;
@@ -50,7 +56,7 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
bitmap_set(ccp->lsbmap, start, count);
mutex_unlock(&ccp->sb_mutex);
- return start * LSB_ITEM_SIZE;
+ return start;
}
ccp->sb_avail = 0;
@@ -63,17 +69,18 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
}
}
+/* Free a number of LSB slots from the bitmap, starting at
+ * the indicated starting slot number.
+ */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
unsigned int count)
{
- int lsbno = start / LSB_SIZE;
-
if (!start)
return;
- if (cmd_q->lsb == lsbno) {
+ if (cmd_q->lsb == start) {
/* An entry from the private LSB */
- bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ bitmap_clear(cmd_q->lsbmap, start, count);
} else {
/* From the shared LSBs */
struct ccp_device *ccp = cmd_q->ccp;
@@ -396,7 +403,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
- CCP_RSA_SIZE(&function) = op->u.rsa.mod_size;
+ CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;
CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -411,10 +418,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
- /* Key (Exponent) is in external memory */
- CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
- CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
- CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+ /* Exponent is in LSB memory */
+ CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
+ CCP5_CMD_KEY_HI(&desc) = 0;
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
return ccp5_do_cmd(&desc, op->cmd_q);
}
@@ -751,9 +758,6 @@ static int ccp5_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queue used to suspend */
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Loading LSB map...\n");
/* Copy the private LSB mask to the public registers */
status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cafa633aae10..511ab042b5e7 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -41,7 +41,7 @@ struct ccp_tasklet_data {
};
/* Human-readable error strings */
-char *ccp_error_codes[] = {
+static char *ccp_error_codes[] = {
"",
"ERR 01: ILLEGAL_ENGINE",
"ERR 02: ILLEGAL_KEY_ID",
@@ -478,6 +478,10 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
ccp->sb_count = KSB_COUNT;
ccp->sb_start = 0;
+ /* Initialize the wait queues */
+ init_waitqueue_head(&ccp->sb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
ccp->ord = ccp_increment_unit_ordinal();
snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index da5f4a678083..830f35e6005f 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -278,7 +278,7 @@ struct ccp_cmd_queue {
/* Private LSB that is assigned to this queue, or -1 if none.
* Bitmap for my private LSB, unused otherwise
*/
- unsigned int lsb;
+ int lsb;
DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
/* Queue processing thread */
@@ -515,7 +515,6 @@ struct ccp_op {
struct ccp_passthru_op passthru;
struct ccp_ecc_op ecc;
} u;
- struct ccp_mem key;
};
static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -541,23 +540,23 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
* word 7: upper 16 bits of key pointer; key memory type
*/
struct dword0 {
- __le32 soc:1;
- __le32 ioc:1;
- __le32 rsvd1:1;
- __le32 init:1;
- __le32 eom:1; /* AES/SHA only */
- __le32 function:15;
- __le32 engine:4;
- __le32 prot:1;
- __le32 rsvd2:7;
+ unsigned int soc:1;
+ unsigned int ioc:1;
+ unsigned int rsvd1:1;
+ unsigned int init:1;
+ unsigned int eom:1; /* AES/SHA only */
+ unsigned int function:15;
+ unsigned int engine:4;
+ unsigned int prot:1;
+ unsigned int rsvd2:7;
};
struct dword3 {
- __le32 src_hi:16;
- __le32 src_mem:2;
- __le32 lsb_cxt_id:8;
- __le32 rsvd1:5;
- __le32 fixed:1;
+ unsigned int src_hi:16;
+ unsigned int src_mem:2;
+ unsigned int lsb_cxt_id:8;
+ unsigned int rsvd1:5;
+ unsigned int fixed:1;
};
union dword4 {
@@ -567,18 +566,18 @@ union dword4 {
union dword5 {
struct {
- __le32 dst_hi:16;
- __le32 dst_mem:2;
- __le32 rsvd1:13;
- __le32 fixed:1;
+ unsigned int dst_hi:16;
+ unsigned int dst_mem:2;
+ unsigned int rsvd1:13;
+ unsigned int fixed:1;
} fields;
__le32 sha_len_hi;
};
struct dword7 {
- __le32 key_hi:16;
- __le32 key_mem:2;
- __le32 rsvd1:14;
+ unsigned int key_hi:16;
+ unsigned int key_mem:2;
+ unsigned int rsvd1:14;
};
struct ccp5_desc {
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 4ce67fb9a880..3e104f5aa0c2 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_AUTHENC
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 56b153805462..2ed1e24b44a8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -54,6 +54,12 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include "t4fw_api.h"
@@ -62,6 +68,11 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
+static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->aeadctx;
+}
+
static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->ablkctx;
@@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
return ctx->crypto_ctx->hmacctx;
}
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->authenc;
+}
+
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
return ctx->dev->u_ctx;
@@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+ u8 temp[SHA512_DIGEST_SIZE];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int authsize = crypto_aead_authsize(tfm);
+ struct cpl_fw6_pld *fw6_pld;
+ int cmp = 0;
+
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+ (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+ cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+ } else {
+
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+ authsize, req->assoclen +
+ req->cryptlen - authsize);
+ cmp = memcmp(temp, (fw6_pld + 1), authsize);
+ }
+ if (cmp)
+ *err = -EBADMSG;
+ else
+ *err = 0;
+}
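/*
 * Note on the comparison above: for GCM/RFC4106 requests both tag values
 * are available in the CPL_FW6_PLD completion message and are compared in
 * place; for the other AEAD modes the tag is first copied out of the tail
 * of req->src (at offset req->assoclen + req->cryptlen - authsize) into a
 * stack buffer, and *err is set to -EBADMSG on any mismatch.
 */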
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
*/
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
- int error_status)
+ int err)
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
@@ -109,17 +155,33 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
unsigned int digestsize, updated_digestsize;
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ ctx_req.req.aead_req = (struct aead_request *)req;
+ ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
+ dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+ ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
+ if (ctx_req.ctx.reqctx->skb) {
+ kfree_skb(ctx_req.ctx.reqctx->skb);
+ ctx_req.ctx.reqctx->skb = NULL;
+ }
+ if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(ctx_req.req.aead_req, input,
+ &err);
+ ctx_req.ctx.reqctx->verify = VERIFY_HW;
+ }
+ break;
+
case CRYPTO_ALG_TYPE_BLKCIPHER:
ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
ctx_req.ctx.ablk_ctx =
ablkcipher_request_ctx(ctx_req.req.ablk_req);
- if (!error_status) {
+ if (!err) {
fw6_pld = (struct cpl_fw6_pld *)input;
memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
AES_BLOCK_SIZE);
}
dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
- ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+ ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
if (ctx_req.ctx.ablk_ctx->skb) {
kfree_skb(ctx_req.ctx.ablk_ctx->skb);
ctx_req.ctx.ablk_ctx->skb = NULL;
@@ -138,8 +200,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- if (ctx_req.ctx.ahash_ctx->skb)
+ if (ctx_req.ctx.ahash_ctx->skb) {
+ kfree_skb(ctx_req.ctx.ahash_ctx->skb);
ctx_req.ctx.ahash_ctx->skb = NULL;
+ }
if (ctx_req.ctx.ahash_ctx->result == 1) {
ctx_req.ctx.ahash_ctx->result = 0;
memcpy(ctx_req.req.ahash_req->result, input +
@@ -150,11 +214,9 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
sizeof(struct cpl_fw6_pld),
updated_digestsize);
}
- kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
- ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
break;
}
- return 0;
+ return err;
}
/*
@@ -178,40 +240,81 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
return flits + sgl_len(cnt);
}
-static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+static inline void get_aes_decrypt_key(unsigned char *dec_key,
+ const unsigned char *key,
+ unsigned int keylength)
+{
+ u32 temp;
+ u32 w_ring[MAX_NK];
+ int i, j, k;
+ u8 nr, nk;
+
+ switch (keylength) {
+ case AES_KEYLENGTH_128BIT:
+ nk = KEYLENGTH_4BYTES;
+ nr = NUMBER_OF_ROUNDS_10;
+ break;
+ case AES_KEYLENGTH_192BIT:
+ nk = KEYLENGTH_6BYTES;
+ nr = NUMBER_OF_ROUNDS_12;
+ break;
+ case AES_KEYLENGTH_256BIT:
+ nk = KEYLENGTH_8BYTES;
+ nr = NUMBER_OF_ROUNDS_14;
+ break;
+ default:
+ return;
+ }
+ for (i = 0; i < nk; i++)
+ w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+ i = 0;
+ temp = w_ring[nk - 1];
+ while (i + nk < (nr + 1) * 4) {
+ if (!(i % nk)) {
+ /* RotWord(temp) */
+ temp = (temp << 8) | (temp >> 24);
+ temp = aes_ks_subword(temp);
+ temp ^= round_constant[i / nk];
+ } else if (nk == 8 && (i % 4 == 0)) {
+ temp = aes_ks_subword(temp);
+ }
+ w_ring[i % nk] ^= temp;
+ temp = w_ring[i % nk];
+ i++;
+ }
+ i--;
+ for (k = 0, j = i % nk; k < nk; k++) {
+ *((u32 *)dec_key + k) = htonl(w_ring[j]);
+ j--;
+ if (j < 0)
+ j += nk;
+ }
+}
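/*
 * Note on get_aes_decrypt_key(): the loop runs the standard AES key
 * schedule forward over a sliding window of the last nk words, so what
 * ends up in @dec_key are the last nk expanded key words, i.e. the round
 * keys the hardware needs in order to start AES decryption.
 */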
+
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
struct crypto_shash *base_hash = NULL;
- struct shash_desc *desc;
switch (ds) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha1", 0, 0);
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha224", 0, 0);
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha256", 0, 0);
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha384", 0, 0);
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha512", 0, 0);
break;
}
- if (IS_ERR(base_hash)) {
- pr_err("Can not allocate sha-generic algo.\n");
- return (void *)base_hash;
- }
- desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
- GFP_KERNEL);
- if (!desc)
- return ERR_PTR(-ENOMEM);
- desc->tfm = base_hash;
- desc->flags = crypto_shash_get_flags(base_hash);
- return desc;
+ return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
@@ -279,31 +382,18 @@ static inline int is_hmac(struct crypto_tfm *tfm)
struct chcr_alg_template *chcr_crypto_alg =
container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
alg.hash);
- if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
- CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+ if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
return 1;
return 0;
}
-static inline unsigned int ch_nents(struct scatterlist *sg,
- unsigned int *total_size)
-{
- unsigned int nents;
-
- for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
- nents++;
- *total_size += sg->length;
- }
- return nents;
-}
-
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
struct scatterlist *sg,
struct phys_sge_parm *sg_param)
{
struct phys_sge_pairs *to;
- unsigned int out_buf_size = sg_param->obsize;
- unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+ int out_buf_size = sg_param->obsize;
+ unsigned int nents = sg_param->nents, i, j = 0;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -321,25 +411,24 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
sizeof(struct cpl_rx_phys_dsgl));
for (i = 0; nents; to++) {
- for (j = i; (nents && (j < (8 + i))); j++, nents--) {
- to->len[j] = htons(sg->length);
+ for (j = 0; j < 8 && nents; j++, nents--) {
+ out_buf_size -= sg_dma_len(sg);
+ to->len[j] = htons(sg_dma_len(sg));
to->addr[j] = cpu_to_be64(sg_dma_address(sg));
- if (out_buf_size) {
- if (tot_len + sg_dma_len(sg) >= out_buf_size) {
- to->len[j] = htons(out_buf_size -
- tot_len);
- return;
- }
- tot_len += sg_dma_len(sg);
- }
sg = sg_next(sg);
}
}
+ if (out_buf_size) {
+ j--;
+ to--;
+ to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
+ }
}
-static inline unsigned
-int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg, struct phys_sge_parm *sg_param)
+static inline int map_writesg_phys_cpl(struct device *dev,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+ struct phys_sge_parm *sg_param)
{
if (!sg || !sg_param->nents)
return 0;
@@ -353,6 +442,14 @@ int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
return 0;
}
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -362,8 +459,23 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
+static inline void write_buffer_to_skb(struct sk_buff *skb,
+ unsigned int *frags,
+ char *bfr,
+ u8 bfr_len)
+{
+ skb->len += bfr_len;
+ skb->data_len += bfr_len;
+ skb->truesize += bfr_len;
+ get_page(virt_to_page(bfr));
+ skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+ offset_in_page(bfr), bfr_len);
+ (*frags)++;
+}
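/*
 * write_buffer_to_skb() appends bfr_len bytes backed by the page holding
 * bfr as a new skb fragment: it grabs a reference on that page, fills the
 * next frag slot and advances the caller's fragment counter.
 */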
+
+
static inline void
-write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
struct scatterlist *sg, unsigned int count)
{
struct page *spage;
@@ -372,8 +484,9 @@ write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
skb->len += count;
skb->data_len += count;
skb->truesize += count;
+
while (count > 0) {
- if (sg && (!(sg->length)))
+ if (!sg || (!(sg->length)))
break;
spage = sg_page(sg);
get_page(spage);
@@ -389,29 +502,25 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
struct _key_ctx *key_ctx)
{
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- get_aes_decrypt_key(key_ctx->key, ablkctx->key,
- ablkctx->enckey_len << 3);
- memset(key_ctx->key + ablkctx->enckey_len, 0,
- CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+ memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
} else {
memcpy(key_ctx->key,
ablkctx->key + (ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
- ablkctx->key, ablkctx->enckey_len << 2);
+ memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->rrkey, ablkctx->enckey_len >> 1);
}
return 0;
}
static inline void create_wreq(struct chcr_context *ctx,
- struct fw_crypto_lookaside_wr *wreq,
+ struct chcr_wr *chcr_req,
void *req, struct sk_buff *skb,
int kctx_len, int hash_sz,
- unsigned int phys_dsgl)
+ int is_iv,
+ unsigned int sc_len)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
- struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
unsigned int immdatalen = 0, nr_frags = 0;
@@ -423,27 +532,27 @@ static inline void create_wreq(struct chcr_context *ctx,
nr_frags = skb_shinfo(skb)->nr_frags;
}
- wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- (kctx_len >> 4));
- wreq->pld_size_hash_size =
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+ ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.pld_size_hash_size =
htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
- wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+ chcr_req->wreq.len16_pkd =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
(calc_tx_flits_ofld(skb) * 8), 16)));
- wreq->cookie = cpu_to_be64((uintptr_t)req);
- wreq->rx_chid_to_rx_q_id =
+ chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+ chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
- (hash_sz) ? IV_NOP : iv_loc);
+ is_iv ? iv_loc : IV_NOP);
- ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
- ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(*wreq)) >> 4)));
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+ 16) - ((sizeof(chcr_req->wreq)) >> 4)));
- sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
- sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
- ((hash_sz) ? DUMMY_BYTES :
- (sizeof(struct cpl_rx_phys_dsgl) +
- phys_dsgl)) + immdatalen);
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(chcr_req->key_ctx) +
+ kctx_len + sc_len + immdatalen);
}
/**
@@ -454,86 +563,83 @@ static inline void create_wreq(struct chcr_context *ctx,
* @op_type: encryption or decryption
*/
static struct sk_buff
-*create_cipher_wr(struct crypto_async_request *req_base,
- struct chcr_context *ctx, unsigned short qid,
+*create_cipher_wr(struct ablkcipher_request *req,
+ unsigned short qid,
unsigned short op_type)
{
- struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+ unsigned int frags = 0, transhdr_len, phys_dsgl;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
if (!req->info)
return ERR_PTR(-EINVAL);
- ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
- ablkctx->enc = op_type;
-
+ reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AES:Invalid Destination sg lists\n");
+ return ERR_PTR(-EINVAL);
+ }
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+ (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+ pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+ ablkctx->enckey_len, req->nbytes, ivsize);
return ERR_PTR(-EINVAL);
+ }
- phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+ phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
- kctx_len = sizeof(*key_ctx) +
- (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
-
- sec_cpl->pldlen = htonl(ivsize + req->nbytes);
- sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
- ivsize + 1, 0);
-
- sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
- 0, 0);
- sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
+
+ chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1, 1);
- sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+ 0, 0, ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
0, 1, phys_dsgl);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+ chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if (op_type == CHCR_DECRYPT_OP) {
- if (generate_copy_rrkey(ablkctx, key_ctx))
- goto map_fail1;
+ generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key, ablkctx->key,
+ ablkctx->enckey_len);
} else {
- memcpy(key_ctx->key, ablkctx->key +
+ memcpy(chcr_req->key_ctx.key, ablkctx->key +
(ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- memcpy(key_ctx->key +
+ memcpy(chcr_req->key_ctx.key +
(ablkctx->enckey_len >> 1),
ablkctx->key,
ablkctx->enckey_len >> 1);
}
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-
- memcpy(ablkctx->iv, req->info, ivsize);
- sg_init_table(&ablkctx->iv_sg, 1);
- sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
- sg_param.nents = ablkctx->dst_nents;
- sg_param.obsize = dst_bufsize;
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->nbytes;
sg_param.qid = qid;
sg_param.align = 1;
if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
@@ -541,10 +647,12 @@ static struct sk_buff
goto map_fail1;
skb_set_transport_header(skb, transhdr_len);
- write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
- write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
- create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
- req_ctx->skb = skb;
+ memcpy(reqctx->iv, req->info, ivsize);
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+ reqctx->skb = skb;
skb_get(skb);
return skb;
map_fail1:
@@ -557,15 +665,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
unsigned int ck_size, context_size;
u16 alignment = 0;
- if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
- goto badkey_err;
-
- memcpy(ablkctx->key, key, keylen);
- ablkctx->enckey_len = keylen;
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
@@ -576,7 +678,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
} else {
goto badkey_err;
}
-
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
keylen + alignment) >> 4;
@@ -612,7 +716,6 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -622,8 +725,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx,
- u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -639,7 +741,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -649,7 +750,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -729,50 +830,33 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline int
-write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
- struct sk_buff *skb, unsigned int *frags, char *bfr,
- u8 bfr_len)
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
- void *page_ptr = NULL;
-
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
- if (!page_ptr)
- return -ENOMEM;
- get_page(virt_to_page(page_ptr));
- req_ctx->dummy_payload_ptr = page_ptr;
- memcpy(page_ptr, bfr, bfr_len);
- skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
- offset_in_page(page_ptr), bfr_len);
- (*frags)++;
- return 0;
+ crypto_free_shash(base_hash);
}
/**
- * create_final_hash_wr - Create hash work request
+ * create_hash_wr - Create hash work request
* @req - Cipher req base
*/
-static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
- struct hash_wr_param *param)
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
+ struct hash_wr_param *param)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = sizeof(*key_ctx);
+ unsigned int kctx_len = 0;
u8 hash_size_in_response = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
- kctx_len += param->alg_prm.result_size + iopad_alignment;
+ kctx_len = param->alg_prm.result_size + iopad_alignment;
if (param->opad_needed)
kctx_len += param->alg_prm.result_size + iopad_alignment;
@@ -781,54 +865,54 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return skb;
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
- memset(wreq, 0, transhdr_len);
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
- sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+ chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
- sec_cpl->aadstart_cipherstop_hi =
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
- sec_cpl->cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
- sec_cpl->seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
- param->opad_needed, 0, 0);
+ param->opad_needed, 0);
- sec_cpl->ivgen_hdrlen =
+ chcr_req->sec_cpl.ivgen_hdrlen =
FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+ memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+ param->alg_prm.result_size);
if (param->opad_needed)
- memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
- CHCR_HASH_MAX_DIGEST_SIZE),
+ memcpy(chcr_req->key_ctx.key +
+ ((param->alg_prm.result_size <= 32) ? 32 :
+ CHCR_HASH_MAX_DIGEST_SIZE),
hmacctx->opad, param->alg_prm.result_size);
- key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+ chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
- (kctx_len >> 4));
- sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+ ((kctx_len +
+ sizeof(chcr_req->key_ctx)) >> 4));
+ chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
skb_set_transport_header(skb, transhdr_len);
if (param->bfr_len != 0)
- write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
- param->bfr_len);
+ write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
+ param->bfr_len);
if (param->sg_len != 0)
- write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+ write_sg_to_skb(skb, &frags, req->src, param->sg_len);
- create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
- 0);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
+ DUMMY_BYTES);
req_ctx->skb = skb;
skb_get(skb);
return skb;
@@ -854,34 +938,40 @@ static int chcr_ahash_update(struct ahash_request *req)
return -EBUSY;
}
- if (nbytes + req_ctx->bfr_len >= bs) {
- remainder = (nbytes + req_ctx->bfr_len) % bs;
- nbytes = nbytes + req_ctx->bfr_len - remainder;
+ if (nbytes + req_ctx->reqlen >= bs) {
+ remainder = (nbytes + req_ctx->reqlen) % bs;
+ nbytes = nbytes + req_ctx->reqlen - remainder;
} else {
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
- req_ctx->bfr_len, nbytes, 0);
- req_ctx->bfr_len += nbytes;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+ + req_ctx->reqlen, nbytes, 0);
+ req_ctx->reqlen += nbytes;
return 0;
}
params.opad_needed = 0;
params.more = 1;
params.last = 0;
- params.sg_len = nbytes - req_ctx->bfr_len;
- params.bfr_len = req_ctx->bfr_len;
+ params.sg_len = nbytes - req_ctx->reqlen;
+ params.bfr_len = req_ctx->reqlen;
params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
- req_ctx->bfr_len = remainder;
- if (remainder)
+ if (remainder) {
+ u8 *temp;
+ /* Swap buffers */
+ temp = req_ctx->reqbfr;
+ req_ctx->reqbfr = req_ctx->skbfr;
+ req_ctx->skbfr = temp;
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- req_ctx->bfr, remainder, req->nbytes -
+ req_ctx->reqbfr, remainder, req->nbytes -
remainder);
+ }
+ req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -917,10 +1007,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.sg_len = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 1;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if (req_ctx->reqlen == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -931,7 +1021,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -963,12 +1056,12 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.opad_needed = 0;
params.sg_len = req->nbytes;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->data_len += params.bfr_len + params.sg_len;
req_ctx->result = 1;
- if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if ((req_ctx->reqlen + req->nbytes) == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -979,9 +1072,10 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -1023,13 +1117,13 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->result = 1;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && req->nbytes == 0) {
- create_last_hash_block(req_ctx->bfr, bs, 0);
+ if (req->nbytes == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, 0);
params.more = 1;
params.bfr_len = bs;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
@@ -1044,12 +1138,12 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = out;
- state->bfr_len = req_ctx->bfr_len;
+ state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
- memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -1057,10 +1151,11 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
- req_ctx->bfr_len = state->bfr_len;
+ req_ctx->reqlen = state->reqlen;
req_ctx->data_len = state->data_len;
- req_ctx->dummy_payload_ptr = NULL;
- memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
+ memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
return 0;
@@ -1075,15 +1170,16 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
- /*
- * use the key to calculate the ipad and opad. ipad will sent with the
+ SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+	/* use the key to calculate the ipad and opad. ipad will be sent with the
* first request's data. opad will be sent with the final hash result
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
- if (!hmacctx->desc)
- return -EINVAL;
+ shash->tfm = hmacctx->base_hash;
+ shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
- err = crypto_shash_digest(hmacctx->desc, key, keylen,
+ err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
if (err)
goto out;
@@ -1104,13 +1200,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+ err = chcr_compute_partial_hash(shash, hmacctx->ipad,
hmacctx->ipad, digestsize);
if (err)
goto out;
chcr_change_order(hmacctx->ipad, updated_digestsize);
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+ err = chcr_compute_partial_hash(shash, hmacctx->opad,
hmacctx->opad, digestsize);
if (err)
goto out;
@@ -1124,28 +1220,29 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- int status = 0;
unsigned short context_size = 0;
- if ((key_len == (AES_KEYSIZE_128 << 1)) ||
- (key_len == (AES_KEYSIZE_256 << 1))) {
- memcpy(ablkctx->key, key, key_len);
- ablkctx->enckey_len = key_len;
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
- FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
- CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
- CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
- CHCR_KEYCTX_NO_KEY, 1,
- 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
- } else {
+ if ((key_len != (AES_KEYSIZE_128 << 1)) &&
+ (key_len != (AES_KEYSIZE_256 << 1))) {
crypto_tfm_set_flags((struct crypto_tfm *)tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
- status = -EINVAL;
+ return -EINVAL;
+
}
- return status;
+
+ memcpy(ablkctx->key, key, key_len);
+ ablkctx->enckey_len = key_len;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+ return 0;
}
static int chcr_sha_init(struct ahash_request *areq)
@@ -1155,8 +1252,9 @@ static int chcr_sha_init(struct ahash_request *areq)
int digestsize = crypto_ahash_digestsize(tfm);
req_ctx->data_len = 0;
- req_ctx->dummy_payload_ptr = NULL;
- req_ctx->bfr_len = 0;
+ req_ctx->reqlen = 0;
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
@@ -1204,29 +1302,1184 @@ static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->desc = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->desc))
- return PTR_ERR(hmacctx->desc);
+ hmacctx->base_hash = chcr_alloc_shash(digestsize);
+ if (IS_ERR(hmacctx->base_hash))
+ return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_free_shash(struct shash_desc *desc)
-{
- crypto_free_shash(desc->tfm);
- kfree(desc);
-}
-
static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- if (hmacctx->desc) {
- chcr_free_shash(hmacctx->desc);
- hmacctx->desc = NULL;
+ if (hmacctx->base_hash) {
+ chcr_free_shash(hmacctx->base_hash);
+ hmacctx->base_hash = NULL;
+ }
+}
+
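+/* Copy the associated data from src to dst via the null skcipher so that
+ * dst also carries the AAD when the request uses distinct buffers.
+ */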
+static int chcr_copy_assoc(struct aead_request *req,
+ struct chcr_aead_ctx *ctx)
+{
+ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+
+ skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+ NULL);
+
+ return crypto_skcipher_encrypt(skreq);
+}
+
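+/* Map the requested ICV length to the hardware HMAC truncation control. */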
+static unsigned char get_hmac(unsigned int authsize)
+{
+ switch (authsize) {
+ case ICV_8:
+ return CHCR_SCMD_HMAC_CTRL_PL1;
+ case ICV_10:
+ return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ case ICV_12:
+ return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ }
+ return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+}
+
+
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
+ unsigned int kctx_len = 0;
+ unsigned short stop_offset = 0;
+ unsigned int assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ int null = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ null = 1;
+ assoclen = 0;
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AUTHENC:Invalid Destination sg entries\n");
+ goto err;
+ }
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+ - sizeof(chcr_req->key_ctx);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* LLD is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ /* Write WR */
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+ /*
+	 * Input order is AAD, IV and payload, where the IV is included as
+	 * part of the authdata. All other fields are filled according
+	 * to the hardware spec.
+ */
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
+ (ivsize ? (assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ assoclen ? 1 : 0, assoclen,
+ assoclen + ivsize + 1,
+ (stop_offset & 0x1F0) >> 4);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+ stop_offset & 0xF,
+ null ? 0 : assoclen + ivsize + 1,
+ stop_offset, stop_offset);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ actx->auth_mode, aeadctx->hmac_ctrl,
+ ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ if (op_type == CHCR_ENCRYPT_OP)
+ memcpy(chcr_req->key_ctx.key, aeadctx->key,
+ aeadctx->enckey_len);
+ else
+ memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+ aeadctx->enckey_len);
+
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
+ 4), actx->h_iopad, kctx_len -
+ (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ if (assoclen) {
+ /* AAD buffer in */
+ write_sg_to_skb(skb, &frags, req->src, assoclen);
+
+ }
+ write_buffer_to_skb(skb, &frags, req->iv, ivsize);
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+
+ return skb;
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+err:
+ return ERR_PTR(-EINVAL);
+}
+
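+/* Zero the first (offset + 1) bytes of the destination SG entry; used to
+ * pad the dummy block sent for a zero-length GCM payload.
+ */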
+static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
+ unsigned short offset)
+{
+ struct page *spage;
+ unsigned char *addr;
+
+ spage = sg_page(sg);
+ get_page(spage); /* so that it is not freed by NIC */
+#ifdef KMAP_ATOMIC_ARGS
+ addr = kmap_atomic(spage, KM_SOFTIRQ0);
+#else
+ addr = kmap_atomic(spage);
+#endif
+ memset(addr + sg->offset, 0, offset + 1);
+
+ kunmap_atomic(addr);
+}
+
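+/* Encode msglen big-endian into the csize-byte length field at the end of
+ * the CCM B0 block; returns -EOVERFLOW if the length does not fit.
+ */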
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (unsigned int)(1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
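+/* Build the CCM B0 block in reqctx->scratch_pad from the formatted IV:
+ * set the authsize (M) and Adata flag bits and append the encoded message
+ * length, following the RFC 3610 layout.
+ */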
+static void generate_b0(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned short op_type)
+{
+ unsigned int l, lp, m;
+ int rc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ u8 *b0 = reqctx->scratch_pad;
+
+ m = crypto_aead_authsize(aead);
+
+ memcpy(b0, reqctx->iv, 16);
+
+ lp = b0[0];
+ l = lp + 1;
+
+ /* set m, bits 3-5 */
+ *b0 |= (8 * ((m - 2) / 2));
+
+ /* set adata, bit 6, if associated data is used */
+ if (req->assoclen)
+ *b0 |= 64;
+ rc = set_msg_len(b0 + 16 - l,
+ (op_type == CHCR_DECRYPT_OP) ?
+ req->cryptlen - m : req->cryptlen, l);
+}
+
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
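+/* Construct the CCM nonce/IV and the 2-byte AAD length field in the
+ * scratch pad; for RFC4309 the nonce is built from the salt and the
+ * 8-byte IV, and 8 bytes are subtracted from the AAD length.
+ */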
+static int ccm_format_packet(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int rc = 0;
+
+ if (req->assoclen > T5_MAX_AAD_SIZE) {
+ pr_err("CCM: Unsupported AAD data. It should be < %d\n",
+ T5_MAX_AAD_SIZE);
+ return -EINVAL;
+ }
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ reqctx->iv[0] = 3;
+ memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ memset(reqctx->iv + 12, 0, 4);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen - 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 16);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen);
+ }
+ generate_b0(req, aeadctx, op_type);
+ /* zero the ctr value */
+ memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ return rc;
+}
+
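+/* Fill the SEC CPL for CCM: the B0 block (and the AAD length field, when
+ * AAD is present) is accounted for as extra AAD (ccm_xtra) in every
+ * offset and length programmed into the CPL.
+ */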
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ unsigned int dst_size,
+ struct aead_request *req,
+ unsigned short op_type,
+ struct chcr_context *chcrctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+ unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+ unsigned int c_id = chcrctx->dev->tx_channel_id;
+ unsigned int ccm_xtra;
+ unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
+ unsigned int assoclen;
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen = req->assoclen - 8;
+ else
+ assoclen = req->assoclen;
+ ccm_xtra = CCM_B0_SIZE +
+ ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+ auth_offset = req->cryptlen ?
+ (assoclen + ivsize + 1 + ccm_xtra) : 0;
+ if (op_type == CHCR_DECRYPT_OP) {
+ if (crypto_aead_authsize(tfm) != req->cryptlen)
+ tag_offset = crypto_aead_authsize(tfm);
+ else
+ auth_offset = 0;
+ }
+
+
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+ 2, (ivsize ? (assoclen + 1) : 0) +
+ ccm_xtra);
+ sec_cpl->pldlen =
+ htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+	/* For CCM there will always be a B0 block, so AAD start is always 1 */
+ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ 1, assoclen + ccm_xtra, assoclen
+ + ivsize + 1 + ccm_xtra, 0);
+
+ sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+ auth_offset, tag_offset,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 :
+ crypto_aead_authsize(tfm));
+ sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+ cipher_mode, mac_mode, hmac_ctrl,
+ ivsize >> 1);
+
+ sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+ 1, dst_size);
+}
+
+int aead_ccm_validate_input(unsigned short op_type,
+ struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type)
+{
+ if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ if (crypto_ccm_check_iv(req->iv)) {
+ pr_err("CCM: IV check fails\n");
+ return -EINVAL;
+ }
+ } else {
+ if (req->assoclen != 16 && req->assoclen != 20) {
+ pr_err("RFC4309: Invalid AAD length %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+ }
+ if (aeadctx->enckey_len == 0) {
+ pr_err("CCM: Encryption key not set\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+unsigned int fill_aead_req_fields(struct sk_buff *skb,
+ struct aead_request *req,
+ struct scatterlist *src,
+ unsigned int ivsize,
+ struct chcr_aead_ctx *aeadctx)
+{
+ unsigned int frags = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	/* B0 and AAD length (if available) */
+
+ write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
+ (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
+ if (req->assoclen) {
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ write_sg_to_skb(skb, &frags, req->src,
+ req->assoclen - 8);
+ else
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+ }
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ if (req->cryptlen)
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+
+ return frags;
+}
+
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned int sub_type;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ sub_type = get_aead_subtype(tfm);
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err) {
+ pr_err("AAD copy to destination buffer fails\n");
+ return ERR_PTR(err);
+ }
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("CCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+ goto err;
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+
+ if (!skb)
+ goto err;
+
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), aeadctx->key, aeadctx->enckey_len);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+ goto dstmap_fail;
+
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+ frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+dstmap_fail:
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return ERR_PTR(-EINVAL);
+}
+
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned char tag_offset = 0;
+ unsigned int crypt_len = 0;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ unsigned char hmac_ctrl = get_hmac(authsize);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+
+ if (!req->cryptlen)
+		/* null payload is not supported by the hardware;
+		 * software sends a block-sized dummy payload instead
+ */
+ crypt_len = AES_BLOCK_SIZE;
+ else
+ crypt_len = req->cryptlen;
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("GCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* NIC driver is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ req->assoclen -= 8;
+
+ tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+ ctx->dev->tx_channel_id, 2, (ivsize ?
+ (req->assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ req->assoclen ? 1 : 0, req->assoclen,
+ req->assoclen + ivsize + 1, 0);
+ if (req->cryptlen) {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+ tag_offset, tag_offset);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+ CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
+ ivsize >> 1);
+ } else {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ?
+ 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ 0, 0, ivsize >> 1);
+ }
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+ /* prepare a 16 byte iv */
+ /* S A L T | IV | 0x00000001 */
+ if (get_aead_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ memcpy(reqctx->iv, aeadctx->salt, 4);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 12);
+ }
+ *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+
+ if (req->cryptlen) {
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ } else {
+ aes_gcm_empty_pld_pad(req->dst, authsize - 1);
+ write_sg_to_skb(skb, &frags, dst, crypt_len);
+ }
+
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return skb;
+}
+
+
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
+ aeadctx->null = crypto_get_default_null_skcipher();
+ if (IS_ERR(aeadctx->null))
+ return PTR_ERR(aeadctx->null);
+ return chcr_device_init(ctx);
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+ crypto_put_default_null_skcipher();
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+ aeadctx->mayverify = VERIFY_HW;
+ return 0;
+}
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
+	 * does not hold for SHA1, so the authsize == 12 check must come
+	 * before the authsize == (maxauth >> 1) check.
+ */
+ if (authsize == ICV_4) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_6) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_10) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_12) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_14) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == (maxauth >> 1)) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == maxauth) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ } else {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ }
+ return 0;
+}
+
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_13:
+ case ICV_15:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ break;
+ default:
+
+ crypto_tfm_set_flags((struct crypto_tfm *) tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_6:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_10:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ unsigned char ck_size, mk_size;
+ int key_ctx_size = 0;
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
+ if (keylen == AES_KEYSIZE_128) {
+ mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+ key_ctx_size >> 4);
+ return 0;
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ if (keylen < 3) {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ keylen -= 3;
+ memcpy(aeadctx->salt, key + keylen, 3);
+ return chcr_aead_ccm_setkey(aead, key, keylen);
+}
+
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+ struct blkcipher_desc h_desc;
+ struct scatterlist src[1];
+ unsigned int ck_size;
+ int ret = 0, key_ctx_size = 0;
+
+ if (get_aead_subtype(aead) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(aeadctx->salt, key + keylen, 4);
+ }
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+		pr_err("GCM: Invalid key length %d\n", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+	/* Calculate H = CIPH(K, 0 repeated 16 times) using a synchronous AES
+	 * blkcipher. It goes into the key context.
+ */
+ h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
+ if (IS_ERR(h_desc.tfm)) {
+ aeadctx->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+ h_desc.flags = 0;
+ ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
+ if (ret) {
+ aeadctx->enckey_len = 0;
+ goto out1;
+ }
+ memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+ sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
+ ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
+
+out1:
+ crypto_free_blkcipher(h_desc.tfm);
+out:
+ return ret;
+}
+
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* it contains both the auth and cipher keys */
+ struct crypto_authenc_keys keys;
+ unsigned int bs;
+ unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+ int err = 0, i, key_ctx_len = 0;
+ unsigned char ck_size = 0;
+ unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+ struct crypto_shash *base_hash = NULL;
+ struct algo_param param;
+ int align;
+ u8 *o_ptr = NULL;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+
+ if (get_alg_config(&param, max_authsize)) {
+ pr_err("chcr : Unsupported digest size\n");
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+
+	/* Copy only the encryption key. We use authkey to generate h(ipad)
+	 * and h(opad), so authkey is not needed again. authkeylen is the
+	 * size of the hash digest.
+ */
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+
+ base_hash = chcr_alloc_shash(max_authsize);
+ if (IS_ERR(base_hash)) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
}
+ {
+ SHASH_DESC_ON_STACK(shash, base_hash);
+ shash->tfm = base_hash;
+ shash->flags = crypto_shash_get_flags(base_hash);
+ bs = crypto_shash_blocksize(base_hash);
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ o_ptr = actx->h_iopad + param.result_size + align;
+
+ if (keys.authkeylen > bs) {
+ err = crypto_shash_digest(shash, keys.authkey,
+ keys.authkeylen,
+ o_ptr);
+ if (err) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
+ }
+ keys.authkeylen = max_authsize;
+ } else
+ memcpy(o_ptr, keys.authkey, keys.authkeylen);
+
+ /* Compute the ipad-digest*/
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= IPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+ max_authsize))
+ goto out;
+ /* Compute the opad-digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= OPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+ goto out;
+
+ /* convert the ipad and opad digest to network order */
+ chcr_change_order(actx->h_iopad, param.result_size);
+ chcr_change_order(o_ptr, param.result_size);
+ key_ctx_len = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+ 0, 1, key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+ chcr_free_shash(base_hash);
+
+ return 0;
+ }
+out:
+ aeadctx->enckey_len = 0;
+ if (base_hash)
+ chcr_free_shash(base_hash);
+ return -EINVAL;
}
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct crypto_authenc_keys keys;
+
+	/* it contains both the auth and cipher keys */
+ int key_ctx_len = 0;
+ unsigned char ck_size = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ key_ctx_len = sizeof(struct _key_ctx)
+ + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+ 0, key_ctx_len >> 4);
+ actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+ return 0;
+out:
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+}
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+ reqctx->verify = VERIFY_HW;
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int size;
+
+ if (aeadctx->mayverify == VERIFY_SW) {
+ size = crypto_aead_maxauthsize(tfm);
+ reqctx->verify = VERIFY_SW;
+ } else {
+ size = 0;
+ reqctx->verify = VERIFY_HW;
+ }
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_gcm_wr);
+ }
+}
+
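+/* Common AEAD submission path: build the work request with the supplied
+ * constructor and post it on the configured Tx channel; completion is
+ * reported asynchronously, hence -EINPROGRESS.
+ */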
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct sk_buff *skb;
+
+ if (ctx && !ctx->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+ op_type);
+
+ if (IS_ERR(skb) || skb == NULL) {
+ pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+ return PTR_ERR(skb);
+ }
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -1234,7 +2487,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "cbc(aes)",
- .cra_driver_name = "cbc(aes-chcr)",
+ .cra_driver_name = "cbc-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1261,7 +2514,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "xts(aes)",
- .cra_driver_name = "xts(aes-chcr)",
+ .cra_driver_name = "xts-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1354,7 +2607,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
- .cra_driver_name = "hmac(sha1-chcr)",
+ .cra_driver_name = "hmac-sha1-chcr",
.cra_blocksize = SHA1_BLOCK_SIZE,
}
}
@@ -1366,7 +2619,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
- .cra_driver_name = "hmac(sha224-chcr)",
+ .cra_driver_name = "hmac-sha224-chcr",
.cra_blocksize = SHA224_BLOCK_SIZE,
}
}
@@ -1378,7 +2631,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
- .cra_driver_name = "hmac(sha256-chcr)",
+ .cra_driver_name = "hmac-sha256-chcr",
.cra_blocksize = SHA256_BLOCK_SIZE,
}
}
@@ -1390,7 +2643,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
- .cra_driver_name = "hmac(sha384-chcr)",
+ .cra_driver_name = "hmac-sha384-chcr",
.cra_blocksize = SHA384_BLOCK_SIZE,
}
}
@@ -1402,11 +2655,205 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
- .cra_driver_name = "hmac(sha512-chcr)",
+ .cra_driver_name = "hmac-sha512-chcr",
.cra_blocksize = SHA512_BLOCK_SIZE,
}
}
},
+ /* Add AEAD Algorithms */
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+ },
+ .ivsize = 12,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_gcm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_ccm_setkey,
+ .setauthsize = chcr_ccm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_rfc4309_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,cbc(aes))",
+ .cra_driver_name =
+ "authenc-digest_null-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
};
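
To make the algorithm table above concrete, the following is a minimal, illustrative sketch of how a kernel caller could drive the registered "gcm(aes)" transform through the generic AEAD API. It is not part of the patch: the chcr_gcm_demo*() names are invented, the key/IV/buffer contents are placeholders, and the final completion status (delivered to the callback) is ignored for brevity.

#include <crypto/aead.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Final completion status arrives here; ignored in this sketch. */
static void chcr_gcm_demo_done(struct crypto_async_request *areq, int err)
{
	if (err == -EINPROGRESS)
		return;			/* backlog notification, not completion */
	complete(areq->data);
}

static int chcr_gcm_demo(void)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	/* static so the scatterlist never points at stack memory */
	static u8 key[16], iv[12], buf[32];	/* 16B plaintext + 16B tag */
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				       CRYPTO_TFM_REQ_MAY_SLEEP,
				  chcr_gcm_demo_done, &done);
	aead_request_set_ad(req, 0);
	aead_request_set_crypt(req, &sg, &sg, 16, iv);

	/* The chcr driver completes asynchronously; wait for the callback. */
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}
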
/*
@@ -1424,6 +2871,11 @@ static int chcr_unregister_alg(void)
crypto_unregister_alg(
&driver_algs[i].alg.crypto);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ if (driver_algs[i].is_registered)
+ crypto_unregister_aead(
+ &driver_algs[i].alg.aead);
+ break;
case CRYPTO_ALG_TYPE_AHASH:
if (driver_algs[i].is_registered)
crypto_unregister_ahash(
@@ -1458,6 +2910,19 @@ static int chcr_register_alg(void)
err = crypto_register_alg(&driver_algs[i].alg.crypto);
name = driver_algs[i].alg.crypto.cra_driver_name;
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ driver_algs[i].alg.aead.base.cra_priority =
+ CHCR_CRA_PRIORITY;
+ driver_algs[i].alg.aead.base.cra_flags =
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+ driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+ driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+ driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+ driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+ err = crypto_register_aead(&driver_algs[i].alg.aead);
+ name = driver_algs[i].alg.aead.base.cra_driver_name;
+ break;
case CRYPTO_ALG_TYPE_AHASH:
a_hash = &driver_algs[i].alg.hash;
a_hash->update = chcr_ahash_update;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 199b0bb69b89..3c7c51f7bedf 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -108,30 +108,24 @@
#define IPAD_DATA 0x36363636
#define OPAD_DATA 0x5c5c5c5c
-#define TRANSHDR_SIZE(alignedkctx_len)\
- (sizeof(struct ulptx_idata) +\
- sizeof(struct ulp_txpkt) +\
- sizeof(struct fw_crypto_lookaside_wr) +\
- sizeof(struct cpl_tx_sec_pdu) +\
- (alignedkctx_len))
-#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
- (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
+#define TRANSHDR_SIZE(kctx_len)\
+ (sizeof(struct chcr_wr) +\
+ kctx_len)
+#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
+ (TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
sizeof(struct cpl_rx_phys_dsgl))
-#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
- (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
+#define HASH_TRANSHDR_SIZE(kctx_len)\
+ (TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
-#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
- sizeof(struct ulp_txpkt) + \
- sizeof(struct ulptx_idata))
-#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \
+#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst) \
htonl( \
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
CPL_TX_SEC_PDU_RXCHID_V((id)) | \
CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
- CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
@@ -148,7 +142,7 @@
CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
-#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \
+#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size) \
htonl( \
SCMD_SEQ_NO_CTRL_V(0) | \
SCMD_STATUS_PRESENT_V(0) | \
@@ -159,7 +153,7 @@
SCMD_AUTH_MODE_V((amode)) | \
SCMD_HMAC_CTRL_V((opad)) | \
SCMD_IV_SIZE_V((size)) | \
- SCMD_NUM_IVS_V((nivs)))
+ SCMD_NUM_IVS_V(0))
#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
SCMD_ENB_DBGID_V(0) | \
@@ -264,13 +258,15 @@ enum {
* where they indicate the size of the integrity check value (ICV)
*/
enum {
- AES_CCM_ICV_4 = 4,
- AES_CCM_ICV_6 = 6,
- AES_CCM_ICV_8 = 8,
- AES_CCM_ICV_10 = 10,
- AES_CCM_ICV_12 = 12,
- AES_CCM_ICV_14 = 14,
- AES_CCM_ICV_16 = 16
+ ICV_4 = 4,
+ ICV_6 = 6,
+ ICV_8 = 8,
+ ICV_10 = 10,
+ ICV_12 = 12,
+ ICV_13 = 13,
+ ICV_14 = 14,
+ ICV_15 = 15,
+ ICV_16 = 16
};
struct hash_op_params {
@@ -394,7 +390,7 @@ static const u8 aes_sbox[256] = {
187, 22
};
-static u32 aes_ks_subword(const u32 w)
+static inline u32 aes_ks_subword(const u32 w)
{
u8 bytes[4];
@@ -412,61 +408,4 @@ static u32 round_constant[11] = {
0x1B000000, 0x36000000, 0x6C000000
};
-/* dec_key - OUTPUT - Reverse round key
- * key - INPUT - key
- * keylength - INPUT - length of the key in number of bits
- */
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
- const unsigned char *key,
- unsigned int keylength)
-{
- u32 temp;
- u32 w_ring[MAX_NK];
- int i, j, k;
- u8 nr, nk;
-
- switch (keylength) {
- case AES_KEYLENGTH_128BIT:
- nk = KEYLENGTH_4BYTES;
- nr = NUMBER_OF_ROUNDS_10;
- break;
-
- case AES_KEYLENGTH_192BIT:
- nk = KEYLENGTH_6BYTES;
- nr = NUMBER_OF_ROUNDS_12;
- break;
- case AES_KEYLENGTH_256BIT:
- nk = KEYLENGTH_8BYTES;
- nr = NUMBER_OF_ROUNDS_14;
- break;
- default:
- return;
- }
- for (i = 0; i < nk; i++ )
- w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
-
- i = 0;
- temp = w_ring[nk - 1];
- while(i + nk < (nr + 1) * 4) {
- if(!(i % nk)) {
- /* RotWord(temp) */
- temp = (temp << 8) | (temp >> 24);
- temp = aes_ks_subword(temp);
- temp ^= round_constant[i / nk];
- }
- else if (nk == 8 && (i % 4 == 0))
- temp = aes_ks_subword(temp);
- w_ring[i % nk] ^= temp;
- temp = w_ring[i % nk];
- i++;
- }
- i--;
- for (k = 0, j = i % nk; k < nk; k++) {
- *((u32 *)dec_key + k) = htonl(w_ring[j]);
- j--;
- if(j < 0)
- j += nk;
- }
-}
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 4d7f6700fd7e..918da8e6e2d8 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -110,14 +110,12 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
if (ack_err_status) {
if (CHK_MAC_ERR_BIT(ack_err_status) ||
CHK_PAD_ERR_BIT(ack_err_status))
- error_status = -EINVAL;
+ error_status = -EBADMSG;
}
/* call completion callback with failure status */
if (req) {
- if (!chcr_handle_resp(req, input, error_status))
- req->complete(req, error_status);
- else
- return -EINVAL;
+ error_status = chcr_handle_resp(req, input, error_status);
+ req->complete(req, error_status);
} else {
pr_err("Incorrect request address from the firmware\n");
return -EFAULT;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2a5c671a4232..c7088a4e0a49 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -52,13 +52,27 @@
#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
+#define MAX_SALT 4
struct uld_ctx;
+struct _key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[MAX_SALT];
+	__be64 reserved;
+ unsigned char key[0];
+};
+
+struct chcr_wr {
+ struct fw_crypto_lookaside_wr wreq;
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
struct chcr_dev {
- /* Request submited to h/w and waiting for response. */
spinlock_t lock_chcr_dev;
- struct crypto_queue pending_queue;
struct uld_ctx *u_ctx;
unsigned char tx_channel_id;
};
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d7d75605da8b..d5af7d64a763 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -36,6 +36,14 @@
#ifndef __CHCR_CRYPTO_H__
#define __CHCR_CRYPTO_H__
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+#define CCM_B0_SIZE 16
+#define CCM_AAD_FIELD_SIZE 2
+#define T5_MAX_AAD_SIZE 512
+
+
/* Define following if h/w is not dropping the AAD and IV data before
* giving the processed data
*/
@@ -63,22 +71,36 @@
#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
-#define CHCR_SCMD_CIPHER_MODE_NOP 0
-#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
-#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
-#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_NOP 0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_AES_CCM 7
#define CHCR_SCMD_AUTH_MODE_NOP 0
#define CHCR_SCMD_AUTH_MODE_SHA1 1
#define CHCR_SCMD_AUTH_MODE_SHA224 2
#define CHCR_SCMD_AUTH_MODE_SHA256 3
+#define CHCR_SCMD_AUTH_MODE_GHASH 4
#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
+#define CHCR_SCMD_AUTH_MODE_CBCMAC 9
+#define CHCR_SCMD_AUTH_MODE_CMAC 10
#define CHCR_SCMD_HMAC_CTRL_NOP 0
#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
+#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366 2
+#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT 3
+#define CHCR_SCMD_HMAC_CTRL_PL1 4
+#define CHCR_SCMD_HMAC_CTRL_PL2 5
+#define CHCR_SCMD_HMAC_CTRL_PL3 6
+#define CHCR_SCMD_HMAC_CTRL_DIV2 7
+#define VERIFY_HW 0
+#define VERIFY_SW 1
#define CHCR_SCMD_IVGEN_CTRL_HW 0
#define CHCR_SCMD_IVGEN_CTRL_SW 1
@@ -106,39 +128,74 @@
#define IV_IMMEDIATE 1
#define IV_DSGL 2
+#define AEAD_H_SIZE 16
+
#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
-#define MAX_SALT 4
#define MAX_SCRATCH_PAD_SIZE 32
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
/* Aligned to 128 bit boundary */
-struct _key_ctx {
- __be32 ctx_hdr;
- u8 salt[MAX_SALT];
- __be64 reserverd;
- unsigned char key[0];
-};
struct ablk_ctx {
- u8 enc;
- unsigned int processed_len;
__be32 key_ctx_hdr;
unsigned int enckey_len;
- unsigned int dst_nents;
- struct scatterlist iv_sg;
u8 key[CHCR_AES_MAX_KEY_LEN];
- u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char ciph_mode;
+ u8 rrkey[AES_MAX_KEY_SIZE];
+};
+struct chcr_aead_reqctx {
+ struct sk_buff *skb;
+ short int dst_nents;
+ u16 verify;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
+};
+
+struct chcr_gcm_ctx {
+ u8 ghash_h[AEAD_H_SIZE];
};
+struct chcr_authenc_ctx {
+ u8 dec_rrkey[AES_MAX_KEY_SIZE];
+ u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
+ unsigned char auth_mode;
+};
+
+struct __aead_ctx {
+ struct chcr_gcm_ctx gcm[0];
+ struct chcr_authenc_ctx authenc[0];
+};
+
+
+
+struct chcr_aead_ctx {
+ __be32 key_ctx_hdr;
+ unsigned int enckey_len;
+ struct crypto_skcipher *null;
+ u8 salt[MAX_SALT];
+ u8 key[CHCR_AES_MAX_KEY_LEN];
+ u16 hmac_ctrl;
+ u16 mayverify;
+ struct __aead_ctx ctx[0];
+};
+
+
+
struct hmac_ctx {
- struct shash_desc *desc;
+ struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
@@ -146,6 +203,7 @@ struct hmac_ctx {
struct __crypto_ctx {
struct hmac_ctx hmacctx[0];
struct ablk_ctx ablkctx[0];
+ struct chcr_aead_ctx aeadctx[0];
};
struct chcr_context {
@@ -156,18 +214,22 @@ struct chcr_context {
struct chcr_ahash_req_ctx {
u32 result;
- char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
- u8 bfr_len;
+ u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 *reqbfr;
+ u8 *skbfr;
+ u8 reqlen;
/* DMA the partial hash in it */
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */
- void *dummy_payload_ptr;
/* SKB which is being sent to the hardware for processing */
struct sk_buff *skb;
};
struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
+ unsigned int dst_nents;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};
struct chcr_alg_template {
@@ -176,16 +238,19 @@ struct chcr_alg_template {
union {
struct crypto_alg crypto;
struct ahash_alg hash;
+ struct aead_alg aead;
} alg;
};
struct chcr_req_ctx {
union {
struct ahash_request *ahash_req;
+ struct aead_request *aead_req;
struct ablkcipher_request *ablk_req;
} req;
union {
struct chcr_ahash_req_ctx *ahash_ctx;
+ struct chcr_aead_reqctx *reqctx;
struct chcr_blkcipher_req_ctx *ablk_ctx;
} ctx;
};
@@ -195,9 +260,15 @@ struct sge_opaque_hdr {
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
-typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
- struct chcr_context *ctx,
+typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
+ int size,
unsigned short op_type);
+static int chcr_aead_op(struct aead_request *req_base,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn);
+static inline int get_aead_subtype(struct crypto_aead *aead);
+
#endif /* __CHCR_CRYPTO_H__ */
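
Note that get_aead_subtype() is only declared here; its body lives in chcr_algo.c and is not shown in this hunk. As a hypothetical illustration of how the CRYPTO_ALG_SUB_TYPE_MASK scheme above is typically consumed (assuming the helper mirrors the container_of() pattern used for the driver's other template lookups), it could look like this:

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
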
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 37dadb2a4feb..6e7a5c77a00a 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -375,10 +375,6 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
if (!dma->padding_pool)
return -ENOMEM;
- dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
- if (!dma->iv_pool)
- return -ENOMEM;
-
cesa->dma = dma;
return 0;
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e423d33decd4..a768da7138a1 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -277,7 +277,7 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
-#define CESA_TDMA_IV 3
+#define CESA_TDMA_RESULT 3
/**
* struct mv_cesa_tdma_desc - TDMA descriptor
@@ -393,7 +393,6 @@ struct mv_cesa_dev_dma {
struct dma_pool *op_pool;
struct dma_pool *cache_pool;
struct dma_pool *padding_pool;
- struct dma_pool *iv_pool;
};
/**
@@ -839,7 +838,7 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
memset(chain, 0, sizeof(*chain));
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags);
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d19dc9614e6e..098871a22a54 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -212,7 +212,8 @@ mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+ memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+ ivsize);
} else {
memcpy_fromio(ablkreq->info,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
@@ -373,8 +374,9 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
/* Add output data for IV */
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
- ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
+ ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 77712b375b84..317cf029c0cf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -311,24 +311,40 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
- if (creq->last_req) {
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
+ (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+ __le32 *data = NULL;
+
/*
- * Hardware's MD5 digest is in little endian format, but
- * SHA in big endian format
+	 * Result is already in the correct endianness when the SA is
+ * used
*/
- if (creq->algo_le) {
- __le32 *result = (void *)ahashreq->result;
+ data = creq->base.chain.last->op->ctx.hash.hash;
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = cpu_to_le32(data[i]);
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_le32(creq->state[i]);
- } else {
- __be32 *result = (void *)ahashreq->result;
+ memcpy(ahashreq->result, data, digsize);
+ } else {
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = readl_relaxed(engine->regs +
+ CESA_IVDIG(i));
+ if (creq->last_req) {
+ /*
+ * Hardware's MD5 digest is in little endian format, but
+ * SHA in big endian format
+ */
+ if (creq->algo_le) {
+ __le32 *result = (void *)ahashreq->result;
+
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_le32(creq->state[i]);
+ } else {
+ __be32 *result = (void *)ahashreq->result;
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_be32(creq->state[i]);
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_be32(creq->state[i]);
+ }
}
}
@@ -503,6 +519,12 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
CESA_SA_DESC_CFG_LAST_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
+ ret = mv_cesa_dma_add_result_op(chain,
+ CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
return op;
}
@@ -563,6 +585,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
int ret;
+ u32 type;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
@@ -634,7 +657,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
goto err_free_tdma;
}
- if (op) {
+ /*
+ * If results are copied via DMA, this means that this
+ * request can be directly processed by the engine,
+ * without partial updates. So we can chain it at the
+ * DMA level with other requests.
+ */
+ type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
+
+ if (op && type != CESA_TDMA_RESULT) {
/* Add dummy desc to wait for crypto operation end */
ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
if (ret)
@@ -647,8 +678,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
else
creq->cache_ptr = 0;
- basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
- CESA_TDMA_BREAK_CHAIN);
+ basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+
+ if (type != CESA_TDMA_RESULT)
+ basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
return 0;
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 9fd7a5fbaa1b..4416b88eca70 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -69,9 +69,6 @@ void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
if (type == CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
- else if (type == CESA_TDMA_IV)
- dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
- le32_to_cpu(tdma->dst));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -209,29 +206,37 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
return new_tdma;
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags)
{
-
- struct mv_cesa_tdma_desc *tdma;
- u8 *iv;
- dma_addr_t dma_handle;
+ struct mv_cesa_tdma_desc *tdma, *op_desc;
tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
- iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
- if (!iv)
- return -ENOMEM;
+ /* We re-use an existing op_desc object to retrieve the context
+ * and result instead of allocating a new one.
+ * There is at least one object of this type in a CESA crypto
+	 * req, so just pick the first one in the chain.
+ */
+ for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
+ u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;
+
+ if (type == CESA_TDMA_OP)
+ break;
+ }
+
+ if (!op_desc)
+ return -EIO;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = src;
- tdma->dst = cpu_to_le32(dma_handle);
- tdma->data = iv;
+ tdma->dst = op_desc->src;
+ tdma->op = op_desc->op;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
- tdma->flags = flags | CESA_TDMA_IV;
+ tdma->flags = flags | CESA_TDMA_RESULT;
return 0;
}
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 104e9ce9400a..451fa18c1c7b 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1073,7 +1073,7 @@ static int mv_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -1163,7 +1163,6 @@ err_irq:
err_thread:
kthread_stop(cp->queue_th);
err:
- kfree(cp);
cpg = NULL;
return ret;
}
@@ -1187,7 +1186,6 @@ static int mv_remove(struct platform_device *pdev)
clk_put(cp->clk);
}
- kfree(cp);
cpg = NULL;
return 0;
}
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 42f0f229f7f7..036057abb257 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -32,7 +32,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/types.h>
#include <asm/hvcall.h>
#include <asm/vio.h>
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0c49956ee0ce..1d9ecd368b5b 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -390,7 +390,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
if (status & SAHARA_STATUS_MODE_BATCH)
dev_dbg(dev->device, " - Batch Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEDICATED)
- dev_dbg(dev->device, " - Decidated Mode.\n");
+ dev_dbg(dev->device, " - Dedicated Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEBUG)
dev_dbg(dev->device, " - Debug Mode.\n");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f41dc0..0bba6a19d36a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -590,7 +590,7 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
if (v_lo & TALITOS_CCPSR_LO_MDTE)
dev_err(dev, "master data transfer error\n");
if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
- dev_err(dev, is_sec1 ? "pointeur not complete error\n"
+ dev_err(dev, is_sec1 ? "pointer not complete error\n"
: "s/g data length zero error\n");
if (v_lo & TALITOS_CCPSR_LO_FPZ)
dev_err(dev, is_sec1 ? "parity error\n"
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index de6e241b0866..55f7c392582f 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -10,10 +10,12 @@ endif
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
-$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl
- $(call cmd,perl)
+targets += aesp8-ppc.S ghashp8-ppc.S
+
+$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE
+ $(call if_changed,perl)
-$(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl
- $(call cmd,perl)
+$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE
+ $(call if_changed,perl)
-.PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S
+clean-files := aesp8-ppc.S ghashp8-ppc.S
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 56e6c4c7c60d..d836d4ce5ee4 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -274,9 +274,10 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
int ret;
- ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+ ret = snd_soc_component_force_enable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);
@@ -284,7 +285,7 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
snd_soc_dapm_sync(dapm);
if (!arizona->pdata.micd_force_micbias) {
- ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);
@@ -349,6 +350,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
bool change;
int ret;
@@ -356,7 +358,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
ARIZONA_MICD_ENA, 0,
&change);
- ret = snd_soc_dapm_disable_pin(dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 6621b13c370f..d564d25df8ab 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -6,7 +6,7 @@
#
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cd4599c0523b..4070b7386e9d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -138,7 +138,7 @@ config HID_ASUS
tristate "Asus"
depends on I2C_HID
---help---
- Support for Asus notebook built-in keyboard via i2c.
+ Support for Asus notebook built-in keyboard and touchpad via i2c.
Supported devices:
- EeeBook X205TA
@@ -214,7 +214,7 @@ config HID_CMEDIA
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
- depends on USB_HID && I2C && GPIOLIB
+ depends on USB_HID && I2C && GPIOLIB && GPIOLIB_IRQCHIP
---help---
Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
This is a HID device driver which registers as an i2c adapter
@@ -512,6 +512,14 @@ config HID_MAGICMOUSE
Say Y here if you want support for the multi-touch features of the
Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
+config HID_MAYFLASH
+ tristate "Mayflash game controller adapter force feedback"
+ depends on HID
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have HJZ Mayflash PS3 game controller adapters
+ and want to enable force feedback support.
+
config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
depends on HID
@@ -861,6 +869,13 @@ config THRUSTMASTER_FF
a THRUSTMASTER Dual Trigger 3-in-1 or a THRUSTMASTER Ferrari GT
Rumble Force or Force Feedback Wheel.
+config HID_UDRAW_PS3
+ tristate "THQ PS3 uDraw tablet"
+ depends on HID
+ ---help---
+ Say Y here if you want to use the THQ uDraw gaming tablet for
+ the PS3.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 86b2b5785fd2..4d111f23e801 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
+obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
@@ -96,6 +97,7 @@ obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
+obj-$(CONFIG_HID_UDRAW_PS3) += hid-udraw-ps3.o
obj-$(CONFIG_HID_LED) += hid-led.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 7a811ec4f2e1..d40ed9fdf68d 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -11,6 +11,12 @@
* This module based on hid-ortek by
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
* Copyright (c) 2011 Jiri Kosina
+ *
+ * This module has been updated to add support for the Asus i2c touchpad.
+ *
+ * Copyright (c) 2016 Brendan McGrath <redmcg@redmandi.dyndns.org>
+ * Copyright (c) 2016 Victor Vlasenko <victor.vlasenko@sysgears.com>
+ * Copyright (c) 2016 Frederik Wenigwieser <frederik.wenigwieser@gmail.com>
*/
/*
@@ -20,16 +26,287 @@
* any later version.
*/
-#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/input/mt.h>
#include "hid-ids.h"
+MODULE_AUTHOR("Yusuke Fujimaki <usk.fujimaki@gmail.com>");
+MODULE_AUTHOR("Brendan McGrath <redmcg@redmandi.dyndns.org>");
+MODULE_AUTHOR("Victor Vlasenko <victor.vlasenko@sysgears.com>");
+MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
+MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
+
+#define FEATURE_REPORT_ID 0x0d
+#define INPUT_REPORT_ID 0x5d
+
+#define INPUT_REPORT_SIZE 28
+
+#define MAX_CONTACTS 5
+
+#define MAX_X 2794
+#define MAX_Y 1758
+#define MAX_TOUCH_MAJOR 8
+#define MAX_PRESSURE 128
+
+#define CONTACT_DATA_SIZE 5
+
+#define BTN_LEFT_MASK 0x01
+#define CONTACT_TOOL_TYPE_MASK 0x80
+#define CONTACT_X_MSB_MASK 0xf0
+#define CONTACT_Y_MSB_MASK 0x0f
+#define CONTACT_TOUCH_MAJOR_MASK 0x07
+#define CONTACT_PRESSURE_MASK 0x7f
+
+#define QUIRK_FIX_NOTEBOOK_REPORT BIT(0)
+#define QUIRK_NO_INIT_REPORTS BIT(1)
+#define QUIRK_SKIP_INPUT_MAPPING BIT(2)
+#define QUIRK_IS_MULTITOUCH BIT(3)
+
+#define NOTEBOOK_QUIRKS QUIRK_FIX_NOTEBOOK_REPORT
+#define TOUCHPAD_QUIRKS (QUIRK_NO_INIT_REPORTS | \
+ QUIRK_SKIP_INPUT_MAPPING | \
+ QUIRK_IS_MULTITOUCH)
+
+#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
+
+struct asus_drvdata {
+ unsigned long quirks;
+ struct input_dev *input;
+};
+
+static void asus_report_contact_down(struct input_dev *input,
+ int toolType, u8 *data)
+{
+ int touch_major, pressure;
+ int x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
+ int y = MAX_Y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);
+
+ if (toolType == MT_TOOL_PALM) {
+ touch_major = MAX_TOUCH_MAJOR;
+ pressure = MAX_PRESSURE;
+ } else {
+ touch_major = (data[3] >> 4) & CONTACT_TOUCH_MAJOR_MASK;
+ pressure = data[4] & CONTACT_PRESSURE_MASK;
+ }
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major);
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+}
+
+/* Required for Synaptics Palm Detection */
+static void asus_report_tool_width(struct input_dev *input)
+{
+ struct input_mt *mt = input->mt;
+ struct input_mt_slot *oldest;
+ int oldid, count, i;
+
+ oldest = NULL;
+ oldid = mt->trkid;
+ count = 0;
+
+ for (i = 0; i < mt->num_slots; ++i) {
+ struct input_mt_slot *ps = &mt->slots[i];
+ int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+
+ if (id < 0)
+ continue;
+ if ((id - oldid) & TRKID_SGN) {
+ oldest = ps;
+ oldid = id;
+ }
+ count++;
+ }
+
+ if (oldest) {
+ input_report_abs(input, ABS_TOOL_WIDTH,
+ input_mt_get_value(oldest, ABS_MT_TOUCH_MAJOR));
+ }
+}
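
For context on the tracking-id test above: TRKID_SGN is the sign bit of the 16-bit tracking-id space (0x8000, given TRKID_MAX of 0xffff in linux/input/mt.h), so (id - oldid) & TRKID_SGN is non-zero exactly when id precedes oldid modulo 2^16. With oldid = 7 and id = 5 the difference is -2, whose bit 15 is set, so slot 5 is kept as the older contact; the same test still ranks id = 0xfffe as older than oldid = 2 after the counter wraps, which is why the loop reliably reports the width of the longest-lived touch.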
+
+static void asus_report_input(struct input_dev *input, u8 *data)
+{
+ int i;
+ u8 *contactData = data + 2;
+
+ for (i = 0; i < MAX_CONTACTS; i++) {
+ bool down = !!(data[1] & BIT(i+3));
+ int toolType = contactData[3] & CONTACT_TOOL_TYPE_MASK ?
+ MT_TOOL_PALM : MT_TOOL_FINGER;
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, toolType, down);
+
+ if (down) {
+ asus_report_contact_down(input, toolType, contactData);
+ contactData += CONTACT_DATA_SIZE;
+ }
+ }
+
+ input_report_key(input, BTN_LEFT, data[1] & BTN_LEFT_MASK);
+ asus_report_tool_width(input);
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static int asus_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH &&
+ data[0] == INPUT_REPORT_ID &&
+ size == INPUT_REPORT_SIZE) {
+ asus_report_input(drvdata->input, data);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ int ret;
+ struct input_dev *input = hi->input;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, MAX_PRESSURE, 0, 0);
+
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ ret = input_mt_init_slots(input, MAX_CONTACTS, INPUT_MT_POINTER);
+
+ if (ret) {
+ hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
+ return ret;
+ }
+
+ drvdata->input = input;
+ }
+
+ return 0;
+}
+
+static int asus_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit,
+ int *max)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_SKIP_INPUT_MAPPING) {
+ /* Don't map anything from the HID report.
+ * We do it all manually in asus_input_configured
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int asus_start_multitouch(struct hid_device *hdev)
+{
+ int ret;
+ const unsigned char buf[] = { FEATURE_REPORT_ID, 0x00, 0x03, 0x01, 0x00 };
+ unsigned char *dmabuf = kmemdup(buf, sizeof(buf), GFP_KERNEL);
+
+ if (!dmabuf) {
+ ret = -ENOMEM;
+ hid_err(hdev, "Asus failed to alloc dma buf: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_raw_request(hdev, dmabuf[0], dmabuf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+ kfree(dmabuf);
+
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "Asus failed to start multitouch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+ return asus_start_multitouch(hdev);
+
+ return 0;
+}
+
+static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct asus_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (drvdata == NULL) {
+ hid_err(hdev, "Can't alloc Asus descriptor\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, drvdata);
+
+ drvdata->quirks = id->driver_data;
+
+ if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "Asus hid parse failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "Asus hw start failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!drvdata->input) {
+ hid_err(hdev, "Asus input not registered\n");
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
+ drvdata->input->name = "Asus TouchPad";
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ ret = asus_start_multitouch(hdev);
+ if (ret)
+ goto err_stop_hw;
+ }
+
+ return 0;
+err_stop_hw:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_FIX_NOTEBOOK_REPORT &&
+ *rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
@@ -37,15 +314,25 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
static const struct hid_device_id asus_devices[] = {
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
static struct hid_driver asus_driver = {
- .name = "asus",
- .id_table = asus_devices,
- .report_fixup = asus_report_fixup
+ .name = "asus",
+ .id_table = asus_devices,
+ .report_fixup = asus_report_fixup,
+ .probe = asus_probe,
+ .input_mapping = asus_input_mapping,
+ .input_configured = asus_input_configured,
+#ifdef CONFIG_PM
+ .reset_resume = asus_reset_resume,
+#endif
+ .raw_event = asus_raw_event
};
module_hid_driver(asus_driver);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2b89c701076f..cff060b56da9 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -727,8 +727,9 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
(hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -1857,6 +1858,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1883,6 +1885,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
@@ -1983,8 +1988,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
@@ -2059,6 +2065,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
@@ -2086,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 60d30203a5fa..f31a778b0851 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -24,6 +24,7 @@
* http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
*/
+#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/i2c.h>
@@ -168,6 +169,12 @@ struct cp2112_device {
struct gpio_chip gc;
u8 *in_out_buffer;
spinlock_t lock;
+
+ struct gpio_desc *desc[8];
+ bool gpio_poll;
+ struct delayed_work gpio_poll_worker;
+ unsigned long irq_mask;
+ u8 gpio_prev_state;
};
static int gpio_push_pull = 0xFF;
@@ -233,7 +240,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
spin_unlock_irqrestore(&dev->lock, flags);
}
-static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int cp2112_gpio_get_all(struct gpio_chip *chip)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
@@ -252,7 +259,7 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
goto exit;
}
- ret = (buf[1] >> offset) & 1;
+ ret = buf[1];
exit:
spin_unlock_irqrestore(&dev->lock, flags);
@@ -260,6 +267,17 @@ exit:
return ret;
}
+static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+
+ ret = cp2112_gpio_get_all(chip);
+ if (ret < 0)
+ return ret;
+
+ return (ret >> offset) & 1;
+}
+
static int cp2112_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
@@ -1041,6 +1059,166 @@ static void chmod_sysfs_attrs(struct hid_device *hdev)
}
}
+static void cp2112_gpio_irq_ack(struct irq_data *d)
+{
+}
+
+static void cp2112_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __clear_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __set_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_poll_callback(struct work_struct *work)
+{
+ struct cp2112_device *dev = container_of(work, struct cp2112_device,
+ gpio_poll_worker.work);
+ struct irq_data *d;
+ u8 gpio_mask;
+ u8 virqs = (u8)dev->irq_mask;
+ u32 irq_type;
+ int irq, virq, ret;
+
+ ret = cp2112_gpio_get_all(&dev->gc);
+ if (ret == -ENODEV) /* the hardware has been disconnected */
+ return;
+ if (ret < 0)
+ goto exit;
+
+ gpio_mask = ret;
+
+ while (virqs) {
+ virq = ffs(virqs) - 1;
+ virqs &= ~BIT(virq);
+
+ if (!dev->gc.to_irq)
+ break;
+
+ irq = dev->gc.to_irq(&dev->gc, virq);
+
+ d = irq_get_irq_data(irq);
+ if (!d)
+ continue;
+
+ irq_type = irqd_get_trigger_type(d);
+
+ if (gpio_mask & BIT(virq)) {
+ /* Level High */
+
+ if (irq_type & IRQ_TYPE_LEVEL_HIGH)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_RISING) &&
+ !(dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ } else {
+ /* Level Low */
+
+ if (irq_type & IRQ_TYPE_LEVEL_LOW)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_FALLING) &&
+ (dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ }
+ }
+
+ dev->gpio_prev_state = gpio_mask;
+
+exit:
+ if (dev->gpio_poll)
+ schedule_delayed_work(&dev->gpio_poll_worker, 10);
+}
+
+
+static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
+ cp2112_gpio_direction_input(gc, d->hwirq);
+
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+ }
+
+ cp2112_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+}
+
+static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ return 0;
+}
+
+static struct irq_chip cp2112_gpio_irqchip = {
+ .name = "cp2112-gpio",
+ .irq_startup = cp2112_gpio_irq_startup,
+ .irq_shutdown = cp2112_gpio_irq_shutdown,
+ .irq_ack = cp2112_gpio_irq_ack,
+ .irq_mask = cp2112_gpio_irq_mask,
+ .irq_unmask = cp2112_gpio_irq_unmask,
+ .irq_set_type = cp2112_gpio_irq_type,
+};
+
+static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ int pin)
+{
+ int ret;
+
+ if (dev->desc[pin])
+ return -EINVAL;
+
+ dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin,
+ "HID/I2C:Event");
+ if (IS_ERR(dev->desc[pin])) {
+ dev_err(dev->gc.parent, "Failed to request GPIO\n");
+ return PTR_ERR(dev->desc[pin]);
+ }
+
+ ret = gpiochip_lock_as_irq(&dev->gc, pin);
+ if (ret) {
+ dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
+ goto err_desc;
+ }
+
+ ret = gpiod_to_irq(dev->desc[pin]);
+ if (ret < 0) {
+ dev_err(dev->gc.parent, "Failed to translate GPIO to IRQ\n");
+ goto err_lock;
+ }
+
+ return ret;
+
+err_lock:
+ gpiochip_unlock_as_irq(&dev->gc, pin);
+err_desc:
+ gpiochip_free_own_desc(dev->desc[pin]);
+ dev->desc[pin] = NULL;
+ return ret;
+}
+
static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct cp2112_device *dev;
@@ -1163,8 +1341,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
chmod_sysfs_attrs(hdev);
hid_hw_power(hdev, PM_HINT_NORMAL);
+ ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev->gc.parent, "failed to add IRQ chip\n");
+ goto err_sysfs_remove;
+ }
+
return ret;
+err_sysfs_remove:
+ sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
err_gpiochip_remove:
gpiochip_remove(&dev->gc);
err_free_i2c:
@@ -1181,10 +1368,22 @@ err_hid_stop:
static void cp2112_remove(struct hid_device *hdev)
{
struct cp2112_device *dev = hid_get_drvdata(hdev);
+ int i;
sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
- gpiochip_remove(&dev->gc);
i2c_del_adapter(&dev->adap);
+
+ if (dev->gpio_poll) {
+ dev->gpio_poll = false;
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->desc); i++) {
+ gpiochip_unlock_as_irq(&dev->gc, i);
+ gpiochip_free_own_desc(dev->desc[i]);
+ }
+
+ gpiochip_remove(&dev->gc);
/* i2c_del_adapter has finished removing all i2c devices from our
* adapter. Well behaved devices should no longer call our cp2112_xfer
* and should have waited for any pending calls to finish. It has also
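
Since the CP2112 irqchip added above emulates GPIO interrupts by polling from a workqueue, a consumer sees them as ordinary nested (threaded) interrupts. The following is only a hypothetical sketch of such a consumer; the "event" con_id, the device wiring and the demo_* names are invented for illustration and are not part of the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *data)
{
	/* Runs in thread context, which suits the polled cp2112 irqchip. */
	return IRQ_HANDLED;
}

static int demo_attach(struct device *dev)
{
	struct gpio_desc *desc;
	int irq;

	/* "event" is an assumed con_id for a line routed to the CP2112. */
	desc = devm_gpiod_get(dev, "event", GPIOD_IN);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	irq = gpiod_to_irq(desc);
	if (irq < 0)
		return irq;

	/* No hard-IRQ handler: the events originate from a workqueue. */
	return devm_request_threaded_irq(dev, irq, NULL, demo_handler,
					 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					 "cp2112-demo", NULL);
}
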
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 575aa65436d1..ec277b96eaa1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -171,6 +171,7 @@
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
#define USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD 0x8585
+#define USB_DEVICE_ID_ASUSTEK_TOUCHPAD 0x0101
#define USB_VENDOR_ID_ATEN 0x0557
#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -315,8 +316,10 @@
#define USB_VENDOR_ID_DMI 0x0c0b
#define USB_DEVICE_ID_DMI_ENC 0x5fab
-#define USB_VENDOR_ID_DRAGONRISE 0x0079
-#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -718,8 +721,9 @@
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 0x07e4
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 0x07e8
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
-#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07de
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
@@ -903,6 +907,8 @@
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
@@ -959,6 +965,9 @@
#define USB_VENDOR_ID_THINGM 0x27b8
#define USB_DEVICE_ID_BLINK1 0x01ed
+#define USB_VENDOR_ID_THQ 0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
+
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TIVO 0x150a
@@ -1034,6 +1043,10 @@
#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH 0x0500
#define USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET 0x0502
+#define USB_VENDOR_ID_WEIDA 0x2575
+#define USB_DEVICE_ID_WEIDA_8752 0xC300
+#define USB_DEVICE_ID_WEIDA_8755 0xC301
+
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
#define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fb9ace1cef8b..d05f903c7614 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -253,6 +253,7 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
case ABS_RX:
case ABS_RY:
case ABS_RZ:
+ case ABS_WHEEL:
case ABS_TILT_X:
case ABS_TILT_Y:
if (field->unit == 0x14) { /* If degrees */
@@ -1468,6 +1469,31 @@ static void hidinput_cleanup_hidinput(struct hid_device *hid,
kfree(hidinput);
}
+static struct hid_input *hidinput_match(struct hid_report *report)
+{
+ struct hid_device *hid = report->device;
+ struct hid_input *hidinput;
+
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ if (hidinput->report &&
+ hidinput->report->id == report->id)
+ return hidinput;
+ }
+
+ return NULL;
+}
+
+static inline void hidinput_configure_usages(struct hid_input *hidinput,
+ struct hid_report *report)
+{
+ int i, j;
+
+ for (i = 0; i < report->maxfield; i++)
+ for (j = 0; j < report->field[i]->maxusage; j++)
+ hidinput_configure_usage(hidinput, report->field[i],
+ report->field[i]->usage + j);
+}
+
/*
* Register the input device; print a message.
* Configure the input layer interface
@@ -1478,8 +1504,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
{
struct hid_driver *drv = hid->driver;
struct hid_report *report;
- struct hid_input *hidinput = NULL;
- int i, j, k;
+ struct hid_input *next, *hidinput = NULL;
+ int i, k;
INIT_LIST_HEAD(&hid->inputs);
INIT_WORK(&hid->led_work, hidinput_led_worker);
@@ -1509,43 +1535,40 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
if (!report->maxfield)
continue;
+ /*
+ * Find the previous hidinput report attached
+ * to this report id.
+ */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
+ hidinput = hidinput_match(report);
+
if (!hidinput) {
hidinput = hidinput_allocate(hid);
if (!hidinput)
goto out_unwind;
}
- for (i = 0; i < report->maxfield; i++)
- for (j = 0; j < report->field[i]->maxusage; j++)
- hidinput_configure_usage(hidinput, report->field[i],
- report->field[i]->usage + j);
-
- if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput))
- continue;
+ hidinput_configure_usages(hidinput, report);
- if (hid->quirks & HID_QUIRK_MULTI_INPUT) {
- /* This will leave hidinput NULL, so that it
- * allocates another one if we have more inputs on
- * the same interface. Some devices (e.g. Happ's
- * UGCI) cram a lot of unrelated inputs into the
- * same interface. */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
hidinput->report = report;
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- hidinput = NULL;
- }
}
}
- if (hidinput && (hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput)) {
- /* no need to register an input device not populated */
- hidinput_cleanup_hidinput(hid, hidinput);
- hidinput = NULL;
+ list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
+ if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
+ !hidinput_has_been_populated(hidinput)) {
+ /* no need to register an input device not populated */
+ hidinput_cleanup_hidinput(hid, hidinput);
+ continue;
+ }
+
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_unwind;
+ if (input_register_device(hidinput->input))
+ goto out_unwind;
+ hidinput->registered = true;
}
if (list_empty(&hid->inputs)) {
@@ -1553,20 +1576,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
goto out_unwind;
}
- if (hidinput) {
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- }
-
return 0;
-out_cleanup:
- list_del(&hidinput->list);
- input_free_device(hidinput->input);
- kfree(hidinput);
out_unwind:
/* unwind the ones we already registered */
hidinput_disconnect(hid);
@@ -1583,7 +1594,10 @@ void hidinput_disconnect(struct hid_device *hid)
list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
list_del(&hidinput->list);
- input_unregister_device(hidinput->input);
+ if (hidinput->registered)
+ input_unregister_device(hidinput->input);
+ else
+ input_free_device(hidinput->input);
kfree(hidinput);
}
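
The new `registered` flag encodes a rule of the input core: once input_register_device() has succeeded, the device must be released with input_unregister_device(); an input device that was only allocated is released with input_free_device() instead. A minimal sketch of that teardown rule (illustrative helper, not part of the patch):

	/* Sketch: the teardown rule the `registered` flag tracks. */
	static void example_release_input(struct input_dev *dev, bool registered)
	{
		if (registered)
			input_unregister_device(dev);	/* drops the final reference */
		else
			input_free_device(dev);		/* never registered: just free it */
	}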
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
new file mode 100644
index 000000000000..d9090765a6e5
--- /dev/null
+++ b/drivers/hid/hid-mf.c
@@ -0,0 +1,166 @@
+/*
+ * Force feedback support for Mayflash game controller adapters.
+ *
+ * These devices are manufactured by Mayflash but identify themselves
+ * using the vendor ID of DragonRise Inc.
+ *
+ * Tested with:
+ * 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
+ *
+ * The following adapters probably work too, but need to be tested:
+ * 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
+ * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
+ *
+ * Copyright (c) 2016 Marcel Hasler <mahasler@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct mf_device {
+ struct hid_report *report;
+};
+
+static int mf_play(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct mf_device *mf = data;
+ int strong, weak;
+
+ strong = effect->u.rumble.strong_magnitude;
+ weak = effect->u.rumble.weak_magnitude;
+
+ dbg_hid("Called with 0x%04x 0x%04x.\n", strong, weak);
+
+ strong = strong * 0xff / 0xffff;
+ weak = weak * 0xff / 0xffff;
+
+ dbg_hid("Running with 0x%02x 0x%02x.\n", strong, weak);
+
+ mf->report->field[0]->value[0] = weak;
+ mf->report->field[0]->value[1] = strong;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+
+ return 0;
+}
+
+static int mf_init(struct hid_device *hid)
+{
+ struct mf_device *mf;
+
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+
+ struct list_head *report_ptr;
+ struct hid_report *report;
+
+ struct list_head *input_ptr = &hid->inputs;
+ struct hid_input *input;
+
+ struct input_dev *dev;
+
+ int error;
+
+ /* Setup each of the four inputs */
+ list_for_each(report_ptr, report_list) {
+ report = list_entry(report_ptr, struct hid_report, list);
+
+ if (report->maxfield < 1 || report->field[0]->report_count < 2) {
+ hid_err(hid, "Invalid report, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ if (list_is_last(input_ptr, &hid->inputs)) {
+ hid_err(hid, "Missing input, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ input_ptr = input_ptr->next;
+ input = list_entry(input_ptr, struct hid_input, list);
+
+ mf = kzalloc(sizeof(struct mf_device), GFP_KERNEL);
+ if (!mf)
+ return -ENOMEM;
+
+ dev = input->input;
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ error = input_ff_create_memless(dev, mf, mf_play);
+ if (error) {
+ kfree(mf);
+ return error;
+ }
+
+ mf->report = report;
+ mf->report->field[0]->value[0] = 0x00;
+ mf->report->field[0]->value[1] = 0x00;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+ }
+
+ hid_info(hid, "Force feedback for HJZ Mayflash game controller "
+ "adapters by Marcel Hasler <mahasler@gmail.com>\n");
+
+ return 0;
+}
+
+static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+ int error;
+
+ dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");
+
+ /* Split device into four inputs */
+ hid->quirks |= HID_QUIRK_MULTI_INPUT;
+
+ error = hid_parse(hid);
+ if (error) {
+ hid_err(hid, "HID parse failed.\n");
+ return error;
+ }
+
+ error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hid, "HID hw start failed\n");
+ return error;
+ }
+
+ error = mf_init(hid);
+ if (error) {
+ hid_err(hid, "Force feedback init failed.\n");
+ hid_hw_stop(hid);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id mf_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mf_devices);
+
+static struct hid_driver mf_driver = {
+ .name = "hid_mf",
+ .id_table = mf_devices,
+ .probe = mf_probe,
+};
+module_hid_driver(mf_driver);
+
+MODULE_LICENSE("GPL");
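
mf_play() above scales the 16-bit force-feedback magnitudes delivered by the input layer down to the single byte the Mayflash adapter expects. A standalone sketch of that scaling, using the same arithmetic as the driver (the sample values are only illustrations):

	#include <stdio.h>

	/* Scale a 0..0xffff rumble magnitude to the adapter's 0..0xff range. */
	static unsigned int scale_magnitude(unsigned int mag)
	{
		return mag * 0xff / 0xffff;
	}

	int main(void)
	{
		printf("%u %u %u\n",
		       scale_magnitude(0xffff),	/* 255 */
		       scale_magnitude(0x8000),	/* 127 */
		       scale_magnitude(0x0000));	/* 0 */
		return 0;
	}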
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6cd392e9f99..74b7b84a0420 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -280,9 +280,11 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4),
+ .driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fb6f1f447279..6dca66806844 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -108,6 +108,7 @@ struct mt_device {
int cc_value_index; /* contact count value index in the field */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
+ unsigned long initial_quirks; /* initial quirks state */
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
__s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
@@ -318,13 +319,10 @@ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
u8 *buf;
/*
- * Only fetch the feature report if initial reports are not already
- * been retrieved. Currently this is only done for Windows 8 touch
- * devices.
+ * Do not fetch the feature report if the device has been explicitly
+ * marked as non-capable.
*/
- if (!(hdev->quirks & HID_QUIRK_NO_INIT_REPORTS))
- return;
- if (td->mtclass.name != MT_CLS_WIN_8)
+ if (td->initial_quirks & HID_QUIRK_NO_INIT_REPORTS)
return;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -567,6 +565,14 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_BUTTON:
code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE);
+ /*
+ * MS PTP spec says that external buttons left and right have
+ * usages 2 and 3.
+ */
+ if (cls->name == MT_CLS_WIN_8 &&
+ field->application == HID_DG_TOUCHPAD &&
+ (usage->hid & HID_USAGE) > 1)
+ code--;
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
input_set_capability(hi->input, EV_KEY, code);
return 1;
@@ -842,7 +848,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (!td->mtclass.export_all_inputs &&
field->application != HID_DG_TOUCHSCREEN &&
field->application != HID_DG_PEN &&
- field->application != HID_DG_TOUCHPAD)
+ field->application != HID_DG_TOUCHPAD &&
+ field->application != HID_GD_KEYBOARD &&
+ field->application != HID_CP_CONSUMER_CONTROL)
return -1;
/*
@@ -1083,36 +1091,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
-
- /*
- * This allows the driver to handle different input sensors
- * that emits events through different reports on the same HID
- * device.
- */
- hdev->quirks |= HID_QUIRK_MULTI_INPUT;
- hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
-
- /*
- * Handle special quirks for Windows 8 certified devices.
- */
- if (id->group == HID_GROUP_MULTITOUCH_WIN_8)
- /*
- * Some multitouch screens do not like to be polled for input
- * reports. Fortunately, the Win8 spec says that all touches
- * should be sent during each report, making the initialization
- * of input reports unnecessary.
- *
- * In addition some touchpads do not behave well if we read
- * all feature reports from them. Instead we prevent
- * initial report fetching and then selectively fetch each
- * report we are interested in.
- */
- hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
-
td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
@@ -1136,6 +1114,39 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
td->serial_maybe = true;
+ /*
+ * Store the initial quirk state
+ */
+ td->initial_quirks = hdev->quirks;
+
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+ /*
+ * This allows the driver to handle different input sensors
+ * that emit events through different reports on the same HID
+ * device.
+ */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
+
+ /*
+ * Some multitouch screens do not like to be polled for input
+ * reports. Fortunately, the Win8 spec says that all touches
+ * should be sent during each report, making the initialization
+ * of input reports unnecessary. For Win7 devices, well, let's hope
+ * they will still be happy (this is only a problem if a touch
+ * was already there while probing the device).
+ *
+ * In addition some touchpads do not behave well if we read
+ * all feature reports from them. Instead we prevent
+ * initial report fetching and then selectively fetch each
+ * report we are interested in.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
ret = hid_parse(hdev);
if (ret != 0)
return ret;
@@ -1204,8 +1215,11 @@ static int mt_resume(struct hid_device *hdev)
static void mt_remove(struct hid_device *hdev)
{
+ struct mt_device *td = hid_get_drvdata(hdev);
+
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
+ hdev->quirks = td->initial_quirks;
}
/*
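
The button-remapping hunk in mt_touch_input_mapping() folds the Windows Precision Touchpad convention quoted in the comment (usage 1 is the integrated click, usages 2 and 3 are the external left/right buttons) onto the usual BTN_MOUSE range. A worked sketch mirroring the driver's arithmetic (plain C, for illustration only):

	#include <stdio.h>

	#define BTN_MOUSE 0x110	/* BTN_LEFT; BTN_RIGHT is 0x111 */

	/* Mirror of the Win8 touchpad button mapping added above. */
	static int map_button(unsigned int usage)
	{
		int code = BTN_MOUSE + (usage - 1);

		if (usage > 1)	/* external buttons use usages 2 and 3 */
			code--;
		return code;
	}

	int main(void)
	{
		/* usage 1 -> 0x110 (BTN_LEFT), 2 -> 0x110, 3 -> 0x111 (BTN_RIGHT) */
		printf("%#x %#x %#x\n", map_button(1), map_button(2), map_button(3));
		return 0;
	}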
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 60875625cbdf..5c925228847c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -795,6 +795,12 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
USB_DEVICE_ID_MS_TYPE_COVER_2),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
+ 0x07bd), /* Microsoft Surface 3 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROCHIP,
+ 0x0f01), /* MM7150 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
USB_DEVICE_ID_STM_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b0bb99a821bd..7687c0875395 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -36,6 +36,8 @@
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
#include "hid-ids.h"
@@ -374,7 +376,7 @@ static u8 dualshock4_usb_rdesc[] = {
0x65, 0x00, /* Unit, */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -403,14 +405,14 @@ static u8 dualshock4_usb_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -687,7 +689,7 @@ static u8 dualshock4_bt_rdesc[] = {
0x81, 0x42, /* Input (Variable, Null State), */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -712,14 +714,14 @@ static u8 dualshock4_bt_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -975,6 +977,32 @@ static const unsigned int buzz_keymap[] = {
[20] = BTN_TRIGGER_HAPPY20,
};
+static const unsigned int ds4_absmap[] = {
+ [0x30] = ABS_X,
+ [0x31] = ABS_Y,
+ [0x32] = ABS_RX, /* right stick X */
+ [0x33] = ABS_Z, /* L2 */
+ [0x34] = ABS_RZ, /* R2 */
+ [0x35] = ABS_RY, /* right stick Y */
+};
+
+static const unsigned int ds4_keymap[] = {
+ [0x1] = BTN_WEST, /* Square */
+ [0x2] = BTN_SOUTH, /* Cross */
+ [0x3] = BTN_EAST, /* Circle */
+ [0x4] = BTN_NORTH, /* Triangle */
+ [0x5] = BTN_TL, /* L1 */
+ [0x6] = BTN_TR, /* R1 */
+ [0x7] = BTN_TL2, /* L2 */
+ [0x8] = BTN_TR2, /* R2 */
+ [0x9] = BTN_SELECT, /* Share */
+ [0xa] = BTN_START, /* Options */
+ [0xb] = BTN_THUMBL, /* L3 */
+ [0xc] = BTN_THUMBR, /* R3 */
+ [0xd] = BTN_MODE, /* PS */
+};
+
+
static enum power_supply_property sony_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CAPACITY,
@@ -1019,14 +1047,24 @@ struct motion_output_report_02 {
u8 rumble;
};
-#define DS4_REPORT_0x02_SIZE 37
-#define DS4_REPORT_0x05_SIZE 32
-#define DS4_REPORT_0x11_SIZE 78
-#define DS4_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0x02_SIZE 37
+#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_INPUT_REPORT_0x11_SIZE 78
+#define DS4_OUTPUT_REPORT_0x05_SIZE 32
+#define DS4_OUTPUT_REPORT_0x11_SIZE 78
#define SIXAXIS_REPORT_0xF2_SIZE 17
#define SIXAXIS_REPORT_0xF5_SIZE 8
#define MOTION_REPORT_0x02_SIZE 49
+/* Offsets relative to USB input report (0x1). Bluetooth (0x11) requires an
+ * additional +2.
+ */
+#define DS4_INPUT_REPORT_BUTTON_OFFSET 5
+#define DS4_INPUT_REPORT_BATTERY_OFFSET 30
+#define DS4_INPUT_REPORT_TOUCHPAD_OFFSET 33
+
+#define DS4_TOUCHPAD_SUFFIX " Touchpad"
+
static DEFINE_SPINLOCK(sony_dev_list_lock);
static LIST_HEAD(sony_device_list);
static DEFINE_IDA(sony_device_id_allocator);
@@ -1035,6 +1073,7 @@ struct sony_sc {
spinlock_t lock;
struct list_head list_node;
struct hid_device *hdev;
+ struct input_dev *touchpad;
struct led_classdev *leds[MAX_LEDS];
unsigned long quirks;
struct work_struct state_worker;
@@ -1130,6 +1169,37 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
+static int ds4_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
+ unsigned int key = usage->hid & HID_USAGE;
+
+ if (key >= ARRAY_SIZE(ds4_keymap))
+ return -1;
+
+ key = ds4_keymap[key];
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
+ return 1;
+ } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK) {
+ unsigned int abs = usage->hid & HID_USAGE;
+
+ /* Let the HID parser deal with the HAT. */
+ if (usage->hid == HID_GD_HATSWITCH)
+ return 0;
+
+ if (abs >= ARRAY_SIZE(ds4_absmap))
+ return -1;
+
+ abs = ds4_absmap[abs];
+ hid_map_usage_clear(hi, usage, bit, max, EV_ABS, abs);
+ return 1;
+ }
+
+ return 0;
+}
+
static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
@@ -1219,23 +1289,22 @@ static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
unsigned long flags;
- int n, offset;
+ int n, m, offset, num_touch_data, max_touch_data;
u8 cable_state, battery_capacity, battery_charging;
- /*
- * Battery and touchpad data starts at byte 30 in the USB report and
- * 32 in Bluetooth report.
- */
- offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 30 : 32;
+ /* When using Bluetooth the header is 2 bytes longer, so skip these. */
+ int data_offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 0 : 2;
+
+ /* Second bit of third button byte is for the touchpad button. */
+ offset = data_offset + DS4_INPUT_REPORT_BUTTON_OFFSET;
+ input_report_key(sc->touchpad, BTN_LEFT, rd[offset+2] & 0x2);
/*
- * The lower 4 bits of byte 30 contain the battery level
+ * The lower 4 bits of byte 30 (or 32 for BT) contain the battery level
* and the 5th bit contains the USB cable state.
*/
+ offset = data_offset + DS4_INPUT_REPORT_BATTERY_OFFSET;
cable_state = (rd[offset] >> 4) & 0x01;
battery_capacity = rd[offset] & 0x0F;
@@ -1262,30 +1331,52 @@ static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
sc->battery_charging = battery_charging;
spin_unlock_irqrestore(&sc->lock, flags);
- offset += 5;
-
/*
- * The Dualshock 4 multi-touch trackpad data starts at offset 35 on USB
- * and 37 on Bluetooth.
- * The first 7 bits of the first byte is a counter and bit 8 is a touch
- * indicator that is 0 when pressed and 1 when not pressed.
- * The next 3 bytes are two 12 bit touch coordinates, X and Y.
- * The data for the second touch is in the same format and immediatly
- * follows the data for the first.
+ * The Dualshock 4 multi-touch trackpad data starts at offset 33 on USB
+ * and 35 on Bluetooth.
+ * The first byte indicates the number of touch data in the report.
+ * Trackpad data starts 2 bytes later (e.g. 35 for USB).
*/
- for (n = 0; n < 2; n++) {
- u16 x, y;
+ offset = data_offset + DS4_INPUT_REPORT_TOUCHPAD_OFFSET;
+ max_touch_data = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 3 : 4;
+ if (rd[offset] > 0 && rd[offset] <= max_touch_data)
+ num_touch_data = rd[offset];
+ else
+ num_touch_data = 1;
+ offset += 1;
+
+ for (m = 0; m < num_touch_data; m++) {
+ /* Skip past timestamp */
+ offset += 1;
+
+ /*
+ * The first 7 bits of the first byte is a counter and bit 8 is
+ * a touch indicator that is 0 when pressed and 1 when not
+ * pressed.
+ * The next 3 bytes are two 12 bit touch coordinates, X and Y.
+ * The data for the second touch is in the same format and
+ * immediately follows the data for the first.
+ */
+ for (n = 0; n < 2; n++) {
+ u16 x, y;
+ bool active;
- x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
- y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
+ x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
+ y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
- input_mt_slot(input_dev, n);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
- !(rd[offset] >> 7));
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ active = !(rd[offset] >> 7);
+ input_mt_slot(sc->touchpad, n);
+ input_mt_report_slot_state(sc->touchpad, MT_TOOL_FINGER, active);
- offset += 4;
+ if (active) {
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_X, x);
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_Y, y);
+ }
+
+ offset += 4;
+ }
+ input_mt_sync_frame(sc->touchpad);
+ input_sync(sc->touchpad);
}
}
@@ -1324,6 +1415,21 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
} else if (((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && rd[0] == 0x01 &&
size == 64) || ((sc->quirks & DUALSHOCK4_CONTROLLER_BT)
&& rd[0] == 0x11 && size == 78)) {
+ if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
+ /* CRC check */
+ u8 bthdr = 0xA1;
+ u32 crc;
+ u32 report_crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, rd, DS4_INPUT_REPORT_0x11_SIZE-4);
+ report_crc = get_unaligned_le32(&rd[DS4_INPUT_REPORT_0x11_SIZE-4]);
+ if (crc != report_crc) {
+ hid_dbg(sc->hdev, "DualShock 4 input report's CRC check failed, received crc 0x%0x != 0x%0x\n",
+ report_crc, crc);
+ return -EILSEQ;
+ }
+ }
dualshock4_parse_report(sc, rd, size);
}
@@ -1367,47 +1473,84 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & PS3REMOTE)
return ps3remote_mapping(hdev, hi, field, usage, bit, max);
+
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ return ds4_mapping(hdev, hi, field, usage, bit, max);
+
/* Let hid-core decide for the others */
return 0;
}
-static int sony_register_touchpad(struct hid_input *hi, int touch_count,
+static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
int w, int h)
{
- struct input_dev *input_dev = hi->input;
+ size_t name_sz;
+ char *name;
int ret;
- ret = input_mt_init_slots(input_dev, touch_count, 0);
+ sc->touchpad = input_allocate_device();
+ if (!sc->touchpad)
+ return -ENOMEM;
+
+ input_set_drvdata(sc->touchpad, sc);
+ sc->touchpad->dev.parent = &sc->hdev->dev;
+ sc->touchpad->phys = sc->hdev->phys;
+ sc->touchpad->uniq = sc->hdev->uniq;
+ sc->touchpad->id.bustype = sc->hdev->bus;
+ sc->touchpad->id.vendor = sc->hdev->vendor;
+ sc->touchpad->id.product = sc->hdev->product;
+ sc->touchpad->id.version = sc->hdev->version;
+
+ /* Append a suffix to the controller name as there are various
+ * DS4 compatible non-Sony devices with different names.
+ */
+ name_sz = strlen(sc->hdev->name) + sizeof(DS4_TOUCHPAD_SUFFIX);
+ name = kzalloc(name_sz, GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ snprintf(name, name_sz, "%s" DS4_TOUCHPAD_SUFFIX, sc->hdev->name);
+ sc->touchpad->name = name;
+
+ ret = input_mt_init_slots(sc->touchpad, touch_count, 0);
if (ret < 0)
- return ret;
+ goto err;
+
+ /* We map the button underneath the touchpad to BTN_LEFT. */
+ __set_bit(EV_KEY, sc->touchpad->evbit);
+ __set_bit(BTN_LEFT, sc->touchpad->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, sc->touchpad->propbit);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_X, 0, w, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_Y, 0, h, 0, 0);
+
+ ret = input_register_device(sc->touchpad);
+ if (ret < 0)
+ goto err;
return 0;
+
+err:
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
+
+ input_free_device(sc->touchpad);
+ sc->touchpad = NULL;
+
+ return ret;
}
-static int sony_input_configured(struct hid_device *hdev,
- struct hid_input *hidinput)
+static void sony_unregister_touchpad(struct sony_sc *sc)
{
- struct sony_sc *sc = hid_get_drvdata(hdev);
- int ret;
+ if (!sc->touchpad)
+ return;
- /*
- * The Dualshock 4 touchpad supports 2 touches and has a
- * resolution of 1920x942 (44.86 dots/mm).
- */
- if (sc->quirks & DUALSHOCK4_CONTROLLER) {
- ret = sony_register_touchpad(hidinput, 2, 1920, 942);
- if (ret) {
- hid_err(sc->hdev,
- "Unable to initialize multi-touch slots: %d\n",
- ret);
- return ret;
- }
- }
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
- return 0;
+ input_unregister_device(sc->touchpad);
+ sc->touchpad = NULL;
}
/*
@@ -1483,11 +1626,11 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
u8 *buf;
int ret;
- buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x02_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE,
+ ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_FEATURE_REPORT_0x02_SIZE,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
kfree(buf);
@@ -1892,14 +2035,14 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
* 0xD0 - 66hz
*/
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- memset(buf, 0, DS4_REPORT_0x05_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x05_SIZE);
buf[0] = 0x05;
buf[1] = 0xFF;
offset = 4;
} else {
- memset(buf, 0, DS4_REPORT_0x11_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x11_SIZE);
buf[0] = 0x11;
- buf[1] = 0x80;
+ buf[1] = 0xC0; /* HID + CRC */
buf[3] = 0x0F;
offset = 6;
}
@@ -1925,10 +2068,17 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
buf[offset++] = sc->led_delay_off[3];
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE);
- else
- hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE,
- HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x05_SIZE);
+ else {
+ /* CRC generation */
+ u8 bthdr = 0xA2;
+ u32 crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, buf, DS4_OUTPUT_REPORT_0x11_SIZE-4);
+ put_unaligned_le32(crc, &buf[74]);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x11_SIZE);
+ }
}
static void motion_send_output_report(struct sony_sc *sc)
@@ -1972,10 +2122,10 @@ static int sony_allocate_output_report(struct sony_sc *sc)
kmalloc(sizeof(union sixaxis_output_report_01),
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x11_SIZE,
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x05_SIZE,
GFP_KERNEL);
else if (sc->quirks & MOTION_CONTROLLER)
sc->output_report_dmabuf = kmalloc(MOTION_REPORT_0x02_SIZE,
@@ -2220,7 +2370,7 @@ static int sony_check_add(struct sony_sc *sc)
return 0;
}
} else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- buf = kmalloc(DS4_REPORT_0x81_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x81_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -2230,10 +2380,10 @@ static int sony_check_add(struct sony_sc *sc)
* offset 1.
*/
ret = hid_hw_raw_request(sc->hdev, 0x81, buf,
- DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
+ DS4_FEATURE_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
- if (ret != DS4_REPORT_0x81_SIZE) {
+ if (ret != DS4_FEATURE_REPORT_0x81_SIZE) {
hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
ret = ret < 0 ? ret : -EINVAL;
goto out_free;
@@ -2329,45 +2479,12 @@ static inline void sony_cancel_work_sync(struct sony_sc *sc)
cancel_work_sync(&sc->state_worker);
}
-static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+static int sony_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
{
- int ret;
+ struct sony_sc *sc = hid_get_drvdata(hdev);
int append_dev_id;
- unsigned long quirks = id->driver_data;
- struct sony_sc *sc;
- unsigned int connect_mask = HID_CONNECT_DEFAULT;
-
- if (!strcmp(hdev->name, "FutureMax Dance Mat"))
- quirks |= FUTUREMAX_DANCE_MAT;
-
- sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
- if (sc == NULL) {
- hid_err(hdev, "can't alloc sony descriptor\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&sc->lock);
-
- sc->quirks = quirks;
- hid_set_drvdata(hdev, sc);
- sc->hdev = hdev;
-
- ret = hid_parse(hdev);
- if (ret) {
- hid_err(hdev, "parse failed\n");
- return ret;
- }
-
- if (sc->quirks & VAIO_RDESC_CONSTANT)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
- else if (sc->quirks & SIXAXIS_CONTROLLER)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
-
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- return ret;
- }
+ int ret;
ret = sony_set_device_id(sc);
if (ret < 0) {
@@ -2415,11 +2532,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sony_init_output_report(sc, sixaxis_send_output_report);
} else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
- /*
- * The DualShock 4 wants output reports sent on the ctrl
- * endpoint when connected via Bluetooth.
- */
- hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
ret = dualshock4_set_operational_bt(hdev);
if (ret < 0) {
hid_err(hdev, "failed to set the Dualshock 4 operational mode\n");
@@ -2427,6 +2539,18 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
+ /*
+ * The Dualshock 4 touchpad supports 2 touches and has a
+ * resolution of 1920x942 (44.86 dots/mm).
+ */
+ ret = sony_register_touchpad(sc, 2, 1920, 942);
+ if (ret) {
+ hid_err(sc->hdev,
+ "Unable to initialize multi-touch slots: %d\n",
+ ret);
+ return ret;
+ }
+
sony_init_output_report(sc, dualshock4_send_output_report);
} else if (sc->quirks & MOTION_CONTROLLER) {
sony_init_output_report(sc, motion_send_output_report);
@@ -2482,17 +2606,84 @@ err_stop:
return ret;
}
+static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ unsigned long quirks = id->driver_data;
+ struct sony_sc *sc;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+
+ if (!strcmp(hdev->name, "FutureMax Dance Mat"))
+ quirks |= FUTUREMAX_DANCE_MAT;
+
+ sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (sc == NULL) {
+ hid_err(hdev, "can't alloc sony descriptor\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&sc->lock);
+
+ sc->quirks = quirks;
+ hid_set_drvdata(hdev, sc);
+ sc->hdev = hdev;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (sc->quirks & VAIO_RDESC_CONSTANT)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+ else if (sc->quirks & SIXAXIS_CONTROLLER)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+
+ /* Patch the hw version on DS4 compatible devices, so applications can
+ * distinguish between the default HID mappings and the mappings defined
+ * by the Linux game controller spec. This is important for the SDL2
+ * library, which has a game controller database, which uses device ids
+ * in combination with version as a key.
+ */
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ hdev->version |= 0x8000;
+
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ /* sony_input_configured can fail, but this doesn't result
+ * in hid_hw_start failures (intended). Check whether
+ * the HID layer claimed the device; fail otherwise.
+ * We don't know the actual reason for the failure, most
+ * likely it is due to EEXIST in case of double connection
+ * of USB and Bluetooth, but could have been due to ENOMEM
+ * or other reasons as well.
+ */
+ if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
+ hid_err(hdev, "failed to claim input\n");
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
static void sony_remove(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
+ hid_hw_close(hdev);
+
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(sc);
- if (sc->quirks & SONY_BATTERY_SUPPORT) {
- hid_hw_close(hdev);
+ if (sc->quirks & SONY_BATTERY_SUPPORT)
sony_battery_remove(sc);
- }
+
+ if (sc->touchpad)
+ sony_unregister_touchpad(sc);
sony_cancel_work_sync(sc);
@@ -2596,6 +2787,12 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
/* Nyko Core Controller for PS3 */
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
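
The Bluetooth hunks above add a CRC-32 over each 78-byte DualShock 4 report: the CRC covers a one-byte BT header (0xA1 for input reports, 0xA2 for output reports) followed by the first 74 bytes of the report, and is stored little-endian in the last 4 bytes. A kernel-style sketch of the check, assuming the same helpers the patch uses (crc32_le, get_unaligned_le32):

	#include <linux/types.h>
	#include <linux/crc32.h>
	#include <asm/unaligned.h>

	/* Sketch: verify the trailing CRC of a DS4 Bluetooth report. */
	static bool ds4_bt_crc_ok(const u8 *rd, int size, u8 bthdr)
	{
		u32 crc, report_crc;

		crc = crc32_le(0xFFFFFFFF, &bthdr, 1);	/* 0xA1 input, 0xA2 output */
		crc = ~crc32_le(crc, rd, size - 4);
		report_crc = get_unaligned_le32(&rd[size - 4]);
		return crc == report_crc;
	}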
diff --git a/drivers/hid/hid-udraw-ps3.c b/drivers/hid/hid-udraw-ps3.c
new file mode 100644
index 000000000000..88ea390c10ad
--- /dev/null
+++ b/drivers/hid/hid-udraw-ps3.c
@@ -0,0 +1,474 @@
+/*
+ * HID driver for THQ PS3 uDraw tablet
+ *
+ * Copyright (C) 2016 Red Hat Inc. All Rights Reserved
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("PS3 uDraw tablet driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Protocol information from:
+ * http://brandonw.net/udraw/
+ * and the source code of:
+ * https://vvvv.org/contribution/udraw-hid
+ */
+
+/*
+ * The device is set up with multiple input devices:
+ * - the touch area which works as a touchpad
+ * - the tablet area which works as a touchpad/drawing tablet
+ * - a joypad with a d-pad, and 7 buttons
+ * - an accelerometer device
+ */
+
+enum {
+ TOUCH_NONE,
+ TOUCH_PEN,
+ TOUCH_FINGER,
+ TOUCH_TWOFINGER
+};
+
+enum {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z
+};
+
+/*
+ * Accelerometer min/max values
+ * in order, X, Y and Z
+ */
+static struct {
+ int min;
+ int max;
+} accel_limits[] = {
+ [AXIS_X] = { 490, 534 },
+ [AXIS_Y] = { 490, 534 },
+ [AXIS_Z] = { 492, 536 }
+};
+
+#define DEVICE_NAME "THQ uDraw Game Tablet for PS3"
+/* resolution in pixels */
+#define RES_X 1920
+#define RES_Y 1080
+/* size in mm */
+#define WIDTH 160
+#define HEIGHT 90
+#define PRESSURE_OFFSET 113
+#define MAX_PRESSURE (255 - PRESSURE_OFFSET)
+
+struct udraw {
+ struct input_dev *joy_input_dev;
+ struct input_dev *touch_input_dev;
+ struct input_dev *pen_input_dev;
+ struct input_dev *accel_input_dev;
+ struct hid_device *hdev;
+
+ /*
+ * The device's two-finger support is pretty unreliable, as
+ * the device could report a single touch when the two fingers
+ * are too close together, and the distance between the fingers, even
+ * though reported, is not in the same unit as the touches.
+ *
+ * We'll make do without it, and try to report the first touch
+ * as reliably as possible.
+ */
+ int last_one_finger_x;
+ int last_one_finger_y;
+ int last_two_finger_x;
+ int last_two_finger_y;
+};
+
+static int clamp_accel(int axis, int offset)
+{
+ axis = clamp(axis,
+ accel_limits[offset].min,
+ accel_limits[offset].max);
+	axis = ((axis - accel_limits[offset].min) * 0xFF) /
+		(accel_limits[offset].max -
+		 accel_limits[offset].min);
+ return axis;
+}
+
+static int udraw_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int len)
+{
+ struct udraw *udraw = hid_get_drvdata(hdev);
+ int touch;
+ int x, y, z;
+
+ if (len != 27)
+ return 0;
+
+ if (data[11] == 0x00)
+ touch = TOUCH_NONE;
+ else if (data[11] == 0x40)
+ touch = TOUCH_PEN;
+ else if (data[11] == 0x80)
+ touch = TOUCH_FINGER;
+ else
+ touch = TOUCH_TWOFINGER;
+
+ /* joypad */
+ input_report_key(udraw->joy_input_dev, BTN_WEST, data[0] & 1);
+ input_report_key(udraw->joy_input_dev, BTN_SOUTH, !!(data[0] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_EAST, !!(data[0] & 4));
+ input_report_key(udraw->joy_input_dev, BTN_NORTH, !!(data[0] & 8));
+
+ input_report_key(udraw->joy_input_dev, BTN_SELECT, !!(data[1] & 1));
+ input_report_key(udraw->joy_input_dev, BTN_START, !!(data[1] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_MODE, !!(data[1] & 16));
+
+ x = y = 0;
+ switch (data[2]) {
+ case 0x0:
+ y = -127;
+ break;
+ case 0x1:
+ y = -127;
+ x = 127;
+ break;
+ case 0x2:
+ x = 127;
+ break;
+ case 0x3:
+ y = 127;
+ x = 127;
+ break;
+ case 0x4:
+ y = 127;
+ break;
+ case 0x5:
+ y = 127;
+ x = -127;
+ break;
+ case 0x6:
+ x = -127;
+ break;
+ case 0x7:
+ y = -127;
+ x = -127;
+ break;
+ default:
+ break;
+ }
+
+ input_report_abs(udraw->joy_input_dev, ABS_X, x);
+ input_report_abs(udraw->joy_input_dev, ABS_Y, y);
+
+ input_sync(udraw->joy_input_dev);
+
+ /* For pen and touchpad */
+ x = y = 0;
+ if (touch != TOUCH_NONE) {
+ if (data[15] != 0x0F)
+ x = data[15] * 256 + data[17];
+ if (data[16] != 0x0F)
+ y = data[16] * 256 + data[18];
+ }
+
+ if (touch == TOUCH_FINGER) {
+ /* Save the last one-finger touch */
+ udraw->last_one_finger_x = x;
+ udraw->last_one_finger_y = y;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+ } else if (touch == TOUCH_TWOFINGER) {
+ /*
+ * We have a problem because x/y is the one for the
+ * second finger but we want the first finger given
+ * to user-space otherwise it'll look as if it jumped.
+ *
+ * See the udraw struct definition for why this was
+ * implemented this way.
+ */
+ if (udraw->last_two_finger_x == -1) {
+ /* Save the position of the 2nd finger */
+ udraw->last_two_finger_x = x;
+ udraw->last_two_finger_y = y;
+
+ x = udraw->last_one_finger_x;
+ y = udraw->last_one_finger_y;
+ } else {
+ /*
+ * Offset the 2-finger coords using the
+ * saved data from the first finger
+ */
+ x = x - (udraw->last_two_finger_x
+ - udraw->last_one_finger_x);
+ y = y - (udraw->last_two_finger_y
+ - udraw->last_one_finger_y);
+ }
+ }
+
+ /* touchpad */
+ if (touch == TOUCH_FINGER || touch == TOUCH_TWOFINGER) {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 1);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER,
+ touch == TOUCH_FINGER);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP,
+ touch == TOUCH_TWOFINGER);
+
+ input_report_abs(udraw->touch_input_dev, ABS_X, x);
+ input_report_abs(udraw->touch_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP, 0);
+ }
+ input_sync(udraw->touch_input_dev);
+
+ /* pen */
+ if (touch == TOUCH_PEN) {
+ int level;
+
+ level = clamp(data[13] - PRESSURE_OFFSET,
+ 0, MAX_PRESSURE);
+
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, (level != 0));
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 1);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, level);
+ input_report_abs(udraw->pen_input_dev, ABS_X, x);
+ input_report_abs(udraw->pen_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 0);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, 0);
+ }
+ input_sync(udraw->pen_input_dev);
+
+ /* accel */
+ x = (data[19] + (data[20] << 8));
+ x = clamp_accel(x, AXIS_X);
+ y = (data[21] + (data[22] << 8));
+ y = clamp_accel(y, AXIS_Y);
+ z = (data[23] + (data[24] << 8));
+ z = clamp_accel(z, AXIS_Z);
+ input_report_abs(udraw->accel_input_dev, ABS_X, x);
+ input_report_abs(udraw->accel_input_dev, ABS_Y, y);
+ input_report_abs(udraw->accel_input_dev, ABS_Z, z);
+ input_sync(udraw->accel_input_dev);
+
+ /* let hidraw and hiddev handle the report */
+ return 0;
+}
+
+static int udraw_open(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ return hid_hw_open(udraw->hdev);
+}
+
+static void udraw_close(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ hid_hw_close(udraw->hdev);
+}
+
+static struct input_dev *allocate_and_setup(struct hid_device *hdev,
+ const char *name)
+{
+ struct input_dev *input_dev;
+
+ input_dev = devm_input_allocate_device(&hdev->dev);
+ if (!input_dev)
+ return NULL;
+
+ input_dev->name = name;
+ input_dev->phys = hdev->phys;
+ input_dev->dev.parent = &hdev->dev;
+ input_dev->open = udraw_open;
+ input_dev->close = udraw_close;
+ input_dev->uniq = hdev->uniq;
+ input_dev->id.bustype = hdev->bus;
+ input_dev->id.vendor = hdev->vendor;
+ input_dev->id.product = hdev->product;
+ input_dev->id.version = hdev->version;
+ input_set_drvdata(input_dev, hid_get_drvdata(hdev));
+
+ return input_dev;
+}
+
+static bool udraw_setup_touch(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Touchpad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->touch_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_pen(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Pen");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, MAX_PRESSURE, 0, 0);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_PEN, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->pen_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_accel(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Accelerometer");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS);
+
+ /* 1G accel is reported as ~256, so clamp to 2G */
+ input_set_abs_params(input_dev, ABS_X, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Z, -512, 512, 0, 0);
+
+ set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
+
+ udraw->accel_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_joypad(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Joypad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+
+ set_bit(BTN_SOUTH, input_dev->keybit);
+ set_bit(BTN_NORTH, input_dev->keybit);
+ set_bit(BTN_EAST, input_dev->keybit);
+ set_bit(BTN_WEST, input_dev->keybit);
+ set_bit(BTN_SELECT, input_dev->keybit);
+ set_bit(BTN_START, input_dev->keybit);
+ set_bit(BTN_MODE, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, -127, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -127, 127, 0, 0);
+
+ udraw->joy_input_dev = input_dev;
+
+ return true;
+}
+
+static int udraw_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct udraw *udraw;
+ int ret;
+
+ udraw = devm_kzalloc(&hdev->dev, sizeof(struct udraw), GFP_KERNEL);
+ if (!udraw)
+ return -ENOMEM;
+
+ udraw->hdev = hdev;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+
+ hid_set_drvdata(hdev, udraw);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (!udraw_setup_joypad(udraw, hdev) ||
+ !udraw_setup_touch(udraw, hdev) ||
+ !udraw_setup_pen(udraw, hdev) ||
+ !udraw_setup_accel(udraw, hdev)) {
+ hid_err(hdev, "could not allocate interfaces\n");
+ return -ENOMEM;
+ }
+
+ ret = input_register_device(udraw->joy_input_dev) ||
+ input_register_device(udraw->touch_input_dev) ||
+ input_register_device(udraw->pen_input_dev) ||
+ input_register_device(udraw->accel_input_dev);
+ if (ret) {
+ hid_err(hdev, "failed to register interfaces\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW | HID_CONNECT_DRIVER);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id udraw_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, udraw_devices);
+
+static struct hid_driver udraw_driver = {
+ .name = "hid-udraw",
+ .id_table = udraw_devices,
+ .raw_event = udraw_raw_event,
+ .probe = udraw_probe,
+};
+module_hid_driver(udraw_driver);
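
udraw_raw_event() above decodes a fixed 27-byte report. As a reading aid, a standalone sketch of the fields the driver actually reads (offsets taken from the code; the struct and decoder are illustrative, not part of the driver):

	#include <stdint.h>

	/* Sketch: fields udraw_raw_event() extracts from a 27-byte report. */
	struct udraw_sample {
		int touch;	/* data[11]: 0x00 none, 0x40 pen, 0x80 finger, else 2 fingers */
		int x, y;	/* tablet coordinates, 1920x1080 resolution */
		int pressure;	/* pen pressure after the 113 offset */
		int ax, ay, az;	/* raw accelerometer counts, ~1G reported as ~256 */
	};

	static void udraw_decode(const uint8_t data[27], struct udraw_sample *s)
	{
		s->touch = data[11];
		s->x = data[15] != 0x0F ? data[15] * 256 + data[17] : 0;
		s->y = data[16] != 0x0F ? data[16] * 256 + data[18] : 0;
		s->pressure = data[13] > 113 ? data[13] - 113 : 0;
		s->ax = data[19] | (data[20] << 8);
		s->ay = data[21] | (data[22] << 8);
		s->az = data[23] | (data[24] << 8);
	}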
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b3ec4f2de875..78fb32a7b103 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm.h>
@@ -37,10 +38,15 @@
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/of.h>
-#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
#include <linux/i2c/i2c-hid.h>
+#include "../hid-ids.h"
+
+/* quirks to control the device */
+#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
+
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
@@ -143,10 +149,9 @@ struct i2c_hid {
char *argsbuf; /* Command arguments buffer */
unsigned long flags; /* device flags */
+ unsigned long quirks; /* Various quirks */
wait_queue_head_t wait; /* For waiting the interrupt */
- struct gpio_desc *desc;
- int irq;
struct i2c_hid_platform_data pdata;
@@ -154,6 +159,39 @@ struct i2c_hid {
struct mutex reset_lock;
};
+static const struct i2c_hid_quirks {
+ __u16 idVendor;
+ __u16 idProduct;
+ __u32 quirks;
+} i2c_hid_quirks[] = {
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { 0, 0 }
+};
+
+/*
+ * i2c_hid_lookup_quirk: return any quirks associated with a I2C HID device
+ * @idVendor: the 16-bit vendor ID
+ * @idProduct: the 16-bit product ID
+ *
+ * Returns: a u32 quirks value.
+ */
+static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
+{
+ u32 quirks = 0;
+ int n;
+
+ for (n = 0; i2c_hid_quirks[n].idVendor; n++)
+ if (i2c_hid_quirks[n].idVendor == idVendor &&
+ (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
+ i2c_hid_quirks[n].idProduct == idProduct))
+ quirks = i2c_hid_quirks[n].quirks;
+
+ return quirks;
+}
+
static int __i2c_hid_command(struct i2c_client *client,
const struct i2c_hid_cmd *command, u8 reportID,
u8 reportType, u8 *args, int args_len,
@@ -346,11 +384,27 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
i2c_hid_dbg(ihid, "%s\n", __func__);
+ /*
+ * Some devices require a command to wake up before power-on.
+ * The call returns an error (EREMOTEIO), but the device is
+ * triggered and activated. After that, it behaves like a normal device.
+ */
+ if (power_state == I2C_HID_PWR_ON &&
+ ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
+ ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
+
+ /* Device was already activated */
+ if (!ret)
+ goto set_pwr_exit;
+ }
+
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
+
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
+set_pwr_exit:
return ret;
}
@@ -716,9 +770,11 @@ static int i2c_hid_start(struct hid_device *hid)
i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
if (bufsize > ihid->bufsize) {
+ disable_irq(client->irq);
i2c_hid_free_buffers(ihid);
ret = i2c_hid_alloc_buffers(ihid, bufsize);
+ enable_irq(client->irq);
if (ret)
return ret;
@@ -806,18 +862,21 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
static int i2c_hid_init_irq(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
+ unsigned long irqflags = 0;
int ret;
- dev_dbg(&client->dev, "Requesting IRQ: %d\n", ihid->irq);
+ dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
- ret = request_threaded_irq(ihid->irq, NULL, i2c_hid_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- client->name, ihid);
+ if (!irq_get_trigger_type(client->irq))
+ irqflags = IRQF_TRIGGER_LOW;
+
+ ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
+ irqflags | IRQF_ONESHOT, client->name, ihid);
if (ret < 0) {
dev_warn(&client->dev,
"Could not register for %s interrupt, irq = %d,"
" ret = %d\n",
- client->name, ihid->irq, ret);
+ client->name, client->irq, ret);
return ret;
}
@@ -864,14 +923,6 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
}
#ifdef CONFIG_ACPI
-
-/* Default GPIO mapping */
-static const struct acpi_gpio_params i2c_hid_irq_gpio = { 0, 0, true };
-static const struct acpi_gpio_mapping i2c_hid_acpi_gpios[] = {
- { "gpios", &i2c_hid_irq_gpio, 1 },
- { },
-};
-
static int i2c_hid_acpi_pdata(struct i2c_client *client,
struct i2c_hid_platform_data *pdata)
{
@@ -882,7 +933,6 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
union acpi_object *obj;
struct acpi_device *adev;
acpi_handle handle;
- int ret;
handle = ACPI_HANDLE(&client->dev);
if (!handle || acpi_bus_get_device(handle, &adev))
@@ -898,9 +948,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
pdata->hid_descriptor_address = obj->integer.value;
ACPI_FREE(obj);
- /* GPIOs are optional */
- ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
- return ret < 0 && ret != -ENXIO ? ret : 0;
+ return 0;
}
static const struct acpi_device_id i2c_hid_acpi_match[] = {
@@ -964,6 +1012,19 @@ static int i2c_hid_probe(struct i2c_client *client,
dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
+ if (!client->irq) {
+ dev_err(&client->dev,
+ "HID over i2c has not been provided an Int IRQ\n");
+ return -EINVAL;
+ }
+
+ if (client->irq < 0) {
+ if (client->irq != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "HID over i2c doesn't have a valid IRQ\n");
+ return client->irq;
+ }
+
ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
if (!ihid)
return -ENOMEM;
@@ -983,23 +1044,6 @@ static int i2c_hid_probe(struct i2c_client *client,
ihid->pdata = *platform_data;
}
- if (client->irq > 0) {
- ihid->irq = client->irq;
- } else if (ACPI_COMPANION(&client->dev)) {
- ihid->desc = gpiod_get(&client->dev, NULL, GPIOD_IN);
- if (IS_ERR(ihid->desc)) {
- dev_err(&client->dev, "Failed to get GPIO interrupt\n");
- return PTR_ERR(ihid->desc);
- }
-
- ihid->irq = gpiod_to_irq(ihid->desc);
- if (ihid->irq < 0) {
- gpiod_put(ihid->desc);
- dev_err(&client->dev, "Failed to convert GPIO to IRQ\n");
- return ihid->irq;
- }
- }
-
i2c_set_clientdata(client, ihid);
ihid->client = client;
@@ -1050,6 +1094,8 @@ static int i2c_hid_probe(struct i2c_client *client,
client->name, hid->vendor, hid->product);
strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+ ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1064,16 +1110,13 @@ err_mem_free:
hid_destroy_device(hid);
err_irq:
- free_irq(ihid->irq, ihid);
+ free_irq(client->irq, ihid);
err_pm:
pm_runtime_put_noidle(&client->dev);
pm_runtime_disable(&client->dev);
err:
- if (ihid->desc)
- gpiod_put(ihid->desc);
-
i2c_hid_free_buffers(ihid);
kfree(ihid);
return ret;
@@ -1092,18 +1135,13 @@ static int i2c_hid_remove(struct i2c_client *client)
hid = ihid->hid;
hid_destroy_device(hid);
- free_irq(ihid->irq, ihid);
+ free_irq(client->irq, ihid);
if (ihid->bufsize)
i2c_hid_free_buffers(ihid);
- if (ihid->desc)
- gpiod_put(ihid->desc);
-
kfree(ihid);
- acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev));
-
return 0;
}
@@ -1142,11 +1180,11 @@ static int i2c_hid_suspend(struct device *dev)
/* Save some power */
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(ihid->irq);
+ disable_irq(client->irq);
}
if (device_may_wakeup(&client->dev)) {
- wake_status = enable_irq_wake(ihid->irq);
+ wake_status = enable_irq_wake(client->irq);
if (!wake_status)
ihid->irq_wake_enabled = true;
else
@@ -1166,7 +1204,7 @@ static int i2c_hid_resume(struct device *dev)
int wake_status;
if (device_may_wakeup(&client->dev) && ihid->irq_wake_enabled) {
- wake_status = disable_irq_wake(ihid->irq);
+ wake_status = disable_irq_wake(client->irq);
if (!wake_status)
ihid->irq_wake_enabled = false;
else
@@ -1179,7 +1217,7 @@ static int i2c_hid_resume(struct device *dev)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- enable_irq(ihid->irq);
+ enable_irq(client->irq);
ret = i2c_hid_hwreset(client);
if (ret)
return ret;
@@ -1197,19 +1235,17 @@ static int i2c_hid_resume(struct device *dev)
static int i2c_hid_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(ihid->irq);
+ disable_irq(client->irq);
return 0;
}
static int i2c_hid_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- enable_irq(ihid->irq);
+ enable_irq(client->irq);
i2c_hid_set_power(client, I2C_HID_PWR_ON);
return 0;
}
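The i2c-hid hunks above drop the driver's private ihid->irq/GPIO lookup and rely
entirely on the interrupt the I2C core resolves into client->irq, so probe now
validates that value up front. A minimal sketch of the resulting pattern, with an
illustrative function name and simplified messages (not the driver's exact code):

    static int example_i2c_probe(struct i2c_client *client)
    {
            if (!client->irq) {
                    dev_err(&client->dev, "no interrupt provided\n");
                    return -EINVAL;
            }

            if (client->irq < 0) {
                    /* -EPROBE_DEFER only means the IRQ provider is not ready yet */
                    if (client->irq != -EPROBE_DEFER)
                            dev_err(&client->dev, "invalid IRQ\n");
                    return client->irq;
            }

            /* from here on, request_irq()/free_irq() use client->irq directly */
            return 0;
    }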
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 0c9ac4d5d850..842d8416a7a6 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -19,7 +19,6 @@
#include <linux/jiffies.h>
#include "client.h"
#include "hw-ish.h"
-#include "utils.h"
#include "hbm.h"
/* For FW reset flow */
@@ -310,6 +309,7 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
((uint32_t)tv_utc.tv_usec);
ts_format.ts1_source = HOST_SYSTEM_TIME_USEC;
ts_format.ts2_source = HOST_UTC_TIME_USEC;
+ ts_format.reserved = 0;
time_update.primary_host_time = usec_system;
time_update.secondary_host_time = usec_utc;
@@ -427,6 +427,59 @@ static int ipc_send_mng_msg(struct ishtp_device *dev, uint32_t msg_code,
sizeof(uint32_t) + size);
}
+#define WAIT_FOR_FW_RDY 0x1
+#define WAIT_FOR_INPUT_RDY 0x2
+
+/**
+ * timed_wait_for_timeout() - wait for a specific event with timeout
+ * @dev: ISHTP device pointer
+ * @condition: indicate the condition for waiting
+ * @timeinc: time slice for every wait cycle, in ms
+ * @timeout: time in ms for timeout
+ *
+ * This function polls for the specified event in a loop whose period is
+ * given by timeinc. Hitting the timeout results in a failure return.
+ *
+ * Return: 0 for success else failure code
+ */
+static int timed_wait_for_timeout(struct ishtp_device *dev, int condition,
+ unsigned int timeinc, unsigned int timeout)
+{
+ bool complete = false;
+ int ret;
+
+ do {
+ if (condition == WAIT_FOR_FW_RDY) {
+ complete = ishtp_fw_is_ready(dev);
+ } else if (condition == WAIT_FOR_INPUT_RDY) {
+ complete = ish_is_input_ready(dev);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!complete) {
+ unsigned long left_time;
+
+ left_time = msleep_interruptible(timeinc);
+ timeout -= (timeinc - left_time);
+ }
+ } while (!complete && timeout > 0);
+
+ if (complete)
+ ret = 0;
+ else
+ ret = -EBUSY;
+
+out:
+ return ret;
+}
+
+#define TIME_SLICE_FOR_FW_RDY_MS 100
+#define TIME_SLICE_FOR_INPUT_RDY_MS 100
+#define TIMEOUT_FOR_FW_RDY_MS 2000
+#define TIMEOUT_FOR_INPUT_RDY_MS 2000
+
/**
* ish_fw_reset_handler() - FW reset handler
* @dev: ishtp device pointer
@@ -456,8 +509,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
ishtp_reset_handler(dev);
if (!ish_is_input_ready(dev))
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE,
- ish_is_input_ready(dev), (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+ TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
/* ISH FW is dead */
if (!ish_is_input_ready(dev))
@@ -472,8 +525,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
sizeof(uint32_t));
/* Wait for ISH FW'es ILUP and ISHTP_READY */
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE, ishtp_fw_is_ready(dev),
- (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
if (!ishtp_fw_is_ready(dev)) {
/* ISH FW is dead */
uint32_t ish_status;
@@ -487,6 +540,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
return 0;
}
+#define TIMEOUT_FOR_HW_RDY_MS 300
+
/**
* ish_fw_reset_work_fn() - FW reset worker function
* @unused: not used
@@ -500,7 +555,7 @@ static void fw_reset_work_fn(struct work_struct *unused)
rv = ish_fw_reset_handler(ishtp_dev);
if (!rv) {
/* ISH is ILUP & ISHTP-ready. Restart ISHTP */
- schedule_timeout(HZ / 3);
+ msleep_interruptible(TIMEOUT_FOR_HW_RDY_MS);
ishtp_dev->recvd_hw_ready = 1;
wake_up_interruptible(&ishtp_dev->wait_hw_ready);
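The utils.h macros deleted below busy-waited on jiffies via schedule_timeout();
the new timed_wait_for_timeout() above instead sleeps in millisecond slices and
only charges the time actually slept, since msleep_interruptible() returns the
unexpired remainder when interrupted. A compressed sketch of the same loop for a
single wait condition (illustrative helper name, assuming ishtp_fw_is_ready() as
the predicate):

    static int wait_for_fw_ready(struct ishtp_device *dev,
                                 unsigned int slice_ms, unsigned int timeout_ms)
    {
            while (!ishtp_fw_is_ready(dev) && timeout_ms > 0) {
                    unsigned long left = msleep_interruptible(slice_ms);
                    unsigned int spent = slice_ms - left;

                    /* deduct only the time actually slept */
                    timeout_ms = (timeout_ms > spent) ? timeout_ms - spent : 0;
            }
            return ishtp_fw_is_ready(dev) ? 0 : -EBUSY;
    }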
diff --git a/drivers/hid/intel-ish-hid/ipc/utils.h b/drivers/hid/intel-ish-hid/ipc/utils.h
deleted file mode 100644
index 5a82123dc7b4..000000000000
--- a/drivers/hid/intel-ish-hid/ipc/utils.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Utility macros of ISH
- *
- * Copyright (c) 2014-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-#ifndef UTILS__H
-#define UTILS__H
-
-#define WAIT_FOR_SEND_SLICE (HZ / 10)
-#define WAIT_FOR_CONNECT_SLICE (HZ / 10)
-
-/*
- * Waits for specified event when a thread that triggers event can't signal
- * Also, waits *at_least* `timeinc` after condition is satisfied
- */
-#define timed_wait_for(timeinc, condition) \
- do { \
- int completed = 0; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- completed = (condition); \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- } while (!(completed)); \
- } while (0)
-
-
-/*
- * Waits for specified event when a thread that triggers event
- * can't signal with timeout (use whenever we may hang)
- */
-#define timed_wait_for_timeout(timeinc, condition, timeout) \
- do { \
- int t = timeout; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- t -= timeinc; \
- if (t <= 0) \
- break; \
- } while (!(condition)); \
- } while (0)
-
-#endif /* UTILS__H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 256521509d20..f4cbc744e657 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -585,14 +585,7 @@ int ishtp_bus_new_client(struct ishtp_device *dev)
*/
i = dev->fw_client_presentation_num - 1;
device_uuid = dev->fw_clients[i].props.protocol_name;
- dev_name = kasprintf(GFP_KERNEL,
- "{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}",
- device_uuid.b[3], device_uuid.b[2], device_uuid.b[1],
- device_uuid.b[0], device_uuid.b[5], device_uuid.b[4],
- device_uuid.b[7], device_uuid.b[6], device_uuid.b[8],
- device_uuid.b[9], device_uuid.b[10], device_uuid.b[11],
- device_uuid.b[12], device_uuid.b[13], device_uuid.b[14],
- device_uuid.b[15]);
+ dev_name = kasprintf(GFP_KERNEL, "{%pUL}", device_uuid.b);
if (!dev_name)
return -ENOMEM;
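The bus.c hunk above replaces the sixteen hand-ordered "%02X" arguments with the
kernel's %pU printk extension; the "L" suffix selects the little-endian field
order with uppercase hex digits, matching the byte shuffling the old format
string performed. A short sketch of the same idiom:

    /* device_uuid.b is the 16-byte UUID buffer; output looks like
     * {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} */
    dev_name = kasprintf(GFP_KERNEL, "{%pUL}", device_uuid.b);
    if (!dev_name)
            return -ENOMEM;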
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 74bffee60774..59460b66e689 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -378,11 +378,10 @@ static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
list_for_each_entry(cl, &dev->cl_list, link) {
if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
cl->state = ISHTP_CL_DISCONNECTED;
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
@@ -431,11 +430,10 @@ static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
cl->state = ISHTP_CL_DISCONNECTED;
cl->status = -ENODEV;
}
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
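In both hbm.c hunks the wake-up moves inside the matching branch: when
list_for_each_entry() completes without hitting the break, the iterator no
longer points at a valid client entry, so the old "if (cl)" check after the loop
was always true and could act on a bogus pointer. The resulting pattern, sketched
with the disconnect case:

    list_for_each_entry(cl, &dev->cl_list, link) {
            if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
                    cl->state = ISHTP_CL_DISCONNECTED;
                    wake_up_interruptible(&cl->wait_ctrl_res);
                    break;  /* wake only the client that matched */
            }
    }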
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ae83af649a60..333108ef18cf 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1459,7 +1459,7 @@ static int hid_post_reset(struct usb_interface *intf)
rdesc = kmalloc(hid->dev_rsize, GFP_KERNEL);
if (!rdesc) {
dbg_hid("couldn't allocate rdesc memory (post_reset)\n");
- return 1;
+ return -ENOMEM;
}
status = hid_get_class_descriptor(dev,
interface->desc.bInterfaceNumber,
@@ -1467,13 +1467,13 @@ static int hid_post_reset(struct usb_interface *intf)
if (status < 0) {
dbg_hid("reading report descriptor failed (post_reset)\n");
kfree(rdesc);
- return 1;
+ return status;
}
status = memcmp(rdesc, hid->dev_rdesc, hid->dev_rsize);
kfree(rdesc);
if (status != 0) {
dbg_hid("report descriptor changed\n");
- return 1;
+ return -EPERM;
}
/* No need to do another reset or clear a halted endpoint */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e6cfd323babc..b3e01c82af05 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -82,6 +82,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
@@ -101,8 +103,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index b4800ea891cb..d303e413306d 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -210,7 +210,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
void wacom_wac_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage);
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value);
void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
void wacom_battery_work(struct work_struct *work);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5e7a5648e708..b9779bcbd140 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -122,6 +122,7 @@ static void wacom_feature_mapping(struct hid_device *hdev,
struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
u8 *data;
int ret;
+ int n;
switch (usage->hid) {
case HID_DG_CONTACTMAX:
@@ -159,22 +160,48 @@ static void wacom_feature_mapping(struct hid_device *hdev,
case HID_UP_DIGITIZER:
if (field->report->id == 0x0B &&
- (field->application == WACOM_G9_DIGITIZER ||
- field->application == WACOM_G11_DIGITIZER)) {
+ (field->application == WACOM_HID_G9_PEN ||
+ field->application == WACOM_HID_G11_PEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
- case WACOM_G9_PAGE:
- case WACOM_G11_PAGE:
+ case WACOM_HID_WD_DATAMODE:
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 2;
+ break;
+
+ case WACOM_HID_UP_G9:
+ case WACOM_HID_UP_G11:
if (field->report->id == 0x03 &&
- (field->application == WACOM_G9_TOUCHSCREEN ||
- field->application == WACOM_G11_TOUCHSCREEN)) {
+ (field->application == WACOM_HID_G9_TOUCHSCREEN ||
+ field->application == WACOM_HID_G11_TOUCHSCREEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
+ case WACOM_HID_WD_OFFSETLEFT:
+ case WACOM_HID_WD_OFFSETTOP:
+ case WACOM_HID_WD_OFFSETRIGHT:
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ /* read manually */
+ n = hid_report_len(field->report);
+ data = hid_alloc_report_buf(field->report, GFP_KERNEL);
+ if (!data)
+ break;
+ data[0] = field->report->id;
+ ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
+ data, n, WAC_CMD_RETRIES);
+ if (ret == n) {
+ ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT,
+ data, n, 0);
+ } else {
+ hid_warn(hdev, "%s: could not retrieve sensor offsets\n",
+ __func__);
+ }
+ kfree(data);
+ break;
}
}
@@ -240,6 +267,30 @@ static void wacom_usage_mapping(struct hid_device *hdev,
features->touch_max = 1;
}
+ /*
+ * ISDv4 devices which predate HID's adoption of the
+ * HID_DG_BARELSWITCH2 usage use 0x000D0000 in its
+ * position instead. We can accurately detect if a
+ * usage with that value should be HID_DG_BARRELSWITCH2
+ * based on the surrounding usages, which have remained
+ * constant across generations.
+ */
+ if (features->type == HID_GENERIC &&
+ usage->hid == 0x000D0000 &&
+ field->application == HID_DG_PEN &&
+ field->physical == HID_DG_STYLUS) {
+ int i = usage->usage_index;
+
+ if (i-4 >= 0 && i+1 < field->maxusage &&
+ field->usage[i-4].hid == HID_DG_TIPSWITCH &&
+ field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
+ field->usage[i-2].hid == HID_DG_ERASER &&
+ field->usage[i-1].hid == HID_DG_INVERT &&
+ field->usage[i+1].hid == HID_DG_INRANGE) {
+ usage->hid = HID_DG_BARRELSWITCH2;
+ }
+ }
+
switch (usage->hid) {
case HID_GD_X:
features->x_max = field->logical_maximum;
@@ -689,11 +740,6 @@ static int wacom_add_shared_data(struct hid_device *hdev)
return retval;
}
- if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
- wacom_wac->shared->touch = hdev;
- else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
- wacom_wac->shared->pen = hdev;
-
out:
mutex_unlock(&wacom_udev_list_lock);
return retval;
@@ -1916,6 +1962,19 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
/* shift everything including the terminator */
memmove(gap, gap+1, strlen(gap));
}
+
+ /* strip off excessive prefixing */
+ if (strstr(name, "Wacom Co.,Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co.,Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+ if (strstr(name, "Wacom Co., Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co., Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+
/* get rid of trailing whitespace */
if (name[strlen(name)-1] == ' ')
name[strlen(name)-1] = '\0';
@@ -1977,6 +2036,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
+ error = wacom_add_shared_data(hdev);
+ if (error)
+ goto fail;
+
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
* into debug mode for the touch part.
@@ -2017,9 +2080,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
wacom_update_name(wacom, wireless ? " (WL)" : "");
- error = wacom_add_shared_data(hdev);
- if (error)
- goto fail;
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+ wacom_wac->shared->touch = hdev;
+ else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+ wacom_wac->shared->pen = hdev;
if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
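The offset hunks in wacom_feature_mapping() above fetch a feature report by hand
and then replay it through the normal HID parsing path, so the offset usages get
processed even though the core never requested that report. wacom_get_report() is
the driver's retrying wrapper; a sketch of the same sequence using the generic
hid_hw_raw_request() (illustrative function name, simplified error handling):

    static void fetch_offset_feature(struct hid_device *hdev,
                                     struct hid_field *field)
    {
            int n = hid_report_len(field->report);
            u8 *data = hid_alloc_report_buf(field->report, GFP_KERNEL);
            int ret;

            if (!data)
                    return;

            data[0] = field->report->id;
            ret = hid_hw_raw_request(hdev, data[0], data, n,
                                     HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
            if (ret == n)
                    /* feed the bytes back through the report parser */
                    hid_report_raw_event(hdev, HID_FEATURE_REPORT, data, n, 0);
            else
                    hid_warn(hdev, "could not retrieve sensor offsets\n");

            kfree(data);
    }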
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1cb79925730d..b1a9a3ca6d56 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -41,6 +41,8 @@ MODULE_PARM_DESC(touch_arbitration, " on (Y) off (N)");
static void wacom_report_numbered_buttons(struct input_dev *input_dev,
int button_count, int mask);
+static int wacom_numbered_button_to_key(int n);
+
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@@ -588,6 +590,11 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
return 1;
}
+static int wacom_intuos_id_mangle(int tool_id)
+{
+ return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
+}
+
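wacom_intuos_id_mangle(), added above, exists because the hunks below repack the
tool-ID bookkeeping: the bits that used to be shifted in at bit 20 now land at
bit 16, and the 0x1xxxxx case labels shrink accordingly. The packed value is
expanded back to the legacy layout only when reported via ABS_MISC, so userspace
keeps seeing the historical IDs. A worked example, using a value taken from the
case labels in this diff:

    /* packed 0x10802, "Intuos4/5 13HD/24HD General Pen" */
    (0x10802 & ~0xFFF) << 4   /* = 0x100000 */
      | (0x10802 &  0xFFF)    /* = 0x000802 */
                              /* -> 0x100802, the old ID reported to userspace */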
static int wacom_intuos_get_tool_type(int tool_id)
{
int tool_type;
@@ -595,7 +602,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
switch (tool_id) {
case 0x812: /* Inking pen */
case 0x801: /* Intuos3 Inking pen */
- case 0x120802: /* Intuos4/5 Inking Pen */
+ case 0x12802: /* Intuos4/5 Inking Pen */
case 0x012:
tool_type = BTN_TOOL_PENCIL;
break;
@@ -610,11 +617,11 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
- case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
- case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
- case 0x160802: /* Cintiq 13HD Pro Pen */
- case 0x180802: /* DTH2242 Pen */
- case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x16802: /* Cintiq 13HD Pro Pen */
+ case 0x18802: /* DTH2242 Pen */
+ case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
tool_type = BTN_TOOL_PEN;
break;
@@ -638,6 +645,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
break;
case 0x82a: /* Eraser */
+ case 0x84a:
case 0x85a:
case 0x91a:
case 0xd1a:
@@ -648,12 +656,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
- case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
- case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
- case 0x18080a: /* DTH2242 Eraser */
- case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ case 0x1480a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+ case 0x1090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x1080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+ case 0x1680a: /* Cintiq 13HD Pro Pen Eraser */
+ case 0x1880a: /* DTH2242 Eraser */
+ case 0x1080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
tool_type = BTN_TOOL_RUBBER;
break;
@@ -662,7 +670,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x112:
case 0x913: /* Intuos3 Airbrush */
case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
- case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
+ case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
tool_type = BTN_TOOL_AIRBRUSH;
break;
@@ -693,7 +701,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
(data[6] << 4) + (data[7] >> 4);
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
- ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
+ ((data[7] & 0x0f) << 16) | ((data[8] & 0xf0) << 8);
wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
@@ -923,7 +931,7 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
* don't report events for invalid data
*/
/* older I4 styli don't work with new Cintiqs */
- if ((!((wacom->id[idx] >> 20) & 0x01) &&
+ if ((!((wacom->id[idx] >> 16) & 0x01) &&
(features->type == WACOM_21UX2)) ||
/* Only large Intuos support Lense Cursor */
(wacom->tool[idx] == BTN_TOOL_LENS &&
@@ -1059,7 +1067,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
break;
}
- input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+ input_report_abs(input, ABS_MISC,
+ wacom_intuos_id_mangle(wacom->id[idx])); /* report tool id */
input_report_key(input, wacom->tool[idx], 1);
input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->reporting_data = true;
@@ -1435,11 +1444,59 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
+static int wacom_equivalent_usage(int usage)
+{
+ if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
+ int subpage = (usage & 0xFF00) << 8;
+ int subusage = (usage & 0xFF);
+
+ if (subpage == WACOM_HID_SP_PAD ||
+ subpage == WACOM_HID_SP_BUTTON ||
+ subpage == WACOM_HID_SP_DIGITIZER ||
+ subpage == WACOM_HID_SP_DIGITIZERINFO ||
+ usage == WACOM_HID_WD_SENSE ||
+ usage == WACOM_HID_WD_SERIALHI ||
+ usage == WACOM_HID_WD_TOOLTYPE ||
+ usage == WACOM_HID_WD_DISTANCE ||
+ usage == WACOM_HID_WD_TOUCHSTRIP ||
+ usage == WACOM_HID_WD_TOUCHSTRIP2 ||
+ usage == WACOM_HID_WD_TOUCHRING ||
+ usage == WACOM_HID_WD_TOUCHRINGSTATUS) {
+ return usage;
+ }
+
+ if (subpage == HID_UP_UNDEFINED)
+ subpage = HID_UP_DIGITIZER;
+
+ return subpage | subusage;
+ }
+
+ return usage;
+}
+
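wacom_equivalent_usage() above folds most vendor-defined Wacom usages back onto
the standard digitizer page so the rest of the driver can keep matching on the
generic HID constants; only the listed Wacom-specific subpages and usages are
kept verbatim. A worked example of the folding (values from the constants in
this diff):

    /* usage 0xff0d0030 sits on the Wacom vendor page            */
    /* subpage  = (0x0030 & 0xFF00) << 8 = 0x00000000 (undefined) */
    /* subusage =  0x0030 & 0x00FF       = 0x30                   */
    /* undefined subpage is rewritten to HID_UP_DIGITIZER, so the */
    /* result is 0x000d0030, i.e. HID_DG_TIPPRESSURE              */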
static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
struct hid_field *field, __u8 type, __u16 code, int fuzz)
{
+ struct wacom *wacom = input_get_drvdata(input);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
int fmin = field->logical_minimum;
int fmax = field->logical_maximum;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ int resolution_code = code;
+
+ if (equivalent_usage == HID_DG_TWIST) {
+ resolution_code = ABS_RZ;
+ }
+
+ if (equivalent_usage == HID_GD_X) {
+ fmin += features->offset_left;
+ fmax -= features->offset_right;
+ }
+ if (equivalent_usage == HID_GD_Y) {
+ fmin += features->offset_top;
+ fmax -= features->offset_bottom;
+ }
usage->type = type;
usage->code = code;
@@ -1450,7 +1507,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_ABS:
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
input_abs_set_res(input, code,
- hidinput_calc_abs_res(field, code));
+ hidinput_calc_abs_res(field, resolution_code));
break;
case EV_KEY:
input_set_capability(input, EV_KEY, code);
@@ -1458,6 +1515,172 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_MSC:
input_set_capability(input, EV_MSC, code);
break;
+ case EV_SW:
+ input_set_capability(input, EV_SW, code);
+ break;
+ }
+}
+
+static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_X:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Y:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Z:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_BUTTONHOME:
+ case WACOM_HID_WD_BUTTONUP:
+ case WACOM_HID_WD_BUTTONDOWN:
+ case WACOM_HID_WD_BUTTONLEFT:
+ case WACOM_HID_WD_BUTTONRIGHT:
+ case WACOM_HID_WD_BUTTONCENTER:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHONOFF:
+ wacom_map_usage(input, usage, field, EV_SW, SW_MUTE_DEVICE, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP2:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RY, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHRING:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+
+ switch (equivalent_usage & 0xfffffff0) {
+ case WACOM_HID_WD_EXPRESSKEY00:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+}
+
+static void wacom_wac_pad_battery_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ wacom_wac->hid_data.bat_charging = value;
+ wacom_wac->hid_data.ps_connected = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+ }
+}
+
+static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->pad_input;
+ struct wacom_features *features = &wacom_wac->features;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ wacom_wac->hid_data.inrange_state |= value;
+ }
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_TOUCHRINGSTATUS:
+ break;
+
+ default:
+ features->input_event_flag = true;
+ input_event(input, usage->type, usage->code, value);
+ break;
+ }
+}
+
+static void wacom_wac_pad_pre_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ wacom_wac->hid_data.inrange_state = 0;
+}
+
+static void wacom_wac_pad_battery_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+
+ if (features->quirks & WACOM_QUIRK_BATTERY) {
+ int capacity = wacom_wac->hid_data.battery_capacity;
+ bool charging = wacom_wac->hid_data.bat_charging;
+ bool connected = wacom_wac->hid_data.bat_connected;
+ bool powered = wacom_wac->hid_data.ps_connected;
+
+ wacom_notify_battery(wacom_wac, capacity, charging,
+ connected, powered);
+ }
+}
+
+static void wacom_wac_pad_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ bool active = wacom_wac->hid_data.inrange_state != 0;
+
+ /* report prox for expresskey events */
+ if (wacom_equivalent_usage(report->field[0]->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ features->input_event_flag = true;
+ input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
+ }
+
+ if (features->input_event_flag) {
+ features->input_event_flag = false;
+ input_sync(input);
}
}
@@ -1466,25 +1689,43 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
break;
case HID_GD_Y:
wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4);
break;
+ case WACOM_HID_WD_DISTANCE:
+ case HID_GD_Z:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_DISTANCE, 0);
+ break;
case HID_DG_TIPPRESSURE:
wacom_map_usage(input, usage, field, EV_ABS, ABS_PRESSURE, 0);
break;
case HID_DG_INRANGE:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
break;
+ case HID_DG_BATTERYSTRENGTH:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
case HID_DG_INVERT:
wacom_map_usage(input, usage, field, EV_KEY,
BTN_TOOL_RUBBER, 0);
break;
+ case HID_DG_TILT_X:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_X, 0);
+ break;
+ case HID_DG_TILT_Y:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_Y, 0);
+ break;
+ case HID_DG_TWIST:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ break;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
@@ -1498,39 +1739,131 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
case HID_DG_TOOLSERIALNUMBER:
wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
break;
+ case WACOM_HID_WD_SENSE:
+ features->quirks |= WACOM_QUIRK_SENSE;
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
+ break;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
+ set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ break;
+ case WACOM_HID_WD_FINGERWHEEL:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ break;
}
}
-static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
+static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- /* checking which Tool / tip switch to send */
- switch (usage->hid) {
+ switch (equivalent_usage) {
+ case HID_GD_Z:
+ /*
+ * HID_GD_Z "should increase as the control's position is
+ * moved from high to low", while ABS_DISTANCE instead
+ * increases in value as the tool moves from low to high.
+ */
+ value = field->logical_maximum - value;
+ break;
case HID_DG_INRANGE:
wacom_wac->hid_data.inrange_state = value;
- return 0;
+ if (!(features->quirks & WACOM_QUIRK_SENSE))
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case HID_DG_BATTERYSTRENGTH:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
case HID_DG_INVERT:
wacom_wac->hid_data.invert_state = value;
- return 0;
+ return;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_wac->hid_data.tipswitch |= value;
- return 0;
+ return;
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+ wacom_wac->serial[0] |= value;
+ return;
+ case WACOM_HID_WD_SENSE:
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ /*
+ * Non-USI EMR devices may contain additional tool type
+ * information here. See WACOM_HID_WD_TOOLTYPE case for
+ * more details.
+ */
+ if (value >> 20 == 1) {
+ wacom_wac->id[0] |= value & 0xFFFFF;
+ }
+ return;
+ case WACOM_HID_WD_TOOLTYPE:
+ /*
+ * Some devices (MobileStudio Pro, and possibly later
+ * devices as well) do not return the complete tool
+ * type in their WACOM_HID_WD_TOOLTYPE usage. Use a
+ * bitwise OR so the complete value can be built
+ * up over time :(
+ */
+ wacom_wac->id[0] |= value;
+ return;
+ case WACOM_HID_WD_OFFSETLEFT:
+ if (features->offset_left && value != features->offset_left)
+ hid_warn(hdev, "%s: overriding exising left offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_left);
+ features->offset_left = value;
+ return;
+ case WACOM_HID_WD_OFFSETRIGHT:
+ if (features->offset_right && value != features->offset_right)
+ hid_warn(hdev, "%s: overriding exising right offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_right);
+ features->offset_right = value;
+ return;
+ case WACOM_HID_WD_OFFSETTOP:
+ if (features->offset_top && value != features->offset_top)
+ hid_warn(hdev, "%s: overriding exising top offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_top);
+ features->offset_top = value;
+ return;
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ if (features->offset_bottom && value != features->offset_bottom)
+ hid_warn(hdev, "%s: overriding exising bottom offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_bottom);
+ features->offset_bottom = value;
+ return;
}
/* send pen events only when touch is up or forced out
* or touch arbitration is off
*/
if (!usage->type || delay_pen_events(wacom_wac))
- return 0;
+ return;
- input_event(input, usage->type, usage->code, value);
+ /* send pen events only when the pen is in/entering/leaving proximity */
+ if (!wacom_wac->hid_data.inrange_state && !wacom_wac->tool[0])
+ return;
- return 0;
+ input_event(input, usage->type, usage->code, value);
}
static void wacom_wac_pen_pre_report(struct hid_device *hdev,
@@ -1546,24 +1879,53 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->pen_input;
bool prox = wacom_wac->hid_data.inrange_state;
+ bool range = wacom_wac->hid_data.sense_state;
- if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */
+ if (!wacom_wac->tool[0] && prox) { /* first in prox */
/* Going into proximity select tool */
- wacom_wac->tool[0] = wacom_wac->hid_data.invert_state ?
- BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom_wac->hid_data.invert_state)
+ wacom_wac->tool[0] = BTN_TOOL_RUBBER;
+ else if (wacom_wac->id[0])
+ wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]);
+ else
+ wacom_wac->tool[0] = BTN_TOOL_PEN;
+ }
/* keep pen state for touch events */
- wacom_wac->shared->stylus_in_proximity = prox;
+ wacom_wac->shared->stylus_in_proximity = range;
- if (!delay_pen_events(wacom_wac)) {
+ if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) {
+ int id = wacom_wac->id[0];
+
+ /*
+ * Non-USI EMR tools should have their IDs mangled to
+ * match the legacy behavior of wacom_intuos_general
+ */
+ if (wacom_wac->serial[0] >> 52 == 1)
+ id = wacom_intuos_id_mangle(id);
+
+ /*
+ * To ensure compatibility with xf86-input-wacom, we should
+ * report the BTN_TOOL_* event prior to the ABS_MISC or
+ * MSC_SERIAL events.
+ */
input_report_key(input, BTN_TOUCH,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], prox);
+ if (wacom_wac->serial[0]) {
+ input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+ input_report_abs(input, ABS_MISC, id);
+ }
wacom_wac->hid_data.tipswitch = false;
input_sync(input);
}
+
+ if (!prox) {
+ wacom_wac->tool[0] = 0;
+ wacom_wac->id[0] = 0;
+ }
}
static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
@@ -1573,8 +1935,9 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
if (touch_max == 1)
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
@@ -1644,13 +2007,14 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
}
}
-static int wacom_wac_finger_event(struct hid_device *hdev,
+static void wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_wac->hid_data.x = value;
break;
@@ -1673,11 +2037,9 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
if (usage->usage_index + 1 == field->report_count) {
- if (usage->hid == wacom_wac->hid_data.last_slot_field)
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
-
- return 0;
}
static void wacom_wac_finger_pre_report(struct hid_device *hdev,
@@ -1762,28 +2124,30 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
/* currently, only direct devices have proper hid report descriptors */
features->device_type |= WACOM_DEVICETYPE_DIRECT;
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_usage_mapping(hdev, field, usage);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_usage_mapping(hdev, field, usage);
+ if (WACOM_PAD_FIELD(field))
+ wacom_wac_pad_usage_mapping(hdev, field, usage);
+ else if (WACOM_PEN_FIELD(field))
+ wacom_wac_pen_usage_mapping(hdev, field, usage);
+ else if (WACOM_FINGER_FIELD(field))
+ wacom_wac_finger_usage_mapping(hdev, field, usage);
}
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
if (wacom->wacom_wac.features.type != HID_GENERIC)
- return 0;
-
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_event(hdev, field, usage, value);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_event(hdev, field, usage, value);
+ return;
- return 0;
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_event(hdev, field, usage, value);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_event(hdev, field, usage, value);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_event(hdev, field, usage, value);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_event(hdev, field, usage, value);
}
static void wacom_report_events(struct hid_device *hdev, struct hid_report *report)
@@ -1814,19 +2178,23 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
if (wacom_wac->features.type != HID_GENERIC)
return;
- if (WACOM_PEN_FIELD(field))
+ if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+ wacom_wac_pad_pre_report(hdev, report);
+ else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_pre_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_pre_report(hdev, report);
wacom_report_events(hdev, report);
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_report(hdev, report);
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_report(hdev, report);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_report(hdev, report);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_report(hdev, report);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_report(hdev, report);
}
static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -2399,6 +2767,8 @@ void wacom_setup_device_quirks(struct wacom *wacom)
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
+ if (features->numbered_buttons > 0)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
features->type == DTUS ||
(features->type >= INTUOS3S && features->type <= WACOM_MO)) {
@@ -2448,7 +2818,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
/*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
- * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
+ * (i.e., WACOM_HID_WD_DIGITIZER). We route pen packets back
* out through the HID_GENERIC device created for interface 1,
* so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
*/
@@ -2530,10 +2900,12 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
- input_set_abs_params(input_dev, ABS_X, features->x_min,
- features->x_max, features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, features->y_min,
- features->y_max, features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X, 0 + features->offset_left,
+ features->x_max - features->offset_right,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0 + features->offset_top,
+ features->y_max - features->offset_bottom,
+ features->y_fuzz, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0,
features->pressure_max, features->pressure_fuzz, 0);
@@ -2769,17 +3141,29 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
return 0;
}
+static int wacom_numbered_button_to_key(int n)
+{
+ if (n < 10)
+ return BTN_0 + n;
+ else if (n < 16)
+ return BTN_A + (n-10);
+ else if (n < 18)
+ return BTN_BASE + (n-16);
+ else
+ return 0;
+}
+
static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
int button_count)
{
int i;
- for (i = 0; i < button_count && i < 10; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
- for (i = 10; i < button_count && i < 16; i++)
- __set_bit(BTN_A + (i-10), input_dev->keybit);
- for (i = 16; i < button_count && i < 18; i++)
- __set_bit(BTN_BASE + (i-16), input_dev->keybit);
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ __set_bit(key, input_dev->keybit);
+ }
}
static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
@@ -2881,12 +3265,12 @@ static void wacom_report_numbered_buttons(struct input_dev *input_dev,
for (i = 0; i < wacom->led.count; i++)
wacom_update_led(wacom, button_count, mask, i);
- for (i = 0; i < button_count && i < 10; i++)
- input_report_key(input_dev, BTN_0 + i, mask & (1 << i));
- for (i = 10; i < button_count && i < 16; i++)
- input_report_key(input_dev, BTN_A + (i-10), mask & (1 << i));
- for (i = 16; i < button_count && i < 18; i++)
- input_report_key(input_dev, BTN_BASE + (i-16), mask & (1 << i));
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ input_report_key(input_dev, key, mask & (1 << i));
+ }
}
int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
@@ -2906,8 +3290,12 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
__set_bit(ABS_MISC, input_dev->absbit);
/* kept for making legacy xf86-input-wacom accepting the pad */
- input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_X].minimum ||
+ input_dev->absinfo[ABS_X].maximum)))
+ input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_Y].minimum ||
+ input_dev->absinfo[ABS_Y].maximum)))
+ input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
/* kept for making udev and libwacom accepting the pad */
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -3027,6 +3415,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
+ case HID_GENERIC:
+ break;
+
default:
/* no pad supported */
return -ENODEV;
@@ -3233,26 +3624,30 @@ static const struct wacom_features wacom_features_0x317 =
INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xF4 =
- { "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
+ { "Wacom Cintiq 24HD", 104480, 65600, 2047, 63,
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xF8 =
- { "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
+ { "Wacom Cintiq 24HD touch", 104480, 65600, 2047, 63, /* Pen */
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x32A =
- { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x32B =
- { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD touch", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C };
static const struct wacom_features wacom_features_0x32C =
{ "Wacom Cintiq 27QHD touch", .type = WACOM_27QHDT,
@@ -3267,13 +3662,15 @@ static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
static const struct wacom_features wacom_features_0x304 =
- { "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
+ { "Wacom Cintiq 13HD", 59552, 33848, 1023, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x333 =
- { "Wacom Cintiq 13HD touch", 59152, 33448, 2047, 63,
+ { "Wacom Cintiq 13HD touch", 59552, 33848, 2047, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x335 };
static const struct wacom_features wacom_features_0x335 =
{ "Wacom Cintiq 13HD touch", .type = WACOM_24HDT, /* Touch */
@@ -3290,42 +3687,50 @@ static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", 34623, 19553, 511, 0,
DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xFB =
- { "Wacom DTU1031", 21896, 13760, 511, 0,
+ { "Wacom DTU1031", 22096, 13960, 511, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x32F =
- { "Wacom DTU1031X", 22472, 12728, 511, 0,
+ { "Wacom DTU1031X", 22672, 12928, 511, 0,
DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 0,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x336 =
- { "Wacom DTU1141", 23472, 13203, 1023, 0,
+ { "Wacom DTU1141", 23672, 13403, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x57 =
- { "Wacom DTK2241", 95640, 54060, 2047, 63,
+ { "Wacom DTK2241", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x59 = /* Pen */
- { "Wacom DTH2242", 95640, 54060, 2047, 63,
+ { "Wacom DTH2242", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
static const struct wacom_features wacom_features_0x5D = /* Touch */
{ "Wacom DTH2242", .type = WACOM_24HDT,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xCC =
- { "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
+ { "Wacom Cintiq 21UX2", 87200, 65600, 2047, 63,
WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xFA =
- { "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HD", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x5B =
- { "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HDT", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
static const struct wacom_features wacom_features_0x5E =
{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
@@ -3469,18 +3874,20 @@ static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", 12800, 8000, 255, 0,
TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x307 =
- { "Wacom ISDv5 307", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 307", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
static const struct wacom_features wacom_features_0x309 =
{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x30A =
- { "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 30A", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
static const struct wacom_features wacom_features_0x30C =
{ "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */
@@ -3496,6 +3903,7 @@ static const struct wacom_features wacom_features_0x325 =
{ "Wacom ISDv5 325", 59552, 33848, 2047, 63,
CINTIQ_COMPANION_2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 11,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x326 };
static const struct wacom_features wacom_features_0x326 = /* Touch */
{ "Wacom ISDv5 326", .type = HID_GENERIC, .oVid = USB_VENDOR_ID_WACOM,
@@ -3525,8 +3933,9 @@ static const struct wacom_features wacom_features_0x33E =
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x343 =
- { "Wacom DTK1651", 34616, 19559, 1023, 0,
+ { "Wacom DTK1651", 34816, 19759, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 324c40b0c119..fb0e50acb10d 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -74,6 +74,7 @@
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
+#define WACOM_QUIRK_SENSE 0x0002
#define WACOM_QUIRK_BATTERY 0x0008
/* device types */
@@ -84,23 +85,66 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_DEVICETYPE_DIRECT 0x0010
-#define WACOM_VENDORDEFINED_PEN 0xff0d0001
-#define WACOM_G9_PAGE 0xff090000
-#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
-#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
-#define WACOM_G11_PAGE 0xff110000
-#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
-#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
+#define WACOM_HID_UP_WACOMDIGITIZER 0xff0d0000
+#define WACOM_HID_SP_PAD 0x00040000
+#define WACOM_HID_SP_BUTTON 0x00090000
+#define WACOM_HID_SP_DIGITIZER 0x000d0000
+#define WACOM_HID_SP_DIGITIZERINFO 0x00100000
+#define WACOM_HID_WD_DIGITIZER (WACOM_HID_UP_WACOMDIGITIZER | 0x01)
+#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
+#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
+#define WACOM_HID_WD_TOOLTYPE (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
+#define WACOM_HID_WD_DISTANCE (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
+#define WACOM_HID_WD_TOUCHSTRIP (WACOM_HID_UP_WACOMDIGITIZER | 0x0136)
+#define WACOM_HID_WD_TOUCHSTRIP2 (WACOM_HID_UP_WACOMDIGITIZER | 0x0137)
+#define WACOM_HID_WD_TOUCHRING (WACOM_HID_UP_WACOMDIGITIZER | 0x0138)
+#define WACOM_HID_WD_TOUCHRINGSTATUS (WACOM_HID_UP_WACOMDIGITIZER | 0x0139)
+#define WACOM_HID_WD_ACCELEROMETER_X (WACOM_HID_UP_WACOMDIGITIZER | 0x0401)
+#define WACOM_HID_WD_ACCELEROMETER_Y (WACOM_HID_UP_WACOMDIGITIZER | 0x0402)
+#define WACOM_HID_WD_ACCELEROMETER_Z (WACOM_HID_UP_WACOMDIGITIZER | 0x0403)
+#define WACOM_HID_WD_BATTERY_CHARGING (WACOM_HID_UP_WACOMDIGITIZER | 0x0404)
+#define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
+#define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
+#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
+#define WACOM_HID_WD_BUTTONHOME (WACOM_HID_UP_WACOMDIGITIZER | 0x0990)
+#define WACOM_HID_WD_BUTTONUP (WACOM_HID_UP_WACOMDIGITIZER | 0x0991)
+#define WACOM_HID_WD_BUTTONDOWN (WACOM_HID_UP_WACOMDIGITIZER | 0x0992)
+#define WACOM_HID_WD_BUTTONLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0993)
+#define WACOM_HID_WD_BUTTONRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0994)
+#define WACOM_HID_WD_BUTTONCENTER (WACOM_HID_UP_WACOMDIGITIZER | 0x0995)
+#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0996)
+#define WACOM_HID_WD_FINGERWHEEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0d03)
+#define WACOM_HID_WD_OFFSETLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d30)
+#define WACOM_HID_WD_OFFSETTOP (WACOM_HID_UP_WACOMDIGITIZER | 0x0d31)
+#define WACOM_HID_WD_OFFSETRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d32)
+#define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
+#define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
+#define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
+#define WACOM_HID_UP_G9 0xff090000
+#define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02)
+#define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11)
+#define WACOM_HID_UP_G11 0xff110000
+#define WACOM_HID_G11_PEN (WACOM_HID_UP_G11 | 0x02)
+#define WACOM_HID_G11_TOUCHSCREEN (WACOM_HID_UP_G11 | 0x11)
+
+#define WACOM_PAD_FIELD(f) (((f)->physical == HID_DG_TABLETFUNCTIONKEY) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERFNKEYS) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERINFO))
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_PEN) || \
((f)->application == HID_DG_PEN) || \
((f)->application == HID_DG_DIGITIZER) || \
- ((f)->application == WACOM_VENDORDEFINED_PEN))
+ ((f)->application == WACOM_HID_WD_DIGITIZER) || \
+ ((f)->application == WACOM_HID_G9_PEN) || \
+ ((f)->application == WACOM_HID_G11_PEN))
#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
((f)->physical == HID_DG_FINGER) || \
- ((f)->application == HID_DG_TOUCHSCREEN))
+ ((f)->application == HID_DG_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G9_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G11_TOUCHSCREEN))
enum {
PENPARTNER = 0,
@@ -167,8 +211,10 @@ struct wacom_features {
int x_resolution;
int y_resolution;
int numbered_buttons;
- int x_min;
- int y_min;
+ int offset_left;
+ int offset_right;
+ int offset_top;
+ int offset_bottom;
int device_type;
int x_phy;
int y_phy;
@@ -186,6 +232,7 @@ struct wacom_features {
int pktlen;
bool check_for_hid_type;
int hid_type;
+ bool input_event_flag;
};
struct wacom_shared {
@@ -202,6 +249,7 @@ struct wacom_shared {
struct hid_data {
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
+ bool sense_state;
bool inrange_state;
bool invert_state;
bool tipswitch;
@@ -217,6 +265,10 @@ struct hid_data {
int last_slot_field;
int num_expected;
int num_received;
+ int battery_capacity;
+ int bat_charging;
+ int bat_connected;
+ int ps_connected;
};
struct wacom_remote_data {
@@ -234,7 +286,7 @@ struct wacom_wac {
unsigned char data[WACOM_PKGLEN_MAX];
int tool[2];
int id[2];
- __u32 serial[2];
+ __u64 serial[2];
bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 982936334537..07ec465f1095 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,6 +37,8 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(arizona->dapm);
int ret;
if (!haptics->arizona->dapm) {
@@ -66,7 +68,7 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}
- ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_enable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
@@ -81,7 +83,7 @@ static void arizona_haptics_work(struct work_struct *work)
}
} else {
/* This disable sequence will be a noop if already enabled */
- ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_disable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
@@ -140,11 +142,14 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
+ struct snd_soc_component *component;
cancel_work_sync(&haptics->work);
- if (haptics->arizona->dapm)
- snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
+ if (haptics->arizona->dapm) {
+ component = snd_soc_dapm_to_component(haptics->arizona->dapm);
+ snd_soc_component_disable_pin(component, "HAPTICS");
+ }
}
static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 90d6be3c26cc..83cf11312fd9 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->battery_dev, wm);
wm->battery_dev->dev.parent = dev;
- wm->battery_dev->dev.platform_data = pdata;
+ wm->battery_dev->dev.platform_data = pdata->batt_pdata;
ret = platform_device_add(wm->battery_dev);
if (ret < 0)
goto batt_reg_err;
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index ba4beb25d872..298c8dba0321 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -855,7 +855,7 @@ struct DTag { /* Display tags */
{ 0x8c, "Reason" },
{ 0x8d, "Calling party name" },
{ 0x8e, "Called party name" },
- { 0x8f, "Orignal called name" },
+ { 0x8f, "Original called name" },
{ 0x90, "Redirecting name" },
{ 0x91, "Connected name" },
{ 0x92, "Originating restrictions" },
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 02a5345a44a6..b7767da50c26 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -240,9 +240,17 @@ config DM_BUFIO
as a cache, holding recently-read blocks in memory and performing
delayed writes.
+config DM_DEBUG_BLOCK_MANAGER_LOCKING
+ bool "Block manager locking"
+ depends on DM_BUFIO
+ ---help---
+ Block manager locking can catch various metadata corruption issues.
+
+ If unsure, say N.
+
config DM_DEBUG_BLOCK_STACK_TRACING
bool "Keep stack trace of persistent data block lock holders"
- depends on STACKTRACE_SUPPORT && DM_BUFIO
+ depends on STACKTRACE_SUPPORT && DM_DEBUG_BLOCK_MANAGER_LOCKING
select STACKTRACE
---help---
Enable this for messages that may help debug problems with the
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 2d826927a3bf..9fb2ccac958a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -27,6 +27,7 @@
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
+#include <trace/events/block.h>
#include "md.h"
#include "bitmap.h"
@@ -208,11 +209,13 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
- struct md_rdev *rdev = NULL;
+ struct md_rdev *rdev;
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
+restart:
+ rdev = NULL;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
loff_t offset = mddev->bitmap_info.offset;
@@ -268,8 +271,8 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
page);
}
- if (wait)
- md_super_wait(mddev);
+ if (wait && md_super_wait(mddev) < 0)
+ goto restart;
return 0;
bad_alignment:
@@ -405,10 +408,10 @@ static int read_page(struct file *file, unsigned long index,
ret = -EIO;
out:
if (ret)
- printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n",
- (int)PAGE_SIZE,
- (unsigned long long)index << PAGE_SHIFT,
- ret);
+ pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
+ (int)PAGE_SIZE,
+ (unsigned long long)index << PAGE_SHIFT,
+ ret);
return ret;
}
@@ -416,6 +419,28 @@ out:
* bitmap file superblock operations
*/
+/*
+ * bitmap_wait_writes() should be called before writing any bitmap
+ * blocks, to ensure previous writes, particularly from
+ * bitmap_daemon_work(), have completed.
+ */
+static void bitmap_wait_writes(struct bitmap *bitmap)
+{
+ if (bitmap->storage.file)
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
+ else
+ /* Note that we ignore the return value. The writes
+ * might have failed, but that would just mean that
+ * some bits which should be cleared haven't been,
+ * which is safe. The relevant bitmap blocks will
+ * probably get written again, but there is no great
+ * loss if they aren't.
+ */
+ md_super_wait(bitmap->mddev);
+}
+
+
/* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap)
{
@@ -455,24 +480,24 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->storage.sb_page)
return;
sb = kmap_atomic(bitmap->storage.sb_page);
- printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
- printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
- printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
- printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n",
- *(__u32 *)(sb->uuid+0),
- *(__u32 *)(sb->uuid+4),
- *(__u32 *)(sb->uuid+8),
- *(__u32 *)(sb->uuid+12));
- printk(KERN_DEBUG " events: %llu\n",
- (unsigned long long) le64_to_cpu(sb->events));
- printk(KERN_DEBUG "events cleared: %llu\n",
- (unsigned long long) le64_to_cpu(sb->events_cleared));
- printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state));
- printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize));
- printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
- printk(KERN_DEBUG " sync size: %llu KB\n",
- (unsigned long long)le64_to_cpu(sb->sync_size)/2);
- printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
+ pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
+ pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
+ pr_debug(" version: %d\n", le32_to_cpu(sb->version));
+ pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
+ *(__u32 *)(sb->uuid+0),
+ *(__u32 *)(sb->uuid+4),
+ *(__u32 *)(sb->uuid+8),
+ *(__u32 *)(sb->uuid+12));
+ pr_debug(" events: %llu\n",
+ (unsigned long long) le64_to_cpu(sb->events));
+ pr_debug("events cleared: %llu\n",
+ (unsigned long long) le64_to_cpu(sb->events_cleared));
+ pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
+ pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
+ pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+ pr_debug(" sync size: %llu KB\n",
+ (unsigned long long)le64_to_cpu(sb->sync_size)/2);
+ pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
kunmap_atomic(sb);
}
@@ -506,14 +531,14 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) {
kunmap_atomic(sb);
- printk(KERN_ERR "bitmap chunksize not a power of 2\n");
+ pr_warn("bitmap chunksize not a power of 2\n");
return -EINVAL;
}
sb->chunksize = cpu_to_le32(chunksize);
daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
- printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
+ pr_debug("Choosing daemon_sleep default (5 sec)\n");
daemon_sleep = 5 * HZ;
}
sb->daemon_sleep = cpu_to_le32(daemon_sleep);
@@ -584,7 +609,7 @@ re_read:
/* to 4k blocks */
bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
- pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
+ pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
bitmap->cluster_slot, offset);
}
@@ -634,7 +659,7 @@ re_read:
else if (write_behind > COUNTER_MAX)
reason = "write-behind limit out of range (0 - 16383)";
if (reason) {
- printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
+ pr_warn("%s: invalid bitmap file superblock: %s\n",
bmname(bitmap), reason);
goto out;
}
@@ -648,18 +673,15 @@ re_read:
* bitmap's UUID and event counter to the mddev's
*/
if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
- printk(KERN_INFO
- "%s: bitmap superblock UUID mismatch\n",
- bmname(bitmap));
+ pr_warn("%s: bitmap superblock UUID mismatch\n",
+ bmname(bitmap));
goto out;
}
events = le64_to_cpu(sb->events);
if (!nodes && (events < bitmap->mddev->events)) {
- printk(KERN_INFO
- "%s: bitmap file is out of date (%llu < %llu) "
- "-- forcing full recovery\n",
- bmname(bitmap), events,
- (unsigned long long) bitmap->mddev->events);
+ pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
+ bmname(bitmap), events,
+ (unsigned long long) bitmap->mddev->events);
set_bit(BITMAP_STALE, &bitmap->flags);
}
}
@@ -679,8 +701,8 @@ out:
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
- pr_err("%s: Could not setup cluster service (%d)\n",
- bmname(bitmap), err);
+ pr_warn("%s: Could not setup cluster service (%d)\n",
+ bmname(bitmap), err);
goto out_no_sb;
}
bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
@@ -847,15 +869,13 @@ static void bitmap_file_kick(struct bitmap *bitmap)
ptr = file_path(bitmap->storage.file,
path, PAGE_SIZE);
- printk(KERN_ALERT
- "%s: kicking failed bitmap file %s from array!\n",
- bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
+ pr_warn("%s: kicking failed bitmap file %s from array!\n",
+ bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
kfree(path);
} else
- printk(KERN_ALERT
- "%s: disabling internal bitmap due to errors\n",
- bmname(bitmap));
+ pr_warn("%s: disabling internal bitmap due to errors\n",
+ bmname(bitmap));
}
}
@@ -983,6 +1003,7 @@ void bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
+ int writing = 0;
if (!bitmap || !bitmap->storage.filemap ||
test_bit(BITMAP_STALE, &bitmap->flags))
@@ -997,15 +1018,19 @@ void bitmap_unplug(struct bitmap *bitmap)
need_write = test_and_clear_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE);
if (dirty || need_write) {
+ if (!writing) {
+ bitmap_wait_writes(bitmap);
+ if (bitmap->mddev->queue)
+ blk_add_trace_msg(bitmap->mddev->queue,
+ "md bitmap_unplug");
+ }
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
write_page(bitmap, bitmap->storage.filemap[i], 0);
+ writing = 1;
}
}
- if (bitmap->storage.file)
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
- else
- md_super_wait(bitmap->mddev);
+ if (writing)
+ bitmap_wait_writes(bitmap);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap);
@@ -1056,14 +1081,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
if (outofdate)
- printk(KERN_INFO "%s: bitmap file is out of date, doing full "
- "recovery\n", bmname(bitmap));
+ pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
if (file && i_size_read(file->f_mapping->host) < store->bytes) {
- printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
- bmname(bitmap),
- (unsigned long) i_size_read(file->f_mapping->host),
- store->bytes);
+ pr_warn("%s: bitmap file too short %lu < %lu\n",
+ bmname(bitmap),
+ (unsigned long) i_size_read(file->f_mapping->host),
+ store->bytes);
goto err;
}
@@ -1137,16 +1161,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
offset = 0;
}
- printk(KERN_INFO "%s: bitmap initialized from disk: "
- "read %lu pages, set %lu of %lu bits\n",
- bmname(bitmap), store->file_pages,
- bit_cnt, chunks);
+ pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
+ bmname(bitmap), store->file_pages,
+ bit_cnt, chunks);
return 0;
err:
- printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
- bmname(bitmap), ret);
+ pr_warn("%s: bitmap initialisation failed: %d\n",
+ bmname(bitmap), ret);
return ret;
}
@@ -1225,6 +1248,10 @@ void bitmap_daemon_work(struct mddev *mddev)
}
bitmap->allclean = 1;
+ if (bitmap->mddev->queue)
+ blk_add_trace_msg(bitmap->mddev->queue,
+ "md bitmap_daemon_work");
+
/* Any file-page which is PENDING now needs to be written.
* So set NEEDWRITE now, then after we make any last-minute changes
* we will write it.
@@ -1289,6 +1316,7 @@ void bitmap_daemon_work(struct mddev *mddev)
}
spin_unlock_irq(&counts->lock);
+ bitmap_wait_writes(bitmap);
/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
* DIRTY pages need to be written by bitmap_unplug so it can wait
* for them.
@@ -1595,7 +1623,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
atomic_read(&bitmap->mddev->recovery_active) == 0);
bitmap->mddev->curr_resync_completed = sector;
- set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
@@ -1825,8 +1853,8 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
if (err)
goto error;
- printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
- bitmap->counts.pages, bmname(bitmap));
+ pr_debug("created bitmap (%lu pages) for device %s\n",
+ bitmap->counts.pages, bmname(bitmap));
err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
if (err)
@@ -2029,8 +2057,10 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
!bitmap->mddev->bitmap_info.external,
mddev_is_clustered(bitmap->mddev)
? bitmap->cluster_slot : 0);
- if (ret)
+ if (ret) {
+ bitmap_file_unmap(&store);
goto err;
+ }
pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
@@ -2089,7 +2119,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
BITMAP_BLOCK_SHIFT);
blocks = old_counts.chunks << old_counts.chunkshift;
- pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n");
+ pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
break;
} else
bitmap->counts.bp[page].count += 1;
@@ -2266,7 +2296,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
/* Ensure new bitmap info is stored in
* metadata promptly.
*/
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
rv = 0;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 262e75365cc0..84d2f0e4c754 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -820,12 +820,14 @@ enum new_flag {
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
struct dm_buffer *b;
+ bool tried_noio_alloc = false;
/*
* dm-bufio is resistant to allocation failures (it just keeps
* one buffer reserved in cases all the allocations fail).
* So set flags to not try too hard:
- * GFP_NOIO: don't recurse into the I/O layer
+ * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
+ * mutex and wait ourselves.
* __GFP_NORETRY: don't retry and rather return failure
* __GFP_NOMEMALLOC: don't use emergency reserves
* __GFP_NOWARN: don't print a warning in case of failure
@@ -835,7 +837,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
*/
while (1) {
if (dm_bufio_cache_size_latch != 1) {
- b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (b)
return b;
}
@@ -843,6 +845,15 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
if (nf == NF_PREFETCH)
return NULL;
+ if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
+ dm_bufio_unlock(c);
+ b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ dm_bufio_lock(c);
+ if (b)
+ return b;
+ tried_noio_alloc = true;
+ }
+
if (!list_empty(&c->reserved_buffers)) {
b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
@@ -1585,18 +1596,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c;
- unsigned long count;
-
- c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (sc->gfp_mask & __GFP_FS)
- dm_bufio_lock(c);
- else if (!dm_bufio_trylock(c))
- return 0;
+ struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
- dm_bufio_unlock(c);
- return count;
+ return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
}
/*
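An illustrative aside on the __alloc_buffer_wait_no_callback() change above (a sketch, not part of the patch): the hunk keeps a cheap GFP_NOWAIT attempt while the client mutex is held and adds a single sleeping GFP_NOIO attempt made with the mutex dropped. The userspace stand-ins below (alloc_nowait, alloc_may_sleep, client_lock) are hypothetical names used only to make that locking pattern concrete.

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-ins: one attempt that must not block, one that may. */
static void *alloc_nowait(size_t n)    { return malloc(n); }
static void *alloc_may_sleep(size_t n) { return malloc(n); }

/* Called with client_lock held; returns with it held again. */
static void *get_buffer(size_t n)
{
	void *b = alloc_nowait(n);          /* first try: non-blocking, keep the lock */
	if (b)
		return b;

	pthread_mutex_unlock(&client_lock); /* the sleeping attempt must not hold the lock */
	b = alloc_may_sleep(n);
	pthread_mutex_lock(&client_lock);
	return b;                           /* NULL: caller falls back to reserved buffers */
}

int main(void)
{
	pthread_mutex_lock(&client_lock);
	free(get_buffer(4096));
	pthread_mutex_unlock(&client_lock);
	return 0;
}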
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 695577812cf6..624fe4319b24 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -383,7 +383,6 @@ static int __format_metadata(struct dm_cache_metadata *cmd)
goto bad;
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
-
r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
if (r < 0)
goto bad;
@@ -789,7 +788,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
{
if (cmd->data_block_size != data_block_size) {
- DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+ DMERR("data_block_size (%llu) different from that in metadata (%llu)",
(unsigned long long) data_block_size,
(unsigned long long) cmd->data_block_size);
return false;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index c33f4a6e1d7d..f19c6930a67c 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1361,7 +1361,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
static unsigned random_level(dm_cblock_t cblock)
{
- return hash_32_generic(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
+ return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
static int smq_load_mapping(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 59b2c50562e4..e04c61e0839e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -989,7 +989,8 @@ static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mod
enum cache_metadata_mode old_mode = get_cache_mode(cache);
if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
- DMERR("unable to read needs_check flag, setting failure mode");
+ DMERR("%s: unable to read needs_check flag, setting failure mode.",
+ cache_device_name(cache));
new_mode = CM_FAIL;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 68a9eb4f3f36..7c6c57216bf2 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
@@ -23,12 +24,14 @@
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
+#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
+#include <keys/user-type.h>
#include <linux/device-mapper.h>
@@ -140,8 +143,9 @@ struct crypt_config {
char *cipher;
char *cipher_string;
+ char *key_string;
- struct crypt_iv_operations *iv_gen_ops;
+ const struct crypt_iv_operations *iv_gen_ops;
union {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
@@ -758,15 +762,15 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
return r;
}
-static struct crypt_iv_operations crypt_iv_plain_ops = {
+static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
-static struct crypt_iv_operations crypt_iv_plain64_ops = {
+static const struct crypt_iv_operations crypt_iv_plain64_ops = {
.generator = crypt_iv_plain64_gen
};
-static struct crypt_iv_operations crypt_iv_essiv_ops = {
+static const struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
.init = crypt_iv_essiv_init,
@@ -774,17 +778,17 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
.generator = crypt_iv_essiv_gen
};
-static struct crypt_iv_operations crypt_iv_benbi_ops = {
+static const struct crypt_iv_operations crypt_iv_benbi_ops = {
.ctr = crypt_iv_benbi_ctr,
.dtr = crypt_iv_benbi_dtr,
.generator = crypt_iv_benbi_gen
};
-static struct crypt_iv_operations crypt_iv_null_ops = {
+static const struct crypt_iv_operations crypt_iv_null_ops = {
.generator = crypt_iv_null_gen
};
-static struct crypt_iv_operations crypt_iv_lmk_ops = {
+static const struct crypt_iv_operations crypt_iv_lmk_ops = {
.ctr = crypt_iv_lmk_ctr,
.dtr = crypt_iv_lmk_dtr,
.init = crypt_iv_lmk_init,
@@ -793,7 +797,7 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = {
.post = crypt_iv_lmk_post
};
-static struct crypt_iv_operations crypt_iv_tcw_ops = {
+static const struct crypt_iv_operations crypt_iv_tcw_ops = {
.ctr = crypt_iv_tcw_ctr,
.dtr = crypt_iv_tcw_dtr,
.init = crypt_iv_tcw_init,
@@ -994,7 +998,6 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
unsigned i, len, remaining_size;
struct page *page;
- struct bio_vec *bvec;
retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
@@ -1019,12 +1022,7 @@ retry:
len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
- bvec = &clone->bi_io_vec[clone->bi_vcnt++];
- bvec->bv_page = page;
- bvec->bv_len = len;
- bvec->bv_offset = 0;
-
- clone->bi_iter.bi_size += len;
+ bio_add_page(clone, page, len, 0);
remaining_size -= len;
}
@@ -1471,7 +1469,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
return 0;
}
-static int crypt_setkey_allcpus(struct crypt_config *cc)
+static int crypt_setkey(struct crypt_config *cc)
{
unsigned subkey_size;
int err = 0, i, r;
@@ -1490,25 +1488,157 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
return err;
}
+#ifdef CONFIG_KEYS
+
+static bool contains_whitespace(const char *str)
+{
+ while (*str)
+ if (isspace(*str++))
+ return true;
+ return false;
+}
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+ char *new_key_string, *key_desc;
+ int ret;
+ struct key *key;
+ const struct user_key_payload *ukp;
+
+ /*
+ * Reject key_string with whitespace. dm core currently lacks code for
+ * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
+ */
+ if (contains_whitespace(key_string)) {
+ DMERR("whitespace chars not allowed in key string");
+ return -EINVAL;
+ }
+
+ /* look for next ':' separating key_type from key_description */
+ key_desc = strpbrk(key_string, ":");
+ if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
+ return -EINVAL;
+
+ if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
+ strncmp(key_string, "user:", key_desc - key_string + 1))
+ return -EINVAL;
+
+ new_key_string = kstrdup(key_string, GFP_KERNEL);
+ if (!new_key_string)
+ return -ENOMEM;
+
+ key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
+ key_desc + 1, NULL);
+ if (IS_ERR(key)) {
+ kzfree(new_key_string);
+ return PTR_ERR(key);
+ }
+
+ rcu_read_lock();
+
+ ukp = user_key_payload(key);
+ if (!ukp) {
+ rcu_read_unlock();
+ key_put(key);
+ kzfree(new_key_string);
+ return -EKEYREVOKED;
+ }
+
+ if (cc->key_size != ukp->datalen) {
+ rcu_read_unlock();
+ key_put(key);
+ kzfree(new_key_string);
+ return -EINVAL;
+ }
+
+ memcpy(cc->key, ukp->data, cc->key_size);
+
+ rcu_read_unlock();
+ key_put(key);
+
+ /* clear the flag since following operations may invalidate previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
+ ret = crypt_setkey(cc);
+
+ /* wipe the kernel key payload copy in each case */
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
+
+ if (!ret) {
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ kzfree(cc->key_string);
+ cc->key_string = new_key_string;
+ } else
+ kzfree(new_key_string);
+
+ return ret;
+}
+
+static int get_key_size(char **key_string)
+{
+ char *colon, dummy;
+ int ret;
+
+ if (*key_string[0] != ':')
+ return strlen(*key_string) >> 1;
+
+ /* look for next ':' in key string */
+ colon = strpbrk(*key_string + 1, ":");
+ if (!colon)
+ return -EINVAL;
+
+ if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
+ return -EINVAL;
+
+ *key_string = colon;
+
+ /* remaining key string should be :<logon|user>:<key_desc> */
+
+ return ret;
+}
+
+#else
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+ return -EINVAL;
+}
+
+static int get_key_size(char **key_string)
+{
+ return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
+}
+
+#endif
+
static int crypt_set_key(struct crypt_config *cc, char *key)
{
int r = -EINVAL;
int key_string_len = strlen(key);
- /* The key size may not be changed. */
- if (cc->key_size != (key_string_len >> 1))
- goto out;
-
/* Hyphen (which gives a key_size of zero) means there is no key. */
if (!cc->key_size && strcmp(key, "-"))
goto out;
- if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ /* ':' means the key is in kernel keyring, short-circuit normal key processing */
+ if (key[0] == ':') {
+ r = crypt_set_keyring_key(cc, key + 1);
goto out;
+ }
- set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ /* clear the flag since following operations may invalidate previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- r = crypt_setkey_allcpus(cc);
+ /* wipe references to any kernel keyring key */
+ kzfree(cc->key_string);
+ cc->key_string = NULL;
+
+ if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ goto out;
+
+ r = crypt_setkey(cc);
+ if (!r)
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
out:
/* Hex key string not needed after here, so wipe it. */
@@ -1521,8 +1651,10 @@ static int crypt_wipe_key(struct crypt_config *cc)
{
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
memset(&cc->key, 0, cc->key_size * sizeof(u8));
+ kzfree(cc->key_string);
+ cc->key_string = NULL;
- return crypt_setkey_allcpus(cc);
+ return crypt_setkey(cc);
}
static void crypt_dtr(struct dm_target *ti)
@@ -1558,6 +1690,7 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher);
kzfree(cc->cipher_string);
+ kzfree(cc->key_string);
/* Must zero key material before freeing */
kzfree(cc);
@@ -1726,12 +1859,13 @@ bad_mem:
/*
* Construct an encryption mapping:
- * <cipher> <key> <iv_offset> <dev_path> <start>
+ * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
*/
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
- unsigned int key_size, opt_params;
+ int key_size;
+ unsigned int opt_params;
unsigned long long tmpll;
int ret;
size_t iv_size_padding;
@@ -1748,7 +1882,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- key_size = strlen(argv[1]) >> 1;
+ key_size = get_key_size(&argv[1]);
+ if (key_size < 0) {
+ ti->error = "Cannot parse key size";
+ return -EINVAL;
+ }
cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
if (!cc) {
@@ -1955,10 +2093,13 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
DMEMIT("%s ", cc->cipher_string);
- if (cc->key_size > 0)
- for (i = 0; i < cc->key_size; i++)
- DMEMIT("%02x", cc->key[i]);
- else
+ if (cc->key_size > 0) {
+ if (cc->key_string)
+ DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+ else
+ for (i = 0; i < cc->key_size; i++)
+ DMEMIT("%02x", cc->key[i]);
+ } else
DMEMIT("-");
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
@@ -2014,7 +2155,7 @@ static void crypt_resume(struct dm_target *ti)
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct crypt_config *cc = ti->private;
- int ret = -EINVAL;
+ int key_size, ret = -EINVAL;
if (argc < 2)
goto error;
@@ -2025,6 +2166,13 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
return -EINVAL;
}
if (argc == 3 && !strcasecmp(argv[1], "set")) {
+ /* The key size may not be changed. */
+ key_size = get_key_size(&argv[2]);
+ if (key_size < 0 || cc->key_size != key_size) {
+ memset(argv[2], '0', strlen(argv[2]));
+ return -EINVAL;
+ }
+
ret = crypt_set_key(cc, argv[2]);
if (ret)
return ret;
@@ -2068,7 +2216,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 14, 1},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
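An illustrative aside on the keyring support added above (a sketch, not part of the patch): the target's second argument may now be either a plain hex key or a reference of the form :<key_size>:<logon|user>:<key_description>, as documented in the crypt_ctr() comment. The self-contained parser below only mirrors the shape of get_key_size()/crypt_set_keyring_key(); example_key_size() and the sample strings are hypothetical.

#include <stdio.h>
#include <string.h>

/* Returns the key size in bytes, or -1 if the string is malformed. */
static int example_key_size(const char *key)
{
	unsigned int size;
	char type[8];
	int consumed = -1;

	if (key[0] != ':')                      /* plain hex key: size is half its length */
		return (int)(strlen(key) / 2);

	/* ":<key_size>:<logon|user>:<key_description>" */
	if (sscanf(key, ":%u:%7[^:]:%n", &size, type, &consumed) < 2 || consumed < 0)
		return -1;
	if (strcmp(type, "logon") != 0 && strcmp(type, "user") != 0)
		return -1;
	if (key[consumed] == '\0')              /* empty key description */
		return -1;
	return (int)size;
}

int main(void)
{
	printf("%d\n", example_key_size("deadbeefdeadbeefdeadbeefdeadbeef")); /* 16 */
	printf("%d\n", example_key_size(":32:logon:cryptsetup:mylv-key"));    /* 32 */
	printf("%d\n", example_key_size(":64:keyctl:oops"));                  /* -1 */
	return 0;
}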
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 6a2e8dd44a1b..13305a182611 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -36,7 +36,8 @@ struct flakey_c {
};
enum feature_flag_bits {
- DROP_WRITES
+ DROP_WRITES,
+ ERROR_WRITES
};
struct per_bio_data {
@@ -76,6 +77,25 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
ti->error = "Feature drop_writes duplicated";
return -EINVAL;
+ } else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ ti->error = "Feature drop_writes conflicts with feature error_writes";
+ return -EINVAL;
+ }
+
+ continue;
+ }
+
+ /*
+ * error_writes
+ */
+ if (!strcasecmp(arg_name, "error_writes")) {
+ if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
+ ti->error = "Feature error_writes duplicated";
+ return -EINVAL;
+
+ } else if (test_bit(DROP_WRITES, &fc->flags)) {
+ ti->error = "Feature error_writes conflicts with feature drop_writes";
+ return -EINVAL;
}
continue;
@@ -135,6 +155,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
+
+ } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
+ ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
}
return 0;
@@ -200,11 +224,13 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!(fc->up_interval + fc->down_interval)) {
ti->error = "Total (up + down) interval is zero";
+ r = -EINVAL;
goto bad;
}
if (fc->up_interval + fc->down_interval < fc->up_interval) {
ti->error = "Interval overflow";
+ r = -EINVAL;
goto bad;
}
@@ -289,22 +315,27 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;
/*
- * Error reads if neither corrupt_bio_byte or drop_writes are set.
+ * Error reads if none of corrupt_bio_byte, drop_writes or error_writes is set.

* Otherwise, flakey_end_io() will decide if the reads should be modified.
*/
if (bio_data_dir(bio) == READ) {
- if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
+ if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
+ !test_bit(ERROR_WRITES, &fc->flags))
return -EIO;
goto map_bio;
}
/*
- * Drop writes?
+ * Drop or error writes?
*/
if (test_bit(DROP_WRITES, &fc->flags)) {
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
+ else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
/*
* Corrupt matching writes.
@@ -340,10 +371,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
*/
corrupt_bio_data(bio, fc);
- } else if (!test_bit(DROP_WRITES, &fc->flags)) {
+ } else if (!test_bit(DROP_WRITES, &fc->flags) &&
+ !test_bit(ERROR_WRITES, &fc->flags)) {
/*
* Error read during the down_interval if drop_writes
- * wasn't configured.
+ * and error_writes were not configured.
*/
return -EIO;
}
@@ -357,7 +389,7 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
{
unsigned sz = 0;
struct flakey_c *fc = ti->private;
- unsigned drop_writes;
+ unsigned drop_writes, error_writes;
switch (type) {
case STATUSTYPE_INFO:
@@ -370,10 +402,13 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
fc->down_interval);
drop_writes = test_bit(DROP_WRITES, &fc->flags);
- DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
+ error_writes = test_bit(ERROR_WRITES, &fc->flags);
+ DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);
if (drop_writes)
DMEMIT("drop_writes ");
+ else if (error_writes)
+ DMEMIT("error_writes ");
if (fc->corrupt_bio_byte)
DMEMIT("corrupt_bio_byte %u %c %u %u ",
@@ -410,7 +445,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 3, 1},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
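An illustrative aside on the new error_writes feature above (a sketch, not part of the patch): during the down interval a write is either completed without reaching the device (drop_writes), failed with -EIO (error_writes), corrupted but passed through (corrupt_bio_byte with the WRITE direction), or errored by default; the constructor rejects tables that set both drop_writes and error_writes. down_write_action() below is a hypothetical stand-in that only restates that precedence.

#include <stdio.h>

enum action { PASS_THROUGH, COMPLETE_SILENTLY, FAIL_WITH_EIO };

static enum action down_write_action(int drop_writes, int error_writes,
				     int corrupt_write_byte)
{
	if (drop_writes)
		return COMPLETE_SILENTLY;  /* bio_endio() without doing the write */
	if (error_writes)
		return FAIL_WITH_EIO;      /* bio_io_error() */
	if (corrupt_write_byte)
		return PASS_THROUGH;       /* write proceeds with a corrupted payload */
	return FAIL_WITH_EIO;              /* no feature configured: error the IO */
}

int main(void)
{
	printf("drop=%d error=%d corrupt=%d\n",
	       down_write_action(1, 0, 0),
	       down_write_action(0, 1, 0),
	       down_write_action(0, 0, 5));
	return 0;
}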
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0bf1a12e35fe..03940bf36f6c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -162,7 +162,10 @@ struct dpages {
struct page **p, unsigned long *len, unsigned *offset);
void (*next_page)(struct dpages *dp);
- unsigned context_u;
+ union {
+ unsigned context_u;
+ struct bvec_iter context_bi;
+ };
void *context_ptr;
void *vma_invalidate_address;
@@ -204,25 +207,36 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
static void bio_get_page(struct dpages *dp, struct page **p,
unsigned long *len, unsigned *offset)
{
- struct bio_vec *bvec = dp->context_ptr;
- *p = bvec->bv_page;
- *len = bvec->bv_len - dp->context_u;
- *offset = bvec->bv_offset + dp->context_u;
+ struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
+ dp->context_bi);
+
+ *p = bvec.bv_page;
+ *len = bvec.bv_len;
+ *offset = bvec.bv_offset;
+
+ /* avoid figuring it out again in bio_next_page() */
+ dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}
static void bio_next_page(struct dpages *dp)
{
- struct bio_vec *bvec = dp->context_ptr;
- dp->context_ptr = bvec + 1;
- dp->context_u = 0;
+ unsigned int len = (unsigned int)dp->context_bi.bi_sector;
+
+ bvec_iter_advance((struct bio_vec *)dp->context_ptr,
+ &dp->context_bi, len);
}
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
dp->get_page = bio_get_page;
dp->next_page = bio_next_page;
- dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
- dp->context_u = bio->bi_iter.bi_bvec_done;
+
+ /*
+ * We just use bvec iterator to retrieve pages, so it is ok to
+ * access the bvec table directly here
+ */
+ dp->context_ptr = bio->bi_io_vec;
+ dp->context_bi = bio->bi_iter;
}
/*
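An illustrative aside on the dpages rework above (a sketch, not part of the patch): bio_get_page() now hands out whole bvec segments through the bvec iterator and stashes the returned length (reusing context_bi.bi_sector as scratch space) so bio_next_page() can advance by exactly that amount without recomputing it. The userspace cursor below is a hypothetical analogue of that remember-then-advance pattern, not the kernel API.

#include <stdio.h>
#include <stddef.h>

struct seg { const char *base; size_t len; };

struct cursor {
	const struct seg *segs;
	size_t idx;       /* current segment */
	size_t done;      /* bytes already consumed within it */
	size_t last_len;  /* remembered by get_seg() for next_seg() */
};

static void get_seg(struct cursor *c, const char **p, size_t *len)
{
	*p = c->segs[c->idx].base + c->done;
	*len = c->segs[c->idx].len - c->done;
	c->last_len = *len;        /* avoid figuring it out again in next_seg() */
}

static void next_seg(struct cursor *c)
{
	c->done += c->last_len;
	if (c->done >= c->segs[c->idx].len) {
		c->idx++;
		c->done = 0;
	}
}

int main(void)
{
	struct seg segs[] = { { "hello ", 6 }, { "world", 5 } };
	struct cursor c = { segs, 0, 0, 0 };
	const char *p;
	size_t len;

	for (int i = 0; i < 2; i++) {
		get_seg(&c, &p, &len);
		printf("%.*s", (int)len, p);
		next_seg(&c);
	}
	printf("\n");
	return 0;
}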
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 966eb4b61aed..c72a77048b73 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1697,7 +1697,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
{
struct dm_ioctl *dmi;
int secure_data;
- const size_t minimum_data_size = sizeof(*param_kernel) - sizeof(param_kernel->data);
+ const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e477af8596e2..6400cffb986d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -372,16 +372,13 @@ static int __pg_init_all_paths(struct multipath *m)
return atomic_read(&m->pg_init_in_progress);
}
-static int pg_init_all_paths(struct multipath *m)
+static void pg_init_all_paths(struct multipath *m)
{
- int r;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- r = __pg_init_all_paths(m);
+ __pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
-
- return r;
}
static void __switch_pg(struct multipath *m, struct priority_group *pg)
@@ -583,16 +580,17 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
* .request_fn stacked on blk-mq path(s) and
* blk-mq stacked on blk-mq path(s).
*/
- *__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
- rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(*__clone)) {
- /* ENOMEM, requeue */
+ clone = blk_mq_alloc_request(bdev_get_queue(bdev),
+ rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(clone)) {
+ /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
clear_request_fn_mpio(m, map_context);
return r;
}
- (*__clone)->bio = (*__clone)->biotail = NULL;
- (*__clone)->rq_disk = bdev->bd_disk;
- (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ clone->bio = clone->biotail = NULL;
+ clone->rq_disk = bdev->bd_disk;
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ *__clone = clone;
}
if (pgpath->pg->ps.type->start_io)
@@ -852,18 +850,22 @@ retain:
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name) {
/*
+ * Clear any hw_handler_params associated with a
+ * handler that isn't already attached.
+ */
+ if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+ kfree(m->hw_handler_params);
+ m->hw_handler_params = NULL;
+ }
+
+ /*
* Reset hw_handler_name to match the attached handler
- * and clear any hw_handler_params associated with the
- * ignored handler.
*
* NB. This modifies the table line to show the actual
* handler instead of the original table passed in.
*/
kfree(m->hw_handler_name);
m->hw_handler_name = attached_handler_name;
-
- kfree(m->hw_handler_params);
- m->hw_handler_params = NULL;
}
}
@@ -1002,6 +1004,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
}
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
+ if (!m->hw_handler_name)
+ return -EINVAL;
if (hw_argc > 1) {
char *p;
@@ -1362,7 +1366,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
- (pgnum > m->nr_priority_groups)) {
+ !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to switch_pg_num");
return -EINVAL;
}
@@ -1394,7 +1398,7 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
- (pgnum > m->nr_priority_groups)) {
+ !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to bypass_pg");
return -EINVAL;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6d53810963f7..b8f978e551d7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -160,7 +160,6 @@ struct raid_dev {
CTR_FLAG_DAEMON_SLEEP | \
CTR_FLAG_MIN_RECOVERY_RATE | \
CTR_FLAG_MAX_RECOVERY_RATE | \
- CTR_FLAG_MAX_WRITE_BEHIND | \
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
@@ -171,7 +170,6 @@ struct raid_dev {
CTR_FLAG_DAEMON_SLEEP | \
CTR_FLAG_MIN_RECOVERY_RATE | \
CTR_FLAG_MAX_RECOVERY_RATE | \
- CTR_FLAG_MAX_WRITE_BEHIND | \
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
@@ -2011,7 +2009,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
/* Force writing of superblocks to disk */
- set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
/* Any superblock is better than none, choose that if given */
return refdev ? 0 : 1;
@@ -2050,16 +2048,17 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
mddev->reshape_position = MaxSector;
+ mddev->raid_disks = le32_to_cpu(sb->num_devices);
+ mddev->level = le32_to_cpu(sb->level);
+ mddev->layout = le32_to_cpu(sb->layout);
+ mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
+
/*
* Reshaping is supported, e.g. reshape_position is valid
* in superblock and superblock content is authoritative.
*/
if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
/* Superblock is authoritative wrt given raid set layout! */
- mddev->raid_disks = le32_to_cpu(sb->num_devices);
- mddev->level = le32_to_cpu(sb->level);
- mddev->layout = le32_to_cpu(sb->layout);
- mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
@@ -2087,38 +2086,44 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
/*
* No takeover/reshaping, because we don't have the extended v1.9.0 metadata
*/
- if (le32_to_cpu(sb->level) != mddev->new_level) {
- DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
- return -EINVAL;
- }
- if (le32_to_cpu(sb->layout) != mddev->new_layout) {
- DMERR("Reshaping raid sets not yet supported. (raid layout change)");
- DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
- DMERR(" Old layout: %s w/ %d copies",
- raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
- raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
- DMERR(" New layout: %s w/ %d copies",
- raid10_md_layout_to_format(mddev->layout),
- raid10_md_layout_to_copies(mddev->layout));
- return -EINVAL;
- }
- if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
- DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
- return -EINVAL;
- }
+ struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
+ struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
- /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */
- if (!rt_is_raid1(rs->raid_type) &&
- (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
- DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)",
- sb->num_devices, mddev->raid_disks);
+ if (rs_takeover_requested(rs)) {
+ if (rt_cur && rt_new)
+ DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
+ rt_cur->name, rt_new->name);
+ else
+ DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
+ return -EINVAL;
+ } else if (rs_reshape_requested(rs)) {
+ DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
+ if (mddev->layout != mddev->new_layout) {
+ if (rt_cur && rt_new)
+ DMERR(" current layout %s vs new layout %s",
+ rt_cur->name, rt_new->name);
+ else
+ DMERR(" current layout 0x%X vs new layout 0x%X",
+ le32_to_cpu(sb->layout), mddev->new_layout);
+ }
+ if (mddev->chunk_sectors != mddev->new_chunk_sectors)
+ DMERR(" current stripe sectors %u vs new stripe sectors %u",
+ mddev->chunk_sectors, mddev->new_chunk_sectors);
+ if (rs->delta_disks)
+ DMERR(" current %u disks vs new %u disks",
+ mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
+ if (rs_is_raid10(rs)) {
+ DMERR(" Old layout: %s w/ %u copies",
+ raid10_md_layout_to_format(mddev->layout),
+ raid10_md_layout_to_copies(mddev->layout));
+ DMERR(" New layout: %s w/ %u copies",
+ raid10_md_layout_to_format(mddev->new_layout),
+ raid10_md_layout_to_copies(mddev->new_layout));
+ }
return -EINVAL;
}
DMINFO("Discovered old metadata format; upgrading to extended metadata format");
-
- /* Table line is checked vs. authoritative superblock */
- rs_set_new(rs);
}
if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
@@ -2211,7 +2216,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
continue;
if (role != r->raid_disk) {
- if (__is_raid10_near(mddev->layout)) {
+ if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
rs->raid_disks % rs->raid10_copies) {
rs->ti->error =
@@ -2994,6 +2999,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
}
+ /* Disable/enable discard support on raid set. */
+ configure_discard_support(rs);
+
mddev_unlock(&rs->md);
return 0;
@@ -3497,7 +3505,7 @@ static void rs_update_sbs(struct raid_set *rs)
struct mddev *mddev = &rs->md;
int ro = mddev->ro;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev->ro = 0;
md_update_sb(mddev, 1);
mddev->ro = ro;
@@ -3580,12 +3588,6 @@ static int raid_preresume(struct dm_target *ti)
if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
rs_update_sbs(rs);
- /*
- * Disable/enable discard support on raid set after any
- * conversion, because devices can have been added
- */
- configure_discard_support(rs);
-
/* Load the bitmap from disk unless raid0 */
r = __load_dirty_region_bitmap(rs);
if (r)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b2a9e2d161e4..9d7275fb541a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -23,11 +23,7 @@ static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-#ifdef CONFIG_DM_MQ_DEFAULT
-static bool use_blk_mq = true;
-#else
-static bool use_blk_mq = false;
-#endif
+static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
bool dm_use_blk_mq_default(void)
{
@@ -210,6 +206,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
@@ -222,8 +221,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (!md->queue->mq_ops && run_queue)
- blk_run_queue_async(md->queue);
+ if (!q->mq_ops && run_queue) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
/*
* dm_put() must be at the end of this function. See the comment above
@@ -798,7 +800,7 @@ static void dm_old_request_fn(struct request_queue *q)
pos = blk_rq_pos(rq);
if ((dm_old_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
(ti->type->busy && ti->type->busy(ti))) {
blk_delay_queue(q, 10);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c4b53b332607..0a427de23ed2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -871,7 +871,7 @@ static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- bool verify_blk_mq = false;
+ unsigned sq_count = 0, mq_count = 0;
struct dm_target *tgt;
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
@@ -924,12 +924,6 @@ static int dm_table_determine_type(struct dm_table *t)
BUG_ON(!request_based); /* No targets in this table */
- if (list_empty(devices) && __table_type_request_based(live_md_type)) {
- /* inherit live MD type */
- t->type = live_md_type;
- return 0;
- }
-
/*
* The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
* having a compatible target use dm_table_set_type.
@@ -948,6 +942,19 @@ verify_rq_based:
return -EINVAL;
}
+ if (list_empty(devices)) {
+ int srcu_idx;
+ struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
+
+ /* inherit live table's type and all_blk_mq */
+ if (live_table) {
+ t->type = live_table->type;
+ t->all_blk_mq = live_table->all_blk_mq;
+ }
+ dm_put_live_table(t->md, srcu_idx);
+ return 0;
+ }
+
/* Non-request-stackable devices can't be used for request-based dm */
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
@@ -959,19 +966,19 @@ verify_rq_based:
}
if (q->mq_ops)
- verify_blk_mq = true;
+ mq_count++;
+ else
+ sq_count++;
}
+ if (sq_count && mq_count) {
+ DMERR("table load rejected: not all devices are blk-mq request-stackable");
+ return -EINVAL;
+ }
+ t->all_blk_mq = mq_count > 0;
- if (verify_blk_mq) {
- /* verify _all_ devices in the table are blk-mq devices */
- list_for_each_entry(dd, devices, list)
- if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
- DMERR("table load rejected: not all devices"
- " are blk-mq request-stackable");
- return -EINVAL;
- }
-
- t->all_blk_mq = true;
+ if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
+ DMERR("table load rejected: all devices are not blk-mq request-stackable");
+ return -EINVAL;
}
return 0;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0aba34a7b3b3..7335d8a3fc47 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -868,7 +868,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
if (r) {
- ti->error = "Data device lookup failed";
+ ti->error = "Hash device lookup failed";
goto bad;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ffa97b742a68..3086da5664f3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1886,9 +1886,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(q->queue_lock);
+ blk_set_queue_dying(q);
if (dm_request_based(md) && md->kworker_task)
kthread_flush_worker(&md->kworker);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 86f5d435901d..5975c9915684 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "linear.h"
@@ -101,8 +102,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
- printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
- mdname(mddev));
+ pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -123,8 +124,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
discard_supported = true;
}
if (cnt != raid_disks) {
- printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
- mdname(mddev));
+ pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -227,22 +228,22 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
}
do {
- tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+ tmp_dev = which_dev(mddev, bio_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
end_sector = tmp_dev->end_sector;
data_offset = tmp_dev->rdev->data_offset;
bio->bi_bdev = tmp_dev->rdev->bdev;
- if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
- bio->bi_iter.bi_sector < start_sector))
+ if (unlikely(bio_sector >= end_sector ||
+ bio_sector < start_sector))
goto out_of_bounds;
if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to
* split it.
*/
- split = bio_split(bio, end_sector -
- bio->bi_iter.bi_sector,
+ split = bio_split(bio, end_sector - bio_sector,
GFP_NOIO, fs_bio_set);
bio_chain(split, bio);
} else {
@@ -256,15 +257,18 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
- } else
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+ split, disk_devt(mddev->gendisk),
+ bio_sector);
generic_make_request(split);
+ }
} while (split != bio);
return;
out_of_bounds:
- printk(KERN_ERR
- "md/linear:%s: make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
+ pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
mdname(mddev),
(unsigned long long)bio->bi_iter.bi_sector,
bdevname(tmp_dev->rdev->bdev, b),
@@ -275,7 +279,6 @@ out_of_bounds:
static void linear_status (struct seq_file *seq, struct mddev *mddev)
{
-
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f975cd08923d..82821ee0d57f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -30,6 +30,18 @@
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Errors, Warnings, etc.
+ Please use:
+ pr_crit() for error conditions that risk data loss
+ pr_err() for error conditions that are unexpected, like an IO error
+ or internal inconsistency
+ pr_warn() for error conditions that could have been predicted, like
+ adding a device to an array when it has incompatible metadata
+ pr_info() for every interesting, very rare event, like an array starting
+ or stopping, or resync starting or stopping
+ pr_debug() for everything else.
+
*/
#include <linux/kthread.h>
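An illustrative aside on the logging guideline added above (a sketch, not part of the patch): the mapping from event severity to pr_*() level, shown with trivial userspace stand-in macros so the policy is concrete; the message texts are made up.

#include <stdio.h>

/* Stand-ins for the kernel macros, only to make the policy concrete. */
#define pr_crit(s)  fputs("crit:  " s, stderr)
#define pr_err(s)   fputs("err:   " s, stderr)
#define pr_warn(s)  fputs("warn:  " s, stderr)
#define pr_info(s)  fputs("info:  " s, stderr)
#define pr_debug(s) fputs("debug: " s, stderr)

int main(void)
{
	pr_crit("superblock write failed on every device, data loss possible\n");
	pr_err("unexpected IO error while reading a bitmap page\n");
	pr_warn("device has incompatible metadata, refusing to add it\n");
	pr_info("array started\n");
	pr_debug("choosing daemon_sleep default (5 sec)\n");
	return 0;
}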
@@ -52,6 +64,7 @@
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"
@@ -684,11 +697,8 @@ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
static int alloc_disk_sb(struct md_rdev *rdev)
{
rdev->sb_page = alloc_page(GFP_KERNEL);
- if (!rdev->sb_page) {
- printk(KERN_ALERT "md: out of memory.\n");
+ if (!rdev->sb_page)
return -ENOMEM;
- }
-
return 0;
}
@@ -715,9 +725,15 @@ static void super_written(struct bio *bio)
struct mddev *mddev = rdev->mddev;
if (bio->bi_error) {
- printk("md: super_written gets error=%d\n", bio->bi_error);
+ pr_err("md: super_written gets error=%d\n", bio->bi_error);
md_error(mddev, rdev);
- }
+ if (!test_bit(Faulty, &rdev->flags)
+ && (bio->bi_opf & MD_FAILFAST)) {
+ set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
+ set_bit(LastDev, &rdev->flags);
+ }
+ } else
+ clear_bit(LastDev, &rdev->flags);
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
@@ -734,7 +750,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
* if zero is reached.
* If an error occurred, call md_error
*/
- struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+ struct bio *bio;
+ int ff = 0;
+
+ if (test_bit(Faulty, &rdev->flags))
+ return;
+
+ bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
atomic_inc(&rdev->nr_pending);
@@ -743,16 +765,24 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
+
+ if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
+ test_bit(FailFast, &rdev->flags) &&
+ !test_bit(LastDev, &rdev->flags))
+ ff = MD_FAILFAST;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
atomic_inc(&mddev->pending_writes);
submit_bio(bio);
}
-void md_super_wait(struct mddev *mddev)
+int md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+ if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
+ return -EAGAIN;
+ return 0;
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
@@ -795,8 +825,8 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return 0;
fail:
- printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
- bdevname(rdev->bdev,b));
+ pr_err("md: disabled device %s, could not read superblock.\n",
+ bdevname(rdev->bdev,b));
return -EINVAL;
}
@@ -818,7 +848,6 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
if (!tmp1 || !tmp2) {
ret = 0;
- printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
@@ -932,7 +961,7 @@ int md_check_no_bitmap(struct mddev *mddev)
{
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
- printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
+ pr_warn("%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
@@ -956,7 +985,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
- if (ret) return ret;
+ if (ret)
+ return ret;
ret = -EINVAL;
@@ -964,17 +994,15 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
- printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
- b);
+ pr_warn("md: invalid raid superblock magic on %s\n", b);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
- printk(KERN_WARNING "Bad version number %d.%d on %s\n",
- sb->major_version, sb->minor_version,
- b);
+ pr_warn("Bad version number %d.%d on %s\n",
+ sb->major_version, sb->minor_version, b);
goto abort;
}
@@ -982,8 +1010,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
- printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
- b);
+ pr_warn("md: invalid superblock checksum on %s\n", b);
goto abort;
}
@@ -1004,14 +1031,13 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
- printk(KERN_WARNING "md: %s has different UUID to %s\n",
+ pr_warn("md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
- printk(KERN_WARNING "md: %s has same UUID"
- " but different superblock to %s\n",
- b, bdevname(refdev->bdev, b2));
+ pr_warn("md: %s has same UUID but different superblock to %s\n",
+ b, bdevname(refdev->bdev, b2));
goto abort;
}
ev1 = md_event(sb);
@@ -1158,6 +1184,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
+ if (desc->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
@@ -1283,6 +1311,8 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
}
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
+ if (test_bit(FailFast, &rdev2->flags))
+ d->state |= (1<<MD_DISK_FAILFAST);
}
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
@@ -1324,9 +1354,10 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ do {
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
- md_super_wait(rdev->mddev);
+ } while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
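The do/while just above is the caller-side half of the fail-fast rewrite scheme set up earlier in this patch: super_written() records a failed MD_FAILFAST superblock write by setting MD_SB_NEED_REWRITE and marking the device LastDev, md_super_wait() now reports that with -EAGAIN, and the caller resubmits; the retry skips MD_FAILFAST because LastDev is set. A condensed sketch of the loop, assuming the helpers behave as in the hunks above:

    /* Sketch: retry a superblock write without MD_FAILFAST after a
     * fail-fast attempt failed (condensed from the hunks above). */
    do {
            md_super_write(rdev->mddev, rdev, rdev->sb_start,
                           rdev->sb_size, rdev->sb_page);
    } while (md_super_wait(rdev->mddev) < 0);   /* -EAGAIN => rewrite needed */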
@@ -1413,13 +1444,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
- printk("md: invalid superblock checksum on %s\n",
+ pr_warn("md: invalid superblock checksum on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
- printk("md: data_size too small on %s\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: data_size too small on %s\n",
+ bdevname(rdev->bdev,b));
return -EINVAL;
}
if (sb->pad0 ||
@@ -1503,8 +1534,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
- printk(KERN_WARNING "md: %s has strangely different"
- " superblock to %s\n",
+ pr_warn("md: %s has strangely different superblock to %s\n",
bdevname(rdev->bdev,b),
bdevname(refdev->bdev,b2));
return -EINVAL;
@@ -1646,8 +1676,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
case MD_DISK_ROLE_JOURNAL: /* journal device */
if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
/* journal device without journal feature */
- printk(KERN_WARNING
- "md: journal device provided without journal feature, ignoring the device\n");
+ pr_warn("md: journal device provided without journal feature, ignoring the device\n");
return -EINVAL;
}
set_bit(Journal, &rdev->flags);
@@ -1669,6 +1698,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
+ if (sb->devflags & FailFast1)
+ set_bit(FailFast, &rdev->flags);
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
@@ -1707,6 +1738,10 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
+ if (test_bit(FailFast, &rdev->flags))
+ sb->devflags |= FailFast1;
+ else
+ sb->devflags &= ~FailFast1;
if (test_bit(WriteMostly, &rdev->flags))
sb->devflags |= WriteMostly1;
@@ -1863,9 +1898,10 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->sb_csum = calc_sb_1_csum(sb);
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
- md_super_wait(rdev->mddev);
+ do {
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
+ } while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
@@ -2004,9 +2040,9 @@ int md_integrity_register(struct mddev *mddev)
blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev));
- printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
+ pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
- printk(KERN_ERR "md: failed to create integrity pool for %s\n",
+ pr_err("md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
}
@@ -2034,8 +2070,8 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
return 0;
if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
- printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
- mdname(mddev), bdevname(rdev->bdev, name));
+ pr_err("%s: incompatible integrity profile for %s\n",
+ mdname(mddev), bdevname(rdev->bdev, name));
return -ENXIO;
}
@@ -2089,15 +2125,15 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
rcu_read_unlock();
if (!test_bit(Journal, &rdev->flags) &&
mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
- printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
- mdname(mddev), mddev->max_disks);
+ pr_warn("md: %s: array is limited to %d devices\n",
+ mdname(mddev), mddev->max_disks);
return -EBUSY;
}
bdevname(rdev->bdev,b);
strreplace(b, '/', '!');
rdev->mddev = mddev;
- printk(KERN_INFO "md: bind<%s>\n", b);
+ pr_debug("md: bind<%s>\n", b);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
@@ -2116,8 +2152,8 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
return 0;
fail:
- printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
- b, mdname(mddev));
+ pr_warn("md: failed to register dev-%s for %s\n",
+ b, mdname(mddev));
return err;
}
@@ -2134,7 +2170,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
- printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
+ pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
@@ -2164,8 +2200,7 @@ static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
- printk(KERN_ERR "md: could not open %s.\n",
- __bdevname(dev, b));
+ pr_warn("md: could not open %s.\n", __bdevname(dev, b));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
@@ -2185,8 +2220,7 @@ static void export_rdev(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md: export_rdev(%s)\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
@@ -2288,24 +2322,24 @@ void md_update_sb(struct mddev *mddev, int force_change)
if (mddev->ro) {
if (force_change)
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
return;
}
repeat:
if (mddev_is_clustered(mddev)) {
- if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
force_change = 1;
- if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
nospares = 1;
ret = md_cluster_ops->metadata_update_start(mddev);
/* Has someone else updated the sb */
if (!does_sb_need_changing(mddev)) {
if (ret == 0)
md_cluster_ops->metadata_update_cancel(mddev);
- bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
- BIT(MD_CHANGE_DEVS) |
- BIT(MD_CHANGE_CLEAN));
+ bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+ BIT(MD_SB_CHANGE_DEVS) |
+ BIT(MD_SB_CHANGE_CLEAN));
return;
}
}
@@ -2321,10 +2355,10 @@ repeat:
}
if (!mddev->persistent) {
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
- clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (!mddev->external) {
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed) {
rdev->badblocks.changed = 0;
@@ -2344,9 +2378,9 @@ repeat:
mddev->utime = ktime_get_real_seconds();
- if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
force_change = 1;
- if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
/* just a clean<-> dirty transition, possibly leave spares alone,
* though if events isn't the right even/odd, we will have to do
* spares after all
@@ -2402,6 +2436,9 @@ repeat:
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
+ if (mddev->queue)
+ blk_add_trace_msg(mddev->queue, "md md_update_sb");
+rewrite:
bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
@@ -2433,15 +2470,16 @@ repeat:
/* only need to write one superblock... */
break;
}
- md_super_wait(mddev);
- /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
+ if (md_super_wait(mddev) < 0)
+ goto rewrite;
+ /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
if (mddev_is_clustered(mddev) && ret == 0)
md_cluster_ops->metadata_update_finish(mddev);
if (mddev->in_sync != sync_req ||
- !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
+ !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
/* have to write it out again */
goto repeat;
wake_up(&mddev->sb_wait);
@@ -2485,7 +2523,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -2523,51 +2561,41 @@ struct rdev_sysfs_entry {
static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
- char *sep = "";
+ char *sep = ",";
size_t len = 0;
unsigned long flags = ACCESS_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
- rdev->badblocks.unacked_exist) {
- len+= sprintf(page+len, "%sfaulty",sep);
- sep = ",";
- }
- if (test_bit(In_sync, &flags)) {
- len += sprintf(page+len, "%sin_sync",sep);
- sep = ",";
- }
- if (test_bit(Journal, &flags)) {
- len += sprintf(page+len, "%sjournal",sep);
- sep = ",";
- }
- if (test_bit(WriteMostly, &flags)) {
- len += sprintf(page+len, "%swrite_mostly",sep);
- sep = ",";
- }
+ (!test_bit(ExternalBbl, &flags) &&
+ rdev->badblocks.unacked_exist))
+ len += sprintf(page+len, "faulty%s", sep);
+ if (test_bit(In_sync, &flags))
+ len += sprintf(page+len, "in_sync%s", sep);
+ if (test_bit(Journal, &flags))
+ len += sprintf(page+len, "journal%s", sep);
+ if (test_bit(WriteMostly, &flags))
+ len += sprintf(page+len, "write_mostly%s", sep);
if (test_bit(Blocked, &flags) ||
(rdev->badblocks.unacked_exist
- && !test_bit(Faulty, &flags))) {
- len += sprintf(page+len, "%sblocked", sep);
- sep = ",";
- }
+ && !test_bit(Faulty, &flags)))
+ len += sprintf(page+len, "blocked%s", sep);
if (!test_bit(Faulty, &flags) &&
!test_bit(Journal, &flags) &&
- !test_bit(In_sync, &flags)) {
- len += sprintf(page+len, "%sspare", sep);
- sep = ",";
- }
- if (test_bit(WriteErrorSeen, &flags)) {
- len += sprintf(page+len, "%swrite_error", sep);
- sep = ",";
- }
- if (test_bit(WantReplacement, &flags)) {
- len += sprintf(page+len, "%swant_replacement", sep);
- sep = ",";
- }
- if (test_bit(Replacement, &flags)) {
- len += sprintf(page+len, "%sreplacement", sep);
- sep = ",";
- }
+ !test_bit(In_sync, &flags))
+ len += sprintf(page+len, "spare%s", sep);
+ if (test_bit(WriteErrorSeen, &flags))
+ len += sprintf(page+len, "write_error%s", sep);
+ if (test_bit(WantReplacement, &flags))
+ len += sprintf(page+len, "want_replacement%s", sep);
+ if (test_bit(Replacement, &flags))
+ len += sprintf(page+len, "replacement%s", sep);
+ if (test_bit(ExternalBbl, &flags))
+ len += sprintf(page+len, "external_bbl%s", sep);
+ if (test_bit(FailFast, &flags))
+ len += sprintf(page+len, "failfast%s", sep);
+
+ if (len)
+ len -= strlen(sep);
return len+sprintf(page+len, "\n");
}
@@ -2587,6 +2615,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* so that it gets rebuilt based on bitmap
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
+ * {,-}failfast - set/clear FailFast
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2610,8 +2639,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (err == 0) {
md_kick_rdev_from_array(rdev);
- if (mddev->pers)
- md_update_sb(mddev, 1);
+ if (mddev->pers) {
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ md_wakeup_thread(mddev->thread);
+ }
md_new_event(mddev);
}
}
@@ -2626,6 +2657,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
} else if (cmd_match(buf, "-blocked")) {
if (!test_bit(Faulty, &rdev->flags) &&
+ !test_bit(ExternalBbl, &rdev->flags) &&
rdev->badblocks.unacked_exist) {
/* metadata handler doesn't understand badblocks,
* so we need to fail the device
@@ -2642,6 +2674,12 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "failfast")) {
+ set_bit(FailFast, &rdev->flags);
+ err = 0;
+ } else if (cmd_match(buf, "-failfast")) {
+ clear_bit(FailFast, &rdev->flags);
+ err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags)) {
if (rdev->mddev->pers == NULL) {
@@ -2708,6 +2746,13 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
}
} else
err = -EBUSY;
+ } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
+ set_bit(ExternalBbl, &rdev->flags);
+ rdev->badblocks.shift = 0;
+ err = 0;
+ } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
+ clear_bit(ExternalBbl, &rdev->flags);
+ err = 0;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -3211,10 +3256,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
sector_t size;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- printk(KERN_ERR "md: could not alloc mem for new device!\n");
+ if (!rdev)
return ERR_PTR(-ENOMEM);
- }
err = md_rdev_init(rdev);
if (err)
@@ -3231,8 +3274,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
- printk(KERN_WARNING
- "md: %s has zero or unknown size, marking faulty!\n",
+ pr_warn("md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
goto abort_free;
@@ -3242,16 +3284,13 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
- printk(KERN_WARNING
- "md: %s does not have a valid v%d.%d "
- "superblock, not importing!\n",
+ pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
bdevname(rdev->bdev,b),
- super_format, super_minor);
+ super_format, super_minor);
goto abort_free;
}
if (err < 0) {
- printk(KERN_WARNING
- "md: could not read %s's sb, not importing!\n",
+ pr_warn("md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
}
@@ -3287,9 +3326,7 @@ static void analyze_sbs(struct mddev *mddev)
case 0:
break;
default:
- printk( KERN_ERR \
- "md: fatal superblock inconsistency in %s"
- " -- removing from array\n",
+ pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
}
@@ -3302,18 +3339,16 @@ static void analyze_sbs(struct mddev *mddev)
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
- printk(KERN_WARNING
- "md: %s: %s: only %d devices permitted\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mddev->max_disks);
+ pr_warn("md: %s: %s: only %d devices permitted\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mddev->max_disks);
md_kick_rdev_from_array(rdev);
continue;
}
if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
- printk(KERN_WARNING "md: kicking non-fresh %s"
- " from array!\n",
+ pr_warn("md: kicking non-fresh %s from array!\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
continue;
@@ -3384,7 +3419,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
unsigned long msec;
if (mddev_is_clustered(mddev)) {
- pr_info("md: Safemode is disabled for clustered mode\n");
+ pr_warn("md: Safemode is disabled for clustered mode\n");
return -EINVAL;
}
@@ -3472,8 +3507,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
rv = -EINVAL;
if (!mddev->pers->quiesce) {
- printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
- mdname(mddev), mddev->pers->name);
+ pr_warn("md: %s: %s does not support online personality change\n",
+ mdname(mddev), mddev->pers->name);
goto out_unlock;
}
@@ -3491,7 +3526,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
+ pr_warn("md: personality %s not loaded\n", clevel);
rv = -EINVAL;
goto out_unlock;
}
@@ -3505,8 +3540,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
}
if (!pers->takeover) {
module_put(pers->owner);
- printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
- mdname(mddev), clevel);
+ pr_warn("md: %s: %s does not support personality takeover\n",
+ mdname(mddev), clevel);
rv = -EINVAL;
goto out_unlock;
}
@@ -3526,8 +3561,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
module_put(pers->owner);
- printk(KERN_WARNING "md: %s: %s would not accept array\n",
- mdname(mddev), clevel);
+ pr_warn("md: %s: %s would not accept array\n",
+ mdname(mddev), clevel);
rv = PTR_ERR(priv);
goto out_unlock;
}
@@ -3570,9 +3605,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
- printk(KERN_WARNING
- "md: cannot register extra attributes for %s\n",
- mdname(mddev));
+ pr_warn("md: cannot register extra attributes for %s\n",
+ mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (oldpers->sync_request != NULL &&
@@ -3603,9 +3637,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
clear_bit(In_sync, &rdev->flags);
else {
if (sysfs_link_rdev(mddev, rdev))
- printk(KERN_WARNING "md: cannot register rd%d"
- " for %s after level change\n",
- rdev->raid_disk, mdname(mddev));
+ pr_warn("md: cannot register rd%d for %s after level change\n",
+ rdev->raid_disk, mdname(mddev));
}
}
@@ -3618,7 +3651,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
}
blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
@@ -3813,7 +3846,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
if (!err) {
mddev->recovery_cp = n;
if (mddev->pers)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
mddev_unlock(mddev);
return err ?: len;
@@ -3887,7 +3920,7 @@ array_state_show(struct mddev *mddev, char *page)
st = read_auto;
break;
case 0:
- if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
st = write_pending;
else if (mddev->in_sync)
st = clean;
@@ -3925,7 +3958,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
spin_lock(&mddev->lock);
if (st == active) {
restart_array(mddev);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
+ md_wakeup_thread(mddev->thread);
wake_up(&mddev->sb_wait);
err = 0;
} else /* st == clean */ {
@@ -3935,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
err = 0;
} else
@@ -4001,7 +4035,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
err = 0;
} else
@@ -4015,7 +4049,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
err = restart_array(mddev);
if (err)
break;
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -5071,13 +5105,13 @@ static int md_alloc(dev_t dev, char *name)
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
- printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
- disk->disk_name);
+ pr_debug("md: cannot register %s/md - name in use\n",
+ disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
- printk(KERN_DEBUG "pointless warning\n");
+ pr_debug("pointless warning\n");
mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
@@ -5179,15 +5213,15 @@ int md_run(struct mddev *mddev)
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
- printk("md: %s: data overlaps metadata\n",
- mdname(mddev));
+ pr_warn("md: %s: data overlaps metadata\n",
+ mdname(mddev));
return -EINVAL;
}
} else {
if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
- printk("md: %s: metadata overlaps data\n",
- mdname(mddev));
+ pr_warn("md: %s: metadata overlaps data\n",
+ mdname(mddev));
return -EINVAL;
}
}
@@ -5202,11 +5236,11 @@ int md_run(struct mddev *mddev)
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
if (mddev->level != LEVEL_NONE)
- printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
- mddev->level);
+ pr_warn("md: personality for level %d is not loaded!\n",
+ mddev->level);
else
- printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
- mddev->clevel);
+ pr_warn("md: personality for level %s is not loaded!\n",
+ mddev->clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
@@ -5236,21 +5270,16 @@ int md_run(struct mddev *mddev)
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
- printk(KERN_WARNING
- "%s: WARNING: %s appears to be"
- " on the same physical disk as"
- " %s.\n",
- mdname(mddev),
- bdevname(rdev->bdev,b),
- bdevname(rdev2->bdev,b2));
+ pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev,b),
+ bdevname(rdev2->bdev,b2));
warned = 1;
}
}
if (warned)
- printk(KERN_WARNING
- "True protection against single-disk"
- " failure might be compromised.\n");
+ pr_warn("True protection against single-disk failure might be compromised.\n");
}
mddev->recovery = 0;
@@ -5264,14 +5293,14 @@ int md_run(struct mddev *mddev)
err = pers->run(mddev);
if (err)
- printk(KERN_ERR "md: pers->run() failed ...\n");
+ pr_warn("md: pers->run() failed ...\n");
else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
- WARN_ONCE(!mddev->external_size, "%s: default size too small,"
- " but 'external_size' not in effect?\n", __func__);
- printk(KERN_ERR
- "md: invalid array_size %llu > default size %llu\n",
- (unsigned long long)mddev->array_sectors / 2,
- (unsigned long long)pers->size(mddev, 0, 0) / 2);
+ WARN_ONCE(!mddev->external_size,
+ "%s: default size too small, but 'external_size' not in effect?\n",
+ __func__);
+ pr_warn("md: invalid array_size %llu > default size %llu\n",
+ (unsigned long long)mddev->array_sectors / 2,
+ (unsigned long long)pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
}
if (err == 0 && pers->sync_request &&
@@ -5281,8 +5310,8 @@ int md_run(struct mddev *mddev)
bitmap = bitmap_create(mddev, -1);
if (IS_ERR(bitmap)) {
err = PTR_ERR(bitmap);
- printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
- mdname(mddev), err);
+ pr_warn("%s: failed to create bitmap (%d)\n",
+ mdname(mddev), err);
} else
mddev->bitmap = bitmap;
@@ -5318,9 +5347,8 @@ int md_run(struct mddev *mddev)
if (pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
- printk(KERN_WARNING
- "md: cannot register extra attributes for %s\n",
- mdname(mddev));
+ pr_warn("md: cannot register extra attributes for %s\n",
+ mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
@@ -5350,7 +5378,7 @@ int md_run(struct mddev *mddev)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->sb_flags)
md_update_sb(mddev, 0);
md_new_event(mddev);
@@ -5421,8 +5449,7 @@ static int restart_array(struct mddev *mddev)
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
- printk(KERN_INFO "md: %s switched to read-write mode.\n",
- mdname(mddev));
+ pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -5446,6 +5473,7 @@ static void md_clean(struct mddev *mddev)
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
mddev->flags = 0;
+ mddev->sb_flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
mddev->chunk_sectors = 0;
@@ -5490,12 +5518,15 @@ static void __md_stop_writes(struct mddev *mddev)
del_timer_sync(&mddev->safemode_timer);
+ if (mddev->pers && mddev->pers->quiesce) {
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ }
bitmap_flush(mddev);
- md_super_wait(mddev);
if (mddev->ro == 0 &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
- (mddev->flags & MD_UPDATE_SB_FLAGS))) {
+ mddev->sb_flags)) {
/* mark array as shutdown cleanly */
if (!mddev_is_clustered(mddev))
mddev->in_sync = 1;
@@ -5516,8 +5547,8 @@ static void mddev_detach(struct mddev *mddev)
struct bitmap *bitmap = mddev->bitmap;
/* wait for behind writes to complete */
if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
- mdname(mddev));
+ pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
+ mdname(mddev));
/* need to kick something here to make sure I/O goes? */
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
@@ -5578,20 +5609,20 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
- if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
return -EBUSY;
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- printk("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5653,7 +5684,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
mddev->sysfs_active ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- printk("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -5690,7 +5721,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
* Free resources if final stop
*/
if (mode == 0) {
- printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
+ pr_info("md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
@@ -5722,17 +5753,17 @@ static void autorun_array(struct mddev *mddev)
if (list_empty(&mddev->disks))
return;
- printk(KERN_INFO "md: running: ");
+ pr_info("md: running: ");
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
- printk("<%s>", bdevname(rdev->bdev,b));
+ pr_cont("<%s>", bdevname(rdev->bdev,b));
}
- printk("\n");
+ pr_cont("\n");
err = do_md_run(mddev);
if (err) {
- printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
+ pr_warn("md: do_md_run() returned %d\n", err);
do_md_stop(mddev, 0, NULL);
}
}
@@ -5755,7 +5786,7 @@ static void autorun_devices(int part)
struct mddev *mddev;
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md: autorun ...\n");
+ pr_info("md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
int unit;
dev_t dev;
@@ -5763,13 +5794,12 @@ static void autorun_devices(int part)
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
- printk(KERN_INFO "md: considering %s ...\n",
- bdevname(rdev0->bdev,b));
+ pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
- printk(KERN_INFO "md: adding %s ...\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: adding %s ...\n",
+ bdevname(rdev->bdev,b));
list_move(&rdev->same_set, &candidates);
}
/*
@@ -5786,8 +5816,8 @@ static void autorun_devices(int part)
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
- printk(KERN_INFO "md: unit number in %s is bad: %d\n",
- bdevname(rdev0->bdev, b), rdev0->preferred_minor);
+ pr_warn("md: unit number in %s is bad: %d\n",
+ bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
@@ -5796,21 +5826,17 @@ static void autorun_devices(int part)
if (!mddev || !mddev->gendisk) {
if (mddev)
mddev_put(mddev);
- printk(KERN_ERR
- "md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
- printk(KERN_WARNING "md: %s locked, cannot run\n",
- mdname(mddev));
+ pr_warn("md: %s locked, cannot run\n", mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: %s already running, cannot run %s\n",
+ pr_warn("md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
} else {
- printk(KERN_INFO "md: created %s\n", mdname(mddev));
+ pr_debug("md: created %s\n", mdname(mddev));
mddev->persistent = 1;
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
@@ -5829,7 +5855,7 @@ static void autorun_devices(int part)
}
mddev_put(mddev);
}
- printk(KERN_INFO "md: ... autorun DONE.\n");
+ pr_info("md: ... autorun DONE.\n");
}
#endif /* !MODULE */
@@ -5964,6 +5990,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
info.state |= (1<<MD_DISK_JOURNAL);
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
+ if (test_bit(FailFast, &rdev->flags))
+ info.state |= (1<<MD_DISK_FAILFAST);
} else {
info.major = info.minor = 0;
info.raid_disk = -1;
@@ -5985,8 +6013,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (mddev_is_clustered(mddev) &&
!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
- pr_err("%s: Cannot add to clustered mddev.\n",
- mdname(mddev));
+ pr_warn("%s: Cannot add to clustered mddev.\n",
+ mdname(mddev));
return -EINVAL;
}
@@ -5998,8 +6026,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: md_import_device returned %ld\n",
+ pr_warn("md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6010,8 +6037,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
- printk(KERN_WARNING
- "md: %s has different UUID to %s\n",
+ pr_warn("md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
@@ -6032,9 +6058,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
- printk(KERN_WARNING
- "%s: personality does not support diskops!\n",
- mdname(mddev));
+ pr_warn("%s: personality does not support diskops!\n",
+ mdname(mddev));
return -EINVAL;
}
if (mddev->persistent)
@@ -6043,8 +6068,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: md_import_device returned %ld\n",
+ pr_warn("md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6075,6 +6099,10 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
set_bit(WriteMostly, &rdev->flags);
else
clear_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
+ else
+ clear_bit(FailFast, &rdev->flags);
if (info->state & (1<<MD_DISK_JOURNAL)) {
struct md_rdev *rdev2;
@@ -6140,8 +6168,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
- printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
- mdname(mddev));
+ pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
return -EINVAL;
}
@@ -6149,8 +6176,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: error, md_import_device() returned %ld\n",
+ pr_warn("md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6166,9 +6192,11 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
if (!mddev->persistent) {
- printk(KERN_INFO "md: nonpersistent superblock ...\n");
+ pr_debug("md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev);
@@ -6207,13 +6235,17 @@ kick_rdev:
md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev);
- md_update_sb(mddev, 1);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ if (mddev->thread)
+ md_wakeup_thread(mddev->thread);
+ else
+ md_update_sb(mddev, 1);
md_new_event(mddev);
return 0;
busy:
- printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
- bdevname(rdev->bdev,b), mdname(mddev));
+ pr_debug("md: cannot remove active disk %s from %s ...\n",
+ bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
}
@@ -6227,22 +6259,19 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
return -ENODEV;
if (mddev->major_version != 0) {
- printk(KERN_WARNING "%s: HOT_ADD may only be used with"
- " version-0 superblocks.\n",
+ pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
mdname(mddev));
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
- printk(KERN_WARNING
- "%s: personality does not support diskops!\n",
+ pr_warn("%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: error, md_import_device() returned %ld\n",
+ pr_warn("md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
}
@@ -6255,8 +6284,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
- printk(KERN_WARNING
- "md: can not hot-add faulty %s disk to %s!\n",
+ pr_warn("md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
@@ -6276,7 +6304,9 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->raid_disk = -1;
- md_update_sb(mddev, 1);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ if (!mddev->thread)
+ md_update_sb(mddev, 1);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
@@ -6312,23 +6342,23 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
f = fget(fd);
if (f == NULL) {
- printk(KERN_ERR "%s: error: failed to get bitmap file\n",
- mdname(mddev));
+ pr_warn("%s: error: failed to get bitmap file\n",
+ mdname(mddev));
return -EBADF;
}
inode = f->f_mapping->host;
if (!S_ISREG(inode->i_mode)) {
- printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file must be a regular file\n",
+ mdname(mddev));
err = -EBADF;
} else if (!(f->f_mode & FMODE_WRITE)) {
- printk(KERN_ERR "%s: error: bitmap file must open for write\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file must open for write\n",
+ mdname(mddev));
err = -EBADF;
} else if (atomic_read(&inode->i_writecount) != 1) {
- printk(KERN_ERR "%s: error: bitmap file is already in use\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file is already in use\n",
+ mdname(mddev));
err = -EBUSY;
}
if (err) {
@@ -6393,8 +6423,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
- printk(KERN_INFO
- "md: superblock version %d not known\n",
+ pr_warn("md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
}
@@ -6432,9 +6461,11 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->max_disks = MD_SB_DISKS;
- if (mddev->persistent)
+ if (mddev->persistent) {
mddev->flags = 0;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ mddev->sb_flags = 0;
+ }
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
@@ -6660,8 +6691,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
if (mddev->bitmap_info.nodes) {
/* hold PW on all the bitmap lock */
if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
- printk("md: can't change bitmap to none since the"
- " array is in use by more than one node\n");
+ pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
rv = -EPERM;
md_cluster_ops->unlock_all_bitmaps(mddev);
goto err;
@@ -6829,7 +6859,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/* need to ensure recovery thread has run */
wait_event_interruptible_timeout(mddev->sb_wait,
!test_bit(MD_RECOVERY_NEEDED,
- &mddev->flags),
+ &mddev->recovery),
msecs_to_jiffies(5000));
if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
/* Need to flush page cache, and ensure no-one else opens
@@ -6847,9 +6877,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
err = mddev_lock(mddev);
if (err) {
- printk(KERN_INFO
- "md: ioctl lock interrupted, reason %d, cmd %d\n",
- err, cmd);
+ pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
+ err, cmd);
goto out;
}
@@ -6864,30 +6893,24 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
- printk(KERN_WARNING "md: couldn't update"
- " array info. %d\n", err);
+ pr_warn("md: couldn't update array info. %d\n", err);
goto unlock;
}
goto unlock;
}
if (!list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: array %s already has disks!\n",
- mdname(mddev));
+ pr_warn("md: array %s already has disks!\n", mdname(mddev));
err = -EBUSY;
goto unlock;
}
if (mddev->raid_disks) {
- printk(KERN_WARNING
- "md: array %s already initialised!\n",
- mdname(mddev));
+ pr_warn("md: array %s already initialised!\n", mdname(mddev));
err = -EBUSY;
goto unlock;
}
err = set_array_info(mddev, &info);
if (err) {
- printk(KERN_WARNING "md: couldn't set"
- " array info. %d\n", err);
+ pr_warn("md: couldn't set array info. %d\n", err);
goto unlock;
}
goto unlock;
@@ -6987,11 +7010,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/* If a device failed while we were read-only, we
* need to make sure the metadata is updated now.
*/
- if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+ if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
mddev_unlock(mddev);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
}
} else {
@@ -7092,7 +7115,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
if (test_bit(MD_CLOSING, &mddev->flags)) {
mutex_unlock(&mddev->open_mutex);
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
err = 0;
@@ -7101,6 +7125,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
check_disk_change(bdev);
out:
+ if (err)
+ mddev_put(mddev);
return err;
}
@@ -7171,10 +7197,12 @@ static int md_thread(void *arg)
wait_event_interruptible_timeout
(thread->wqueue,
test_bit(THREAD_WAKEUP, &thread->flags)
- || kthread_should_stop(),
+ || kthread_should_stop() || kthread_should_park(),
thread->timeout);
clear_bit(THREAD_WAKEUP, &thread->flags);
+ if (kthread_should_park())
+ kthread_parkme();
if (!kthread_should_stop())
thread->run(thread);
}
@@ -7588,8 +7616,8 @@ static const struct file_operations md_seq_fops = {
int register_md_personality(struct md_personality *p)
{
- printk(KERN_INFO "md: %s personality registered for level %d\n",
- p->name, p->level);
+ pr_debug("md: %s personality registered for level %d\n",
+ p->name, p->level);
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
spin_unlock(&pers_lock);
@@ -7599,7 +7627,7 @@ EXPORT_SYMBOL(register_md_personality);
int unregister_md_personality(struct md_personality *p)
{
- printk(KERN_INFO "md: %s personality unregistered\n", p->name);
+ pr_debug("md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
list_del_init(&p->list);
spin_unlock(&pers_lock);
@@ -7639,7 +7667,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
spin_lock(&pers_lock);
/* ensure module won't be unloaded */
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
- pr_err("can't find md-cluster module or get it's reference.\n");
+ pr_warn("can't find md-cluster module or get it's reference.\n");
spin_unlock(&pers_lock);
return -ENOENT;
}
@@ -7741,8 +7769,8 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
@@ -7751,7 +7779,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
}
EXPORT_SYMBOL(md_write_start);
@@ -7772,7 +7800,7 @@ EXPORT_SYMBOL(md_write_end);
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
*
- * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
+ * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
* is dropped, so return -EAGAIN after notifying userspace.
*/
int md_allow_write(struct mddev *mddev)
@@ -7787,8 +7815,8 @@ int md_allow_write(struct mddev *mddev)
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
@@ -7798,7 +7826,7 @@ int md_allow_write(struct mddev *mddev)
} else
spin_unlock(&mddev->lock);
- if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
return -EAGAIN;
else
return 0;
@@ -7914,11 +7942,9 @@ void md_do_sync(struct md_thread *thread)
mddev2->curr_resync >= mddev->curr_resync) {
if (mddev2_minor != mddev2->md_minor) {
mddev2_minor = mddev2->md_minor;
- printk(KERN_INFO "md: delaying %s of %s"
- " until %s has finished (they"
- " share one or more physical units)\n",
- desc, mdname(mddev),
- mdname(mddev2));
+ pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
+ desc, mdname(mddev),
+ mdname(mddev2));
}
mddev_put(mddev2);
if (signal_pending(current))
@@ -7975,12 +8001,10 @@ void md_do_sync(struct md_thread *thread)
}
}
- printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
- printk(KERN_INFO "md: minimum _guaranteed_ speed:"
- " %d KB/sec/disk.\n", speed_min(mddev));
- printk(KERN_INFO "md: using maximum available idle IO bandwidth "
- "(but not more than %d KB/sec) for %s.\n",
- speed_max(mddev), desc);
+ pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
+ pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
+ pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
+ speed_max(mddev), desc);
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
@@ -7997,16 +8021,15 @@ void md_do_sync(struct md_thread *thread)
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
- printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
- window/2, (unsigned long long)max_sectors/2);
+ pr_debug("md: using %dk window, over a total of %lluk.\n",
+ window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
if (j>2) {
- printk(KERN_INFO
- "md: resuming %s of %s from checkpoint.\n",
- desc, mdname(mddev));
+ pr_debug("md: resuming %s of %s from checkpoint.\n",
+ desc, mdname(mddev));
mddev->curr_resync = j;
} else
mddev->curr_resync = 3; /* no longer delayed */
@@ -8038,7 +8061,7 @@ void md_do_sync(struct md_thread *thread)
j > mddev->recovery_cp)
mddev->recovery_cp = j;
update_time = jiffies;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -8133,9 +8156,9 @@ void md_do_sync(struct md_thread *thread)
}
}
}
- printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
- test_bit(MD_RECOVERY_INTR, &mddev->recovery)
- ? "interrupted" : "done");
+ pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery)
+ ? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
@@ -8155,9 +8178,8 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
- printk(KERN_INFO
- "md: checkpointing %s of %s.\n",
- desc, mdname(mddev));
+ pr_debug("md: checkpointing %s of %s.\n",
+ desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
mddev->recovery_cp =
@@ -8187,8 +8209,8 @@ void md_do_sync(struct md_thread *thread)
/* set CHANGE_PENDING here since maybe another update is needed,
* so other nodes are informed. It should be harmless for normal
* raid */
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8288,12 +8310,12 @@ static int remove_and_add_spares(struct mddev *mddev,
if (!test_bit(Journal, &rdev->flags))
spares++;
md_new_event(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
no_add:
if (removed)
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
return spares;
}
@@ -8305,8 +8327,8 @@ static void md_start_sync(struct work_struct *ws)
mddev,
"resync");
if (!mddev->sync_thread) {
- printk(KERN_ERR "%s: could not start resync thread...\n",
- mdname(mddev));
+ pr_warn("%s: could not start resync thread...\n",
+ mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8356,8 +8378,8 @@ void md_check_recovery(struct mddev *mddev)
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
- printk(KERN_INFO "md: %s in immediate safe mode\n",
- mdname(mddev));
+ pr_debug("md: %s in immediate safe mode\n",
+ mdname(mddev));
mddev->safemode = 2;
}
flush_signals(current);
@@ -8366,7 +8388,7 @@ void md_check_recovery(struct mddev *mddev)
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
- (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
+ (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
test_bit(MD_RELOAD_SB, &mddev->flags) ||
@@ -8404,7 +8426,7 @@ void md_check_recovery(struct mddev *mddev)
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
goto unlock;
}
@@ -8432,7 +8454,7 @@ void md_check_recovery(struct mddev *mddev)
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
@@ -8441,7 +8463,7 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->sb_flags)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
@@ -8537,7 +8559,7 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev->pers->spare_active(mddev)) {
sysfs_notify(&mddev->kobj, NULL,
"degraded");
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
@@ -8552,7 +8574,7 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
- /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
+ /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
* call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
* clustered raid */
if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
@@ -8614,9 +8636,12 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
if (rv == 0) {
/* Make sure they get written out promptly */
+ if (test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify(&rdev->kobj, NULL,
+ "unacknowledged_bad_blocks");
sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(rdev->mddev->thread);
return 1;
} else
@@ -8627,12 +8652,15 @@ EXPORT_SYMBOL_GPL(rdev_set_badblocks);
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
+ int rv;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
- return badblocks_clear(&rdev->badblocks,
- s, sectors);
+ rv = badblocks_clear(&rdev->badblocks, s, sectors);
+ if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
+ return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
@@ -8749,7 +8777,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
pr_info("Activated spare: %s\n",
- bdevname(rdev2->bdev,b));
+ bdevname(rdev2->bdev,b));
/* wakeup mddev->thread here, so array could
* perform resync with the new activated disk */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -8785,15 +8813,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
* variable in case we err in the future
*/
rdev->sb_page = NULL;
- alloc_disk_sb(rdev);
- ClearPageUptodate(rdev->sb_page);
- rdev->sb_loaded = 0;
- err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version);
-
+ err = alloc_disk_sb(rdev);
+ if (err == 0) {
+ ClearPageUptodate(rdev->sb_page);
+ rdev->sb_loaded = 0;
+ err = super_types[mddev->major_version].
+ load_super(rdev, NULL, mddev->minor_version);
+ }
if (err < 0) {
pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
__func__, __LINE__, rdev->desc_nr, err);
- put_page(rdev->sb_page);
+ if (rdev->sb_page)
+ put_page(rdev->sb_page);
rdev->sb_page = swapout;
rdev->sb_loaded = 1;
return err;
@@ -8871,9 +8902,6 @@ void md_autodetect_dev(dev_t dev)
mutex_lock(&detected_devices_mutex);
list_add_tail(&node_detected_dev->list, &all_detected_devices);
mutex_unlock(&detected_devices_mutex);
- } else {
- printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
- ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
}
}
@@ -8887,7 +8915,7 @@ static void autostart_arrays(int part)
i_scanned = 0;
i_passed = 0;
- printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+ pr_info("md: Autodetecting RAID arrays.\n");
mutex_lock(&detected_devices_mutex);
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
@@ -8912,8 +8940,7 @@ static void autostart_arrays(int part)
}
mutex_unlock(&detected_devices_mutex);
- printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
- i_scanned, i_passed);
+ pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
autorun_devices(part);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2b2041773e79..e38936d05df1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -30,6 +30,16 @@
#define MaxSector (~(sector_t)0)
/*
+ * These flags should really be called "NO_RETRY" rather than
+ * "FAILFAST" because they don't make any promise about time lapse,
+ * only about the number of retries, which will be zero.
+ * REQ_FAILFAST_DRIVER is not included because
+ * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
+ * seems to suggest that the errors it avoids retrying should usually
+ * be retried.
+ */
+#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+/*
* MD's 'extended' device
*/
struct md_rdev {
@@ -168,6 +178,19 @@ enum flag_bits {
* so it is safe to remove without
* another synchronize_rcu() call.
*/
+ ExternalBbl, /* External metadata provides bad
+ * block management for a disk
+ */
+ FailFast, /* Minimal retries should be attempted on
+ * this device, so use REQ_FAILFAST_DEV.
+ * Also don't try to repair failed reads.
+ * It is expected that no bad block log
+ * is present.
+ */
+ LastDev, /* Seems to be the last working dev as
+ * it didn't fail, so don't use FailFast
+ * any more for metadata
+ */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -189,6 +212,31 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
struct md_cluster_info;
+enum mddev_flags {
+ MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
+ MD_CLOSING, /* If set, we are closing the array, do not open
+ * it then */
+ MD_JOURNAL_CLEAN, /* A raid with journal is already clean */
+ MD_HAS_JOURNAL, /* The raid array has journal feature set */
+ MD_RELOAD_SB, /* Reload the superblock because another node
+ * updated it.
+ */
+ MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
+ * already took resync lock, need to
+ * release the lock */
+ MD_FAILFAST_SUPPORTED, /* Using MD_FAILFAST on metadata writes is
+ * supported as calls to md_error() will
+ * never cause the array to become failed.
+ */
+};
+
+enum mddev_sb_flags {
+ MD_SB_CHANGE_DEVS, /* Some device status has changed */
+ MD_SB_CHANGE_CLEAN, /* transition to or from 'clean' */
+ MD_SB_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */
+ MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
+};
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -196,21 +244,7 @@ struct mddev {
int md_minor;
struct list_head disks;
unsigned long flags;
-#define MD_CHANGE_DEVS 0 /* Some device status has changed */
-#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
-#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */
-#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
-#define MD_CLOSING 4 /* If set, we are closing the array, do not open
- * it then */
-#define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */
-#define MD_HAS_JOURNAL 6 /* The raid array has journal feature set */
-#define MD_RELOAD_SB 7 /* Reload the superblock because another node
- * updated it.
- */
-#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only, which means node
- * already took resync lock, need to
- * release the lock */
+ unsigned long sb_flags;
int suspended;
atomic_t active_io;
@@ -304,31 +338,6 @@ struct mddev {
int parallel_resync;
int ok_start_degraded;
- /* recovery/resync flags
- * NEEDED: we might need to start a resync/recover
- * RUNNING: a thread is running, or about to be started
- * SYNC: actually doing a resync, not a recovery
- * RECOVER: doing recovery, or need to try it.
- * INTR: resync needs to be aborted for some reason
- * DONE: thread is done and is waiting to be reaped
- * REQUEST: user-space has requested a sync (used with SYNC)
- * CHECK: user-space request for check-only, no repair
- * RESHAPE: A reshape is happening
- * ERROR: sync-action interrupted because io-error
- *
- * If neither SYNC or RESHAPE are set, then it is a recovery.
- */
-#define MD_RECOVERY_RUNNING 0
-#define MD_RECOVERY_SYNC 1
-#define MD_RECOVERY_RECOVER 2
-#define MD_RECOVERY_INTR 3
-#define MD_RECOVERY_DONE 4
-#define MD_RECOVERY_NEEDED 5
-#define MD_RECOVERY_REQUESTED 6
-#define MD_RECOVERY_CHECK 7
-#define MD_RECOVERY_RESHAPE 8
-#define MD_RECOVERY_FROZEN 9
-#define MD_RECOVERY_ERROR 10
unsigned long recovery;
/* If a RAID personality determines that recovery (of a particular
@@ -442,6 +451,23 @@ struct mddev {
unsigned int good_device_nr; /* good device num within cluster raid */
};
+enum recovery_flags {
+ /*
+ * If neither SYNC nor RESHAPE is set, then it is a recovery.
+ */
+ MD_RECOVERY_RUNNING, /* a thread is running, or about to be started */
+ MD_RECOVERY_SYNC, /* actually doing a resync, not a recovery */
+ MD_RECOVERY_RECOVER, /* doing recovery, or need to try it. */
+ MD_RECOVERY_INTR, /* resync needs to be aborted for some reason */
+ MD_RECOVERY_DONE, /* thread is done and is waiting to be reaped */
+ MD_RECOVERY_NEEDED, /* we might need to start a resync/recover */
+ MD_RECOVERY_REQUESTED, /* user-space has requested a sync (used with SYNC) */
+ MD_RECOVERY_CHECK, /* user-space request for check-only, no repair */
+ MD_RECOVERY_RESHAPE, /* A reshape is happening */
+ MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
+ MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
+};
+
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
@@ -623,7 +649,7 @@ extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
-extern void md_super_wait(struct mddev *mddev);
+extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags,
bool metadata_op);
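
The md.h hunks above split the old mddev->flags bit space: superblock-change state moves to the new mddev->sb_flags (enum mddev_sb_flags), array-lifecycle bits such as MD_FAILFAST_SUPPORTED stay in mddev->flags, and MD_FAILFAST marks bios that should not be retried. A minimal sketch of how a personality is expected to combine them after this change follows; the two helper functions are hypothetical, only the flag and field names come from the definitions above.

static void example_submit_read(struct mddev *mddev, struct md_rdev *rdev,
				struct bio *read_bio, int in_sync_disks)
{
	/*
	 * Data path: a personality marks its own bios MD_FAILFAST when the
	 * chosen rdev is FailFast and at least one other device could still
	 * satisfy a retry (cf. the raid1/raid10 hunks further down).
	 */
	if (test_bit(FailFast, &rdev->flags) && in_sync_disks > 1)
		read_bio->bi_opf |= MD_FAILFAST;
	generic_make_request(read_bio);
}

static void example_mark_sb_dirty(struct mddev *mddev)
{
	/*
	 * Superblock-change bookkeeping now uses mddev->sb_flags; lifecycle
	 * bits such as MD_FAILFAST_SUPPORTED (which tells the core that
	 * md_error() never fails the whole array, so metadata writes may
	 * also use MD_FAILFAST) remain in mddev->flags.
	 */
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	md_wakeup_thread(mddev->thread);
}
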
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4da06d813b8f..aa8c4e5c1ee2 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -52,7 +52,7 @@ static int multipath_map (struct mpconf *conf)
}
rcu_read_unlock();
- printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
+ pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
return (-1);
}
@@ -97,9 +97,9 @@ static void multipath_end_request(struct bio *bio)
*/
char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev);
- printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_info("multipath: %s: rescheduling sector %llu\n",
+ bdevname(rdev->bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
multipath_end_bh_io(mp_bh, bio->bi_error);
@@ -194,8 +194,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
* first check if this is a queued request for a device
* which has just failed.
*/
- printk(KERN_ALERT
- "multipath: only one IO path left and IO error.\n");
+ pr_warn("multipath: only one IO path left and IO error.\n");
/* leave it active... it's all we have */
return;
}
@@ -209,11 +208,9 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
spin_unlock_irqrestore(&conf->device_lock, flags);
}
set_bit(Faulty, &rdev->flags);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "multipath: IO failure on %s,"
- " disabling IO path.\n"
- "multipath: Operation continuing"
- " on %d IO paths.\n",
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ pr_err("multipath: IO failure on %s, disabling IO path.\n"
+ "multipath: Operation continuing on %d IO paths.\n",
bdevname(rdev->bdev, b),
conf->raid_disks - mddev->degraded);
}
@@ -223,21 +220,21 @@ static void print_multipath_conf (struct mpconf *conf)
int i;
struct multipath_info *tmp;
- printk("MULTIPATH conf printout:\n");
+ pr_debug("MULTIPATH conf printout:\n");
if (!conf) {
- printk("(conf==NULL)\n");
+ pr_debug("(conf==NULL)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->multipaths + i;
if (tmp->rdev)
- printk(" disk%d, o:%d, dev:%s\n",
- i,!test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ pr_debug(" disk%d, o:%d, dev:%s\n",
+ i,!test_bit(Faulty, &tmp->rdev->flags),
+ bdevname(tmp->rdev->bdev,b));
}
}
@@ -292,8 +289,7 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
- printk(KERN_ERR "hot-remove-disk, slot %d is identified"
- " but is still operational!\n", number);
+ pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number);
err = -EBUSY;
goto abort;
}
@@ -346,16 +342,14 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
- printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
- " error for block %llu\n",
- bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
+ bdevname(bio->bi_bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO);
} else {
- printk(KERN_ERR "multipath: %s: redirecting sector %llu"
- " to another IO path\n",
- bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
+ bdevname(bio->bi_bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset;
@@ -389,8 +383,8 @@ static int multipath_run (struct mddev *mddev)
return -EINVAL;
if (mddev->level != LEVEL_MULTIPATH) {
- printk("multipath: %s: raid level not set to multipath IO (%d)\n",
- mdname(mddev), mddev->level);
+ pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n",
+ mdname(mddev), mddev->level);
goto out;
}
/*
@@ -401,21 +395,13 @@ static int multipath_run (struct mddev *mddev)
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
mddev->private = conf;
- if (!conf) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf)
goto out;
- }
conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL);
- if (!conf->multipaths) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf->multipaths)
goto out_free_conf;
- }
working_disks = 0;
rdev_for_each(rdev, mddev) {
@@ -439,7 +425,7 @@ static int multipath_run (struct mddev *mddev)
INIT_LIST_HEAD(&conf->retry_list);
if (!working_disks) {
- printk(KERN_ERR "multipath: no operational IO paths for %s\n",
+ pr_warn("multipath: no operational IO paths for %s\n",
mdname(mddev));
goto out_free_conf;
}
@@ -447,27 +433,17 @@ static int multipath_run (struct mddev *mddev)
conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
sizeof(struct multipath_bh));
- if (conf->pool == NULL) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (conf->pool == NULL)
goto out_free_conf;
- }
- {
- mddev->thread = md_register_thread(multipathd, mddev,
- "multipath");
- if (!mddev->thread) {
- printk(KERN_ERR "multipath: couldn't allocate thread"
- " for %s\n", mdname(mddev));
- goto out_free_conf;
- }
- }
+ mddev->thread = md_register_thread(multipathd, mddev,
+ "multipath");
+ if (!mddev->thread)
+ goto out_free_conf;
- printk(KERN_INFO
- "multipath: array %s active with %d out of %d IO paths\n",
+ pr_info("multipath: array %s active with %d out of %d IO paths\n",
mdname(mddev), conf->raid_disks - mddev->degraded,
- mddev->raid_disks);
+ mddev->raid_disks);
/*
* Ok, everything is just fine now
*/
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index e83047cbb2da..7938cd21fa4c 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -700,13 +700,11 @@ static int populate_ablock_with_values(struct dm_array_info *info, struct array_
{
int r;
unsigned i;
- uint32_t nr_entries;
struct dm_btree_value_type *vt = &info->value_type;
BUG_ON(le32_to_cpu(ab->nr_entries));
BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
- nr_entries = le32_to_cpu(ab->nr_entries);
for (i = 0; i < new_nr; i++) {
r = fn(base + i, element_at(info, ab, i), context);
if (r)
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 1e33dd51c21f..a6dde7cab458 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -18,6 +18,8 @@
/*----------------------------------------------------------------*/
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+
/*
* This is a read/write semaphore with a couple of differences.
*
@@ -302,6 +304,18 @@ static void report_recursive_bug(dm_block_t b, int r)
(unsigned long long) b);
}
+#else /* !CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
+#define bl_init(x) do { } while (0)
+#define bl_down_read(x) 0
+#define bl_down_read_nonblock(x) 0
+#define bl_up_read(x) do { } while (0)
+#define bl_down_write(x) 0
+#define bl_up_write(x) do { } while (0)
+#define report_recursive_bug(x, y) do { } while (0)
+
+#endif /* CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
/*----------------------------------------------------------------*/
/*
@@ -330,8 +344,11 @@ EXPORT_SYMBOL_GPL(dm_block_data);
struct buffer_aux {
struct dm_block_validator *validator;
- struct block_lock lock;
int write_locked;
+
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+ struct block_lock lock;
+#endif
};
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
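
With CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING disabled, the bl_* helpers above become no-op macros and the struct block_lock member drops out of buffer_aux; callers can keep passing &aux->lock because the unused macro argument is discarded by the preprocessor. A hedged sketch of a caller that compiles identically in both configurations (the function itself is illustrative, not part of the patch):

static int example_write_lock(struct buffer_aux *aux)
{
	int r;

	/*
	 * When the debug config is off, bl_down_write(x) expands to the
	 * constant 0 and its argument is never evaluated, so this line
	 * still compiles even though buffer_aux then has no 'lock' member.
	 */
	r = bl_down_write(&aux->lock);
	if (r)
		return r;	/* recursive acquisition detected (debug build) */

	aux->write_locked = 1;
	return 0;
}
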
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 306d2e4502c4..4c28608a0c94 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -464,7 +464,8 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
ll->nr_allocated--;
le32_add_cpu(&ie_disk.nr_free, 1);
ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
- }
+ } else
+ *ev = SM_NONE;
return ll->save_ie(ll, index, &ie_disk);
}
@@ -547,7 +548,6 @@ static int metadata_ll_init_index(struct ll_disk *ll)
if (r < 0)
return r;
- memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
ll->bitmap_root = dm_block_location(b);
dm_tm_unlock(ll->tm, b);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 7e44005595c1..20557e2c60c6 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -775,17 +775,15 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
r = sm_ll_new_metadata(&smm->ll, tm);
+ if (!r) {
+ if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+ nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
+ r = sm_ll_extend(&smm->ll, nr_blocks);
+ }
+ memcpy(&smm->sm, &ops, sizeof(smm->sm));
if (r)
return r;
- if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
- nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
- r = sm_ll_extend(&smm->ll, nr_blocks);
- if (r)
- return r;
-
- memcpy(&smm->sm, &ops, sizeof(smm->sm));
-
/*
* Now we need to update the newly created data structures with the
* allocated blocks that they were built from.
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 258986a2699d..a162fedeb51a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"
@@ -51,20 +52,21 @@ static void dump_zones(struct mddev *mddev)
char b[BDEVNAME_SIZE];
struct r0conf *conf = mddev->private;
int raid_disks = conf->strip_zone[0].nb_dev;
- printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
- mdname(mddev),
- conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
+ pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
+ mdname(mddev),
+ conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
for (j = 0; j < conf->nr_strip_zones; j++) {
- printk(KERN_INFO "md: zone%d=[", j);
+ char line[200];
+ int len = 0;
+
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- printk(KERN_CONT "%s%s", k?"/":"",
- bdevname(conf->devlist[j*raid_disks
- + k]->bdev, b));
- printk(KERN_CONT "]\n");
+ len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
+ bdevname(conf->devlist[j*raid_disks
+ + k]->bdev, b));
+ pr_debug("md: zone%d=[%s]\n", j, line);
zone_size = conf->strip_zone[j].zone_end - zone_start;
- printk(KERN_INFO " zone-offset=%10lluKB, "
- "device-offset=%10lluKB, size=%10lluKB\n",
+ pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
(unsigned long long)zone_start>>1,
(unsigned long long)conf->strip_zone[j].dev_start>>1,
(unsigned long long)zone_size>>1);
@@ -142,9 +144,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
* chunk size is a multiple of that sector size
*/
if ((mddev->chunk_sectors << 9) % blksize) {
- printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
- mdname(mddev),
- mddev->chunk_sectors << 9, blksize);
+ pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+ mdname(mddev),
+ mddev->chunk_sectors << 9, blksize);
err = -EINVAL;
goto abort;
}
@@ -186,19 +188,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
if (j < 0) {
- printk(KERN_ERR
- "md/raid0:%s: remove inactive devices before converting to RAID0\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
+ mdname(mddev));
goto abort;
}
if (j >= mddev->raid_disks) {
- printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
- "aborting!\n", mdname(mddev), j);
+ pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
+ mdname(mddev), j);
goto abort;
}
if (dev[j]) {
- printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
- "aborting!\n", mdname(mddev), j);
+ pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
+ mdname(mddev), j);
goto abort;
}
dev[j] = rdev1;
@@ -208,8 +209,8 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
cnt++;
}
if (cnt != mddev->raid_disks) {
- printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
- "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
+ pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
+ mdname(mddev), cnt, mddev->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
@@ -357,8 +358,7 @@ static int raid0_run(struct mddev *mddev)
int ret;
if (mddev->chunk_sectors == 0) {
- printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
return -EINVAL;
}
if (md_check_no_bitmap(mddev))
@@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
- printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
- mdname(mddev),
- (unsigned long long)mddev->array_sectors);
+ pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
+ mdname(mddev),
+ (unsigned long long)mddev->array_sectors);
if (mddev->queue) {
/* calculate the max read-ahead size.
@@ -464,7 +464,8 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
}
do {
- sector_t sector = bio->bi_iter.bi_sector;
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+ sector_t sector = bio_sector;
unsigned chunk_sects = mddev->chunk_sectors;
unsigned sectors = chunk_sects -
@@ -473,7 +474,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
: sector_div(sector, chunk_sects));
/* Restore due to sector_div */
- sector = bio->bi_iter.bi_sector;
+ sector = bio_sector;
if (sectors < bio_sectors(bio)) {
split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
@@ -492,8 +493,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
- } else
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+ split, disk_devt(mddev->gendisk),
+ bio_sector);
generic_make_request(split);
+ }
} while (split != bio);
}
@@ -509,17 +515,17 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
struct r0conf *priv_conf;
if (mddev->degraded != 1) {
- printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
- mdname(mddev),
- mddev->degraded);
+ pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+ mdname(mddev),
+ mddev->degraded);
return ERR_PTR(-EINVAL);
}
rdev_for_each(rdev, mddev) {
/* check slot number for a disk */
if (rdev->raid_disk == mddev->raid_disks-1) {
- printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
rdev->sectors = mddev->dev_sectors;
@@ -533,8 +539,11 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+ clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
+
return priv_conf;
}
@@ -549,19 +558,19 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
* - all mirrors must be already degraded
*/
if (mddev->layout != ((1 << 8) + 2)) {
- printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
- mdname(mddev),
- mddev->layout);
+ pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
+ mdname(mddev),
+ mddev->layout);
return ERR_PTR(-EINVAL);
}
if (mddev->raid_disks & 1) {
- printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
if (mddev->degraded != (mddev->raid_disks>>1)) {
- printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -574,6 +583,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -588,7 +598,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
* - (N - 1) mirror drives must be already faulty
*/
if ((mddev->raid_disks - 1) != mddev->degraded) {
- printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
+ pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -616,6 +626,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -631,8 +642,8 @@ static void *raid0_takeover(struct mddev *mddev)
*/
if (mddev->bitmap) {
- printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
- mdname(mddev));
+ pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
+ mdname(mddev));
return ERR_PTR(-EBUSY);
}
if (mddev->level == 4)
@@ -642,8 +653,8 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->layout == ALGORITHM_PARITY_N)
return raid0_takeover_raid45(mddev);
- printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
- mdname(mddev), ALGORITHM_PARITY_N);
+ pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+ mdname(mddev), ALGORITHM_PARITY_N);
}
if (mddev->level == 10)
@@ -652,7 +663,7 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->level == 1)
return raid0_takeover_raid1(mddev);
- printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+ pr_warn("Takeover from raid%i to raid0 not supported\n",
mddev->level);
return ERR_PTR(-EINVAL);
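
raid0_make_request above (and the raid1/raid10 paths below) now emit trace_block_bio_remap() before handing a split bio to generic_make_request(), so blktrace can attribute the child request back to the md device and the original sector. The recurring pattern, collected here purely for reference and assuming the names used in the hunks above:

static void example_remap_and_submit(struct mddev *mddev, struct bio *split,
				     sector_t bio_sector)
{
	/* Arrays without a gendisk (e.g. when driven by dm-raid) skip tracing. */
	if (mddev->gendisk)
		trace_block_bio_remap(bdev_get_queue(split->bi_bdev), split,
				      disk_devt(mddev->gendisk), bio_sector);
	generic_make_request(split);
}
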
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 29e2df5cd77b..a1f3fbed9100 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,6 +37,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"
@@ -70,6 +71,9 @@ static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);
+#define raid1_log(md, fmt, args...) \
+ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
+
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
@@ -325,6 +329,11 @@ static void raid1_end_read_request(struct bio *bio)
if (uptodate)
set_bit(R1BIO_Uptodate, &r1_bio->state);
+ else if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ /* This was a fail-fast read so we definitely
+ * want to retry */
+ ;
else {
/* If all other devices have failed, we want to return
* the error upwards rather than fail the last device.
@@ -347,13 +356,10 @@ static void raid1_end_read_request(struct bio *bio)
* oops, read error:
*/
char b[BDEVNAME_SIZE];
- printk_ratelimited(
- KERN_ERR "md/raid1:%s: %s: "
- "rescheduling sector %llu\n",
- mdname(conf->mddev),
- bdevname(rdev->bdev,
- b),
- (unsigned long long)r1_bio->sector);
+ pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
/* don't drop the reference on read_disk yet */
@@ -416,7 +422,24 @@ static void raid1_end_write_request(struct bio *bio)
set_bit(MD_RECOVERY_NEEDED, &
conf->mddev->recovery);
- set_bit(R1BIO_WriteError, &r1_bio->state);
+ if (test_bit(FailFast, &rdev->flags) &&
+ (bio->bi_opf & MD_FAILFAST) &&
+ /* We never try FailFast to WriteMostly devices */
+ !test_bit(WriteMostly, &rdev->flags)) {
+ md_error(r1_bio->mddev, rdev);
+ if (!test_bit(Faulty, &rdev->flags))
+ /* This is the only remaining device,
+ * We need to retry the write without
+ * FailFast
+ */
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ else {
+ /* Finished with this branch */
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
+ }
+ } else
+ set_bit(R1BIO_WriteError, &r1_bio->state);
} else {
/*
* Set R1BIO_Uptodate in our master bio, so that we
@@ -534,6 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
best_good_sectors = 0;
has_nonrot_disk = 0;
choose_next_idle = 0;
+ clear_bit(R1BIO_FailFast, &r1_bio->state);
if ((conf->mddev->recovery_cp < this_sector + sectors) ||
(mddev_is_clustered(conf->mddev) &&
@@ -607,6 +631,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
} else
best_good_sectors = sectors;
+ if (best_disk >= 0)
+ /* At least two disks to choose from so failfast is OK */
+ set_bit(R1BIO_FailFast, &r1_bio->state);
+
nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
@@ -645,11 +673,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
}
break;
}
- /* If device is idle, use it */
- if (pending == 0) {
- best_disk = disk;
- break;
- }
if (choose_next_idle)
continue;
@@ -672,7 +695,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
* mixed rotational/non-rotational disks depending on workload.
*/
if (best_disk == -1) {
- if (has_nonrot_disk)
+ if (has_nonrot_disk || min_pending == 0)
best_disk = best_pending_disk;
else
best_disk = best_dist_disk;
@@ -745,9 +768,14 @@ static void flush_pending_writes(struct r1conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -832,7 +860,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
else if (conf->barrier && bio_data_dir(bio) == WRITE) {
if ((conf->mddev->curr_resync_completed
>= bio_end_sector(bio)) ||
- (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ (conf->start_next_window + NEXT_NORMALIO_DISTANCE
<= bio->bi_iter.bi_sector))
wait = false;
else
@@ -858,6 +886,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
* that queue to allow conf->start_next_window
* to increase.
*/
+ raid1_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier,
!conf->array_frozen &&
(!conf->barrier ||
@@ -937,6 +966,7 @@ static void freeze_array(struct r1conf *conf, int extra)
*/
spin_lock_irq(&conf->resync_lock);
conf->array_frozen = 1;
+ raid1_log(conf->mddev, "wait freeze");
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
@@ -1019,9 +1049,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -1136,6 +1171,7 @@ read_again:
* take care not to over-take any writes
* that are 'behind'
*/
+ raid1_log(mddev, "wait behind writes");
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
}
@@ -1153,8 +1189,16 @@ read_again:
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &mirror->rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r1_bio;
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+
if (max_sectors < r1_bio->sectors) {
/* could not read all from this device, so we will
* need another r1_bio.
@@ -1195,6 +1239,7 @@ read_again:
*/
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
+ raid1_log(mddev, "wait queued");
wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests);
}
@@ -1286,6 +1331,7 @@ read_again:
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
+ raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
start_next_window = wait_barrier(conf, bio);
/*
@@ -1363,10 +1409,21 @@ read_again:
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
+ !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+ conf->raid_disks - mddev->degraded > 1)
+ mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
plug = container_of(cb, struct raid1_plug_cb, cb);
@@ -1436,6 +1493,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* next level up know.
* else mark the drive as failed
*/
+ spin_lock_irqsave(&conf->device_lock, flags);
if (test_bit(In_sync, &rdev->flags)
&& (conf->raid_disks - mddev->degraded) == 1) {
/*
@@ -1445,10 +1503,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* it is very likely to fail.
*/
conf->recovery_disabled = mddev->recovery_disabled;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
return;
}
set_bit(Blocked, &rdev->flags);
- spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
mddev->degraded++;
set_bit(Faulty, &rdev->flags);
@@ -1459,36 +1517,35 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* if recovery is running, make sure it aborts.
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- printk(KERN_ALERT
- "md/raid1:%s: Disk failure on %s, disabling device.\n"
- "md/raid1:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->raid_disks - mddev->degraded);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+ pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+ "md/raid1:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(struct r1conf *conf)
{
int i;
- printk(KERN_DEBUG "RAID1 conf printout:\n");
+ pr_debug("RAID1 conf printout:\n");
if (!conf) {
- printk(KERN_DEBUG "(!conf)\n");
+ pr_debug("(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ conf->raid_disks);
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &rdev->flags),
- !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
rcu_read_unlock();
}
@@ -1788,12 +1845,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
sector_t sect = r1_bio->sector;
int sectors = r1_bio->sectors;
int idx = 0;
+ struct md_rdev *rdev;
+
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (test_bit(FailFast, &rdev->flags)) {
+ /* Don't try recovering from here - just fail it
+ * ... unless it is the last working device of course */
+ md_error(mddev, rdev);
+ if (test_bit(Faulty, &rdev->flags))
+ /* Don't try to read from here, but make sure
+ * put_buf does its thing
+ */
+ bio->bi_end_io = end_sync_write;
+ }
while(sectors) {
int s = sectors;
int d = r1_bio->read_disk;
int success = 0;
- struct md_rdev *rdev;
int start;
if (s > (PAGE_SIZE>>9))
@@ -1825,11 +1894,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
* work just disable and interrupt the recovery.
* Don't fail devices as that won't really help.
*/
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
- " for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev, b),
- (unsigned long long)r1_bio->sector);
+ pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev),
+ bdevname(bio->bi_bdev, b),
+ (unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -2013,6 +2081,9 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
continue;
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+ wbio->bi_opf |= MD_FAILFAST;
+
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2122,13 +2193,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
- printk(KERN_INFO
- "md/raid1:%s: read error corrected "
- "(%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(sect +
- rdev->data_offset),
- bdevname(rdev->bdev, b));
+ pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(sect +
+ rdev->data_offset),
+ bdevname(rdev->bdev, b));
}
rdev_dec_pending(rdev, mddev);
} else
@@ -2287,6 +2356,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
struct bio *bio;
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
+ dev_t bio_dev;
+ sector_t bio_sector;
clear_bit(R1BIO_ReadError, &r1_bio->state);
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -2300,10 +2371,14 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
bio = r1_bio->bios[r1_bio->read_disk];
bdevname(bio->bi_bdev, b);
+ bio_dev = bio->bi_bdev->bd_dev;
+ bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
bio_put(bio);
r1_bio->bios[r1_bio->read_disk] = NULL;
- if (mddev->ro == 0) {
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (mddev->ro == 0
+ && !test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
@@ -2312,14 +2387,13 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
}
- rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
read_more:
disk = read_balance(conf, r1_bio, &max_sectors);
if (disk == -1) {
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev), b, (unsigned long long)r1_bio->sector);
+ pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), b, (unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
const unsigned long do_sync
@@ -2330,16 +2404,17 @@ read_more:
max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
- printk_ratelimited(KERN_ERR
- "md/raid1:%s: redirecting sector %llu"
- " to other mirror: %s\n",
- mdname(mddev),
- (unsigned long long)r1_bio->sector,
- bdevname(rdev->bdev, b));
+ pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev, b));
bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r1_bio;
if (max_sectors < r1_bio->sectors) {
/* Drat - have to split this up more */
@@ -2353,6 +2428,8 @@ read_more:
else
mbio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev, bio_sector);
generic_make_request(bio);
bio = NULL;
@@ -2367,8 +2444,11 @@ read_more:
sectors_handled;
goto read_more;
- } else
+ } else {
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev, bio_sector);
generic_make_request(bio);
+ }
}
}
@@ -2384,10 +2464,10 @@ static void raid1d(struct md_thread *thread)
md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--;
@@ -2441,7 +2521,7 @@ static void raid1d(struct md_thread *thread)
generic_make_request(r1_bio->bios[r1_bio->read_disk]);
cond_resched();
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev);
}
blk_finish_plug(&plug);
@@ -2623,6 +2703,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio;
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
}
}
rcu_read_unlock();
@@ -2642,7 +2724,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
min_bad, 0
) && ok;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
*skipped = 1;
put_buf(r1_bio);
@@ -2753,6 +2835,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) {
read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors);
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
}
}
@@ -2760,6 +2844,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
md_sync_acct(bio->bi_bdev, nr_sectors);
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
}
@@ -2875,12 +2961,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -ENOMEM;
conf->thread = md_register_thread(raid1d, mddev, "raid1");
- if (!conf->thread) {
- printk(KERN_ERR
- "md/raid1:%s: couldn't allocate thread\n",
- mdname(mddev));
+ if (!conf->thread)
goto abort;
- }
return conf;
@@ -2905,13 +2987,13 @@ static int raid1_run(struct mddev *mddev)
bool discard_supported = false;
if (mddev->level != 1) {
- printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
- mdname(mddev), mddev->level);
+ pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
+ mdname(mddev), mddev->level);
return -EIO;
}
if (mddev->reshape_position != MaxSector) {
- printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
- mdname(mddev));
+ pr_warn("md/raid1:%s: reshape_position set but not supported\n",
+ mdname(mddev));
return -EIO;
}
/*
@@ -2950,11 +3032,9 @@ static int raid1_run(struct mddev *mddev)
mddev->recovery_cp = MaxSector;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid1:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
- printk(KERN_INFO
- "md/raid1:%s: active with %d out of %d mirrors\n",
+ pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
@@ -2964,6 +3044,7 @@ static int raid1_run(struct mddev *mddev)
mddev->thread = conf->thread;
conf->thread = NULL;
mddev->private = conf;
+ set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
@@ -3107,9 +3188,8 @@ static int raid1_reshape(struct mddev *mddev)
rdev->raid_disk = d2;
sysfs_unlink_rdev(mddev, rdev);
if (sysfs_link_rdev(mddev, rdev))
- printk(KERN_WARNING
- "md/raid1:%s: cannot register rd%d\n",
- mdname(mddev), rdev->raid_disk);
+ pr_warn("md/raid1:%s: cannot register rd%d\n",
+ mdname(mddev), rdev->raid_disk);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
@@ -3163,9 +3243,12 @@ static void *raid1_takeover(struct mddev *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
conf = setup_conf(mddev);
- if (!IS_ERR(conf))
+ if (!IS_ERR(conf)) {
/* Array must appear to be quiesced */
conf->array_frozen = 1;
+ clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+ clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ }
return conf;
}
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 61c39b390cd8..c52ef424a24b 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -161,14 +161,15 @@ struct r1bio {
};
/* bits for r1bio.state */
-#define R1BIO_Uptodate 0
-#define R1BIO_IsSync 1
-#define R1BIO_Degraded 2
-#define R1BIO_BehindIO 3
+enum r1bio_state {
+ R1BIO_Uptodate,
+ R1BIO_IsSync,
+ R1BIO_Degraded,
+ R1BIO_BehindIO,
/* Set ReadError on bios that experience a readerror so that
* raid1d knows what to do with them.
*/
-#define R1BIO_ReadError 4
+ R1BIO_ReadError,
/* For write-behind requests, we call bi_end_io when
* the last non-write-behind device completes, providing
* any write was successful. Otherwise we call when
@@ -176,10 +177,12 @@ struct r1bio {
* with failure when last write completes (and all failed).
* Record that bi_end_io was called with this flag...
*/
-#define R1BIO_Returned 6
+ R1BIO_Returned,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag
*/
-#define R1BIO_MadeGood 7
-#define R1BIO_WriteError 8
+ R1BIO_MadeGood,
+ R1BIO_WriteError,
+ R1BIO_FailFast,
+};
#endif
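
A detail shared by the raid1.c hunks above and the raid10.c hunks below: while a write sits on the plugged/pending list, bio->bi_bdev temporarily holds the md_rdev pointer (cast to void *), so flush_pending_writes() and the unplug callbacks can notice a device that went Faulty in the meantime. A sketch of the flush side of that hand-off, restoring the real bdev before submission (illustrative only; the function name is hypothetical):

static void example_flush_one(struct bio *bio)
{
	/*
	 * The submit path queued the bio with mbio->bi_bdev = (void *)rdev;
	 * recover the rdev and put the real block device back.
	 */
	struct md_rdev *rdev = (void *)bio->bi_bdev;

	bio->bi_bdev = rdev->bdev;
	if (test_bit(Faulty, &rdev->flags)) {
		/* The device failed while the bio was queued: fail it now. */
		bio->bi_error = -EIO;
		bio_endio(bio);
	} else if (unlikely(bio_op(bio) == REQ_OP_DISCARD &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
		bio_endio(bio);	/* just ignore unsupported discards */
	} else {
		generic_make_request(bio);
	}
}
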
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 39fddda2fef2..ab5e86209322 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -25,6 +25,7 @@
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
@@ -99,12 +100,16 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
+static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);
+#define raid10_log(md, fmt, args...) \
+ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
+
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct r10conf *conf = data;
@@ -404,8 +409,7 @@ static void raid10_end_read_request(struct bio *bio)
* oops, read error - keep the refcount on the rdev
*/
char b[BDEVNAME_SIZE];
- printk_ratelimited(KERN_ERR
- "md/raid10:%s: %s: rescheduling sector %llu\n",
+ pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
mdname(conf->mddev),
bdevname(rdev->bdev, b),
(unsigned long long)r10_bio->sector);
@@ -447,6 +451,7 @@ static void raid10_end_write_request(struct bio *bio)
struct r10conf *conf = r10_bio->mddev->private;
int slot, repl;
struct md_rdev *rdev = NULL;
+ struct bio *to_put = NULL;
bool discard_error;
discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
@@ -474,8 +479,24 @@ static void raid10_end_write_request(struct bio *bio)
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
&rdev->mddev->recovery);
- set_bit(R10BIO_WriteError, &r10_bio->state);
+
dec_rdev = 0;
+ if (test_bit(FailFast, &rdev->flags) &&
+ (bio->bi_opf & MD_FAILFAST)) {
+ md_error(rdev->mddev, rdev);
+ if (!test_bit(Faulty, &rdev->flags))
+ /* This is the only remaining device,
+ * We need to retry the write without
+ * FailFast
+ */
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ else {
+ r10_bio->devs[slot].bio = NULL;
+ to_put = bio;
+ dec_rdev = 1;
+ }
+ } else
+ set_bit(R10BIO_WriteError, &r10_bio->state);
}
} else {
/*
@@ -525,6 +546,8 @@ static void raid10_end_write_request(struct bio *bio)
one_write_done(r10_bio);
if (dec_rdev)
rdev_dec_pending(rdev, conf->mddev);
+ if (to_put)
+ bio_put(to_put);
}
/*
@@ -716,6 +739,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
best_dist = MaxSector;
best_good_sectors = 0;
do_balance = 1;
+ clear_bit(R10BIO_FailFast, &r10_bio->state);
/*
* Check if we can balance. We can balance on the whole
* device if no resync is going on (recovery is ok), or below
@@ -780,15 +804,18 @@ static struct md_rdev *read_balance(struct r10conf *conf,
if (!do_balance)
break;
+ if (best_slot >= 0)
+ /* At least 2 disks to choose from so failfast is OK */
+ set_bit(R10BIO_FailFast, &r10_bio->state);
/* This optimisation is debatable, and completely destroys
* sequential read speed for 'far copies' arrays. So only
* keep it for 'near' arrays, and review those later.
*/
if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
- break;
+ new_distance = 0;
/* for far > 1 always use the lowest address */
- if (geo->far_copies > 1)
+ else if (geo->far_copies > 1)
new_distance = r10_bio->devs[slot].addr;
else
new_distance = abs(r10_bio->devs[slot].addr -
@@ -859,9 +886,14 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -937,6 +969,7 @@ static void wait_barrier(struct r10conf *conf)
* that queue to get the nr_pending
* count down.
*/
+ raid10_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier,
!conf->barrier ||
(atomic_read(&conf->nr_pending) &&
@@ -1037,9 +1070,14 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -1083,6 +1121,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
/* IO spans the reshape position. Need to wait for
* reshape to pass
*/
+ raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf);
wait_event(conf->wait_barrier,
conf->reshape_progress <= bio->bi_iter.bi_sector ||
@@ -1099,11 +1138,12 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress;
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(mddev->thread);
+ raid10_log(conf->mddev, "wait reshape metadata");
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
conf->reshape_safe = mddev->reshape_position;
}
@@ -1154,8 +1194,15 @@ read_again:
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r10_bio;
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r10_bio->sector);
if (max_sectors < r10_bio->sectors) {
/* Could not read all from this device, so we will
* need another r10_bio.
@@ -1195,6 +1242,7 @@ read_again:
*/
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
+ raid10_log(mddev, "wait queued");
wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests);
}
@@ -1322,6 +1370,7 @@ retry_write:
}
}
allow_barrier(conf);
+ raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf);
goto retry_write;
@@ -1361,8 +1410,18 @@ retry_write:
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags) &&
+ enough(conf, d))
+ mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r10_bio;
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(conf->mddev->gendisk),
+ r10_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)rdev;
+
atomic_inc(&r10_bio->remaining);
cb = blk_check_plugged(raid10_unplug, mddev,
@@ -1405,6 +1464,13 @@ retry_write:
bio_set_op_attrs(mbio, op, do_sync | do_fua);
mbio->bi_private = r10_bio;
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(conf->mddev->gendisk),
+ r10_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)rdev;
+
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
@@ -1586,14 +1652,13 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
spin_unlock_irqrestore(&conf->device_lock, flags);
- printk(KERN_ALERT
- "md/raid10:%s: Disk failure on %s, disabling device.\n"
- "md/raid10:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->geo.raid_disks - mddev->degraded);
+ pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
+ "md/raid10:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}
static void print_conf(struct r10conf *conf)
@@ -1601,13 +1666,13 @@ static void print_conf(struct r10conf *conf)
int i;
struct md_rdev *rdev;
- printk(KERN_DEBUG "RAID10 conf printout:\n");
+ pr_debug("RAID10 conf printout:\n");
if (!conf) {
- printk(KERN_DEBUG "(!conf)\n");
+ pr_debug("(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
- conf->geo.raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
+ conf->geo.raid_disks);
/* This is only called with ->reconfig_mutex held, so
* rcu protection of rdev is not needed */
@@ -1615,10 +1680,10 @@ static void print_conf(struct r10conf *conf)
char b[BDEVNAME_SIZE];
rdev = conf->mirrors[i].rdev;
if (rdev)
- printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &rdev->flags),
- !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
}
@@ -1953,6 +2018,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
/* now find blocks with errors */
for (i=0 ; i < conf->copies ; i++) {
int j, d;
+ struct md_rdev *rdev;
tbio = r10_bio->devs[i].bio;
@@ -1960,6 +2026,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue;
if (i == first)
continue;
+ d = r10_bio->devs[i].devnum;
+ rdev = conf->mirrors[d].rdev;
if (!r10_bio->devs[i].bio->bi_error) {
/* We know that the bi_io_vec layout is the same for
* both 'first' and 'i', so we just compare them.
@@ -1982,6 +2050,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
/* Don't fix anything. */
continue;
+ } else if (test_bit(FailFast, &rdev->flags)) {
+ /* Just give up on this device */
+ md_error(rdev->mddev, rdev);
+ continue;
}
/* Ok, we need to write this bio, either to correct an
* inconsistency or to correct an unreadable block.
@@ -1999,11 +2071,12 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_copy_data(tbio, fbio);
- d = r10_bio->devs[i].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
generic_make_request(tbio);
@@ -2109,10 +2182,8 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
ok = rdev_set_badblocks(rdev2, addr, s, 0);
if (!ok) {
/* just abort the recovery */
- printk(KERN_NOTICE
- "md/raid10:%s: recovery aborted"
- " due to read error\n",
- mdname(mddev));
+ pr_notice("md/raid10:%s: recovery aborted due to read error\n",
+ mdname(mddev));
conf->mirrors[dw].recovery_disabled
= mddev->recovery_disabled;
@@ -2259,14 +2330,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
char b[BDEVNAME_SIZE];
bdevname(rdev->bdev, b);
- printk(KERN_NOTICE
- "md/raid10:%s: %s: Raid device exceeded "
- "read_error threshold [cur %d:max %d]\n",
- mdname(mddev), b,
- atomic_read(&rdev->read_errors), max_read_errors);
- printk(KERN_NOTICE
- "md/raid10:%s: %s: Failing raid device\n",
- mdname(mddev), b);
+ pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+ mdname(mddev), b,
+ atomic_read(&rdev->read_errors), max_read_errors);
+ pr_notice("md/raid10:%s: %s: Failing raid device\n",
+ mdname(mddev), b);
md_error(mddev, rdev);
r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return;
@@ -2356,20 +2424,16 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: read correction "
- "write failed"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect +
- choose_data_offset(r10_bio,
- rdev)),
- bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
- mdname(mddev),
- bdevname(rdev->bdev, b));
+ pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect +
+ choose_data_offset(r10_bio,
+ rdev)),
+ bdevname(rdev->bdev, b));
+ pr_notice("md/raid10:%s: %s: failing drive\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
}
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
@@ -2397,24 +2461,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
READ)) {
case 0:
/* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: unable to read back "
- "corrected sectors"
- " (%d sectors at %llu on %s)\n",
+ pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
+ pr_notice("md/raid10:%s: %s: failing drive\n",
mdname(mddev),
bdevname(rdev->bdev, b));
break;
case 1:
- printk(KERN_INFO
- "md/raid10:%s: read error corrected"
- " (%d sectors at %llu on %s)\n",
+ pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
@@ -2503,6 +2561,8 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
char b[BDEVNAME_SIZE];
unsigned long do_sync;
int max_sectors;
+ dev_t bio_dev;
+ sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it.
@@ -2514,38 +2574,38 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
*/
bio = r10_bio->devs[slot].bio;
bdevname(bio->bi_bdev, b);
+ bio_dev = bio->bi_bdev->bd_dev;
+ bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio);
r10_bio->devs[slot].bio = NULL;
- if (mddev->ro == 0) {
+ if (mddev->ro)
+ r10_bio->devs[slot].bio = IO_BLOCKED;
+ else if (!test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1);
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
} else
- r10_bio->devs[slot].bio = IO_BLOCKED;
+ md_error(mddev, rdev);
rdev_dec_pending(rdev, mddev);
read_more:
rdev = read_balance(conf, r10_bio, &max_sectors);
if (rdev == NULL) {
- printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev), b,
- (unsigned long long)r10_bio->sector);
+ pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), b,
+ (unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
return;
}
do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
slot = r10_bio->read_slot;
- printk_ratelimited(
- KERN_ERR
- "md/raid10:%s: %s: redirecting "
- "sector %llu to another mirror\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- (unsigned long long)r10_bio->sector);
+ pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
@@ -2555,8 +2615,15 @@ read_more:
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev,
+ bio_last_sector - r10_bio->sectors);
+
if (max_sectors < r10_bio->sectors) {
/* Drat - have to split this up more */
struct bio *mbio = r10_bio->master_bio;
@@ -2694,10 +2761,10 @@ static void raid10d(struct md_thread *thread)
md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--;
@@ -2755,7 +2822,7 @@ static void raid10d(struct md_thread *thread)
}
cond_resched();
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev);
}
blk_finish_plug(&plug);
@@ -3072,6 +3139,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr +
rdev->data_offset;
@@ -3160,8 +3229,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (!any_working) {
if (!test_and_set_bit(MD_RECOVERY_INTR,
&mddev->recovery))
- printk(KERN_INFO "md/raid10:%s: insufficient "
- "working devices for recovery.\n",
+ pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
mdname(mddev));
mirror->recovery_disabled
= mddev->recovery_disabled;
@@ -3178,6 +3246,23 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rdev_dec_pending(mrdev, mddev);
if (mreplace)
rdev_dec_pending(mreplace, mddev);
+ if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
+ /* Only want this if there is elsewhere to
+ * read from. 'j' is currently the first
+ * readable copy.
+ */
+ int targets = 1;
+ for (; j < conf->copies; j++) {
+ int d = r10_bio->devs[j].devnum;
+ if (conf->mirrors[d].rdev &&
+ test_bit(In_sync,
+ &conf->mirrors[d].rdev->flags))
+ targets++;
+ }
+ if (targets == 1)
+ r10_bio->devs[0].bio->bi_opf
+ &= ~MD_FAILFAST;
+ }
}
if (biolist == NULL) {
while (r10_bio) {
@@ -3256,6 +3341,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
count++;
@@ -3279,6 +3366,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
count++;
@@ -3489,15 +3578,14 @@ static struct r10conf *setup_conf(struct mddev *mddev)
copies = setup_geo(&geo, mddev, geo_new);
if (copies == -2) {
- printk(KERN_ERR "md/raid10:%s: chunk size must be "
- "at least PAGE_SIZE(%ld) and be a power of 2.\n",
- mdname(mddev), PAGE_SIZE);
+ pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
+ mdname(mddev), PAGE_SIZE);
goto out;
}
if (copies < 2 || copies > mddev->raid_disks) {
- printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
- mdname(mddev), mddev->new_layout);
+ pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
+ mdname(mddev), mddev->new_layout);
goto out;
}
@@ -3557,9 +3645,6 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf;
out:
- if (err == -ENOMEM)
- printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
- mdname(mddev));
if (conf) {
mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors);
@@ -3656,7 +3741,7 @@ static int raid10_run(struct mddev *mddev)
}
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
- printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
+ pr_err("md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
}
@@ -3698,11 +3783,9 @@ static int raid10_run(struct mddev *mddev)
}
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid10:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
- printk(KERN_INFO
- "md/raid10:%s: active with %d out of %d devices\n",
+ pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid10:%s: active with %d out of %d devices\n",
mdname(mddev), conf->geo.raid_disks - mddev->degraded,
conf->geo.raid_disks);
/*
@@ -3712,6 +3795,7 @@ static int raid10_run(struct mddev *mddev)
size = raid10_size(mddev, 0, 0);
md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size;
+ set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
if (mddev->queue) {
int stripe = conf->geo.raid_disks *
@@ -3739,7 +3823,7 @@ static int raid10_run(struct mddev *mddev)
if (max(before_length, after_length) > min_offset_diff) {
/* This cannot work */
- printk("md/raid10: offset difference not enough to continue reshape\n");
+ pr_warn("md/raid10: offset difference not enough to continue reshape\n");
goto out_free_conf;
}
conf->offset_diff = min_offset_diff;
@@ -3846,8 +3930,8 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
struct r10conf *conf;
if (mddev->degraded > 0) {
- printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: Error: degraded raid0!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
sector_div(size, devs);
@@ -3887,9 +3971,8 @@ static void *raid10_takeover(struct mddev *mddev)
/* for raid0 takeover only one zone is supported */
raid0_conf = mddev->private;
if (raid0_conf->nr_strip_zones > 1) {
- printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
- " with more than one zone.\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
return raid10_takeover_raid0(mddev,
@@ -4078,8 +4161,8 @@ static int raid10_start_reshape(struct mddev *mddev)
sector_t size = raid10_size(mddev, 0, 0);
if (size < mddev->array_sectors) {
spin_unlock_irq(&conf->device_lock);
- printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
+ mdname(mddev));
return -EINVAL;
}
mddev->resync_max_sectors = size;
@@ -4126,7 +4209,7 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock);
mddev->raid_disks = conf->geo.raid_disks;
mddev->reshape_position = conf->reshape_progress;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -4321,9 +4404,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
else
mddev->curr_resync_completed = conf->reshape_progress;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->flags == 0 ||
+ wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
allow_barrier(conf);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 18ec1f7a98bf..3162615e57bd 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -156,5 +156,7 @@ enum r10bio_state {
* flag is set
*/
R10BIO_Previous,
+/* failfast devices did receive failfast requests. */
+ R10BIO_FailFast,
};
#endif
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 8491edcfb5a6..d7bfb6fc8aef 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2015 Shaohua Li <shli@fb.com>
+ * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -18,8 +19,10 @@
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
+#include <linux/kthread.h>
#include "md.h"
#include "raid5.h"
+#include "bitmap.h"
/*
* metadata/data stored in disk with 4k size unit (a block) regardless
@@ -28,18 +31,70 @@
#define BLOCK_SECTORS (8)
/*
- * reclaim runs every 1/4 disk size or 10G reclaimable space. This can prevent
- * recovery scans a very long log
+ * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
+ *
+ * In write through mode, the reclaim runs every log->max_free_space.
+ * This prevents recovery from having to scan a very long log
*/
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
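For a concrete sense of scale (illustrative numbers, assuming 512-byte sectors): RECLAIM_MAX_FREE_SPACE is 20,971,520 sectors, i.e. 10 GiB, and RECLAIM_MAX_FREE_SPACE_SHIFT of 2 means one quarter of the log device. So a 16 GiB journal device ends up with a max_free_space of about 4 GiB (the 1/4 term is smaller), while anything above 40 GiB is capped at 10 GiB.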
+/* wake up reclaim thread periodically */
+#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
+/* start flush with these full stripes */
+#define R5C_FULL_STRIPE_FLUSH_BATCH 256
+/* reclaim stripes in groups */
+#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
+
/*
* We only need 2 bios per I/O unit to make progress, but ensure we
* have a few more available to not get too tight.
*/
#define R5L_POOL_SIZE 4
+/*
+ * r5c journal modes of the array: write-back or write-through.
+ * write-through mode has identical behavior as existing log only
+ * implementation.
+ */
+enum r5c_journal_mode {
+ R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
+ R5C_JOURNAL_MODE_WRITE_BACK = 1,
+};
+
+static char *r5c_journal_mode_str[] = {"write-through",
+ "write-back"};
+/*
+ * raid5 cache state machine
+ *
+ * With the RAID cache, each stripe works in two phases:
+ * - caching phase
+ * - writing-out phase
+ *
+ * These two phases are controlled by bit STRIPE_R5C_CACHING:
+ * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
+ * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
+ *
+ * When there is no journal, or the journal is in write-through mode,
+ * the stripe is always in writing-out phase.
+ *
+ * For write-back journal, the stripe is sent to caching phase on write
+ * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
+ * the write-out phase by clearing STRIPE_R5C_CACHING.
+ *
+ * Stripes in caching phase do not write the raid disks. Instead, all
+ * writes are committed from the log device. Therefore, a stripe in
+ * caching phase handles writes as:
+ * - write to log device
+ * - return IO
+ *
+ * Stripes in writing-out phase handle writes as:
+ * - calculate parity
+ * - write pending data and parity to journal
+ * - write data and parity to raid disks
+ * - return IO for pending writes
+ */
+
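As a minimal sketch of the state machine described above (a hypothetical helper, not part of the patch; STRIPE_R5C_CACHING and test_bit() are used exactly as elsewhere in this file):

	/* hypothetical helper: report which phase a stripe is in */
	static inline bool r5c_stripe_is_caching(struct stripe_head *sh)
	{
		/*
		 * bit set:   caching phase - writes are committed to the journal only
		 * bit clear: writing-out phase - parity is computed, raid disks written
		 */
		return test_bit(STRIPE_R5C_CACHING, &sh->state);
	}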
struct r5l_log {
struct md_rdev *rdev;
@@ -58,7 +113,6 @@ struct r5l_log {
u64 seq; /* log head sequence */
sector_t next_checkpoint;
- u64 next_cp_seq;
struct mutex io_mutex;
struct r5l_io_unit *current_io; /* current io_unit accepting new data */
@@ -96,6 +150,18 @@ struct r5l_log {
spinlock_t no_space_stripes_lock;
bool need_cache_flush;
+
+ /* for r5c_cache */
+ enum r5c_journal_mode r5c_journal_mode;
+
+ /* all stripes in r5cache, in the order of seq at sh->log_start */
+ struct list_head stripe_in_journal_list;
+
+ spinlock_t stripe_in_journal_lock;
+ atomic_t stripe_in_journal_count;
+
+ /* to submit async io_units, to fulfill ordering of flush */
+ struct work_struct deferred_io_work;
};
/*
@@ -122,6 +188,18 @@ struct r5l_io_unit {
int state;
bool need_split_bio;
+ struct bio *split_bio;
+
+ unsigned int has_flush:1; /* include flush request */
+ unsigned int has_fua:1; /* include fua request */
+ unsigned int has_null_flush:1; /* include empty flush request */
+ /*
+ * io isn't sent yet; a flush/fua request can only be submitted once it is
+ * the first IO in the running_ios list
+ */
+ unsigned int io_deferred:1;
+
+ struct bio_list flush_barriers; /* size == 0 flush bios */
};
/* r5l_io_unit state */
@@ -133,6 +211,12 @@ enum r5l_io_unit_state {
IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
};
+bool r5c_is_writeback(struct r5l_log *log)
+{
+ return (log != NULL &&
+ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
+}
+
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
start += inc;
@@ -168,12 +252,235 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
io->state = state;
}
+static void
+r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev,
+ struct bio_list *return_bi)
+{
+ struct bio *wbi, *wbi2;
+
+ wbi = dev->written;
+ dev->written = NULL;
+ while (wbi && wbi->bi_iter.bi_sector <
+ dev->sector + STRIPE_SECTORS) {
+ wbi2 = r5_next_bio(wbi, dev->sector);
+ if (!raid5_dec_bi_active_stripes(wbi)) {
+ md_write_end(conf->mddev);
+ bio_list_add(return_bi, wbi);
+ }
+ wbi = wbi2;
+ }
+}
+
+void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks, struct bio_list *return_bi)
+{
+ int i;
+
+ for (i = sh->disks; i--; ) {
+ if (sh->dev[i].written) {
+ set_bit(R5_UPTODATE, &sh->dev[i].flags);
+ r5c_return_dev_pending_writes(conf, &sh->dev[i],
+ return_bi);
+ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS,
+ !test_bit(STRIPE_DEGRADED, &sh->state),
+ 0);
+ }
+ }
+}
+
+/* Check whether we should flush some stripes to free up stripe cache */
+void r5c_check_stripe_cache_usage(struct r5conf *conf)
+{
+ int total_cached;
+
+ if (!r5c_is_writeback(conf->log))
+ return;
+
+ total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+ atomic_read(&conf->r5c_cached_full_stripes);
+
+ /*
+ * The following condition is true for either of the following:
+ * - stripe cache pressure high:
+ * total_cached > 3/4 min_nr_stripes ||
+ * empty_inactive_list_nr > 0
+ * - stripe cache pressure moderate:
+ * total_cached > 1/2 min_nr_stripes
+ */
+ if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ atomic_read(&conf->empty_inactive_list_nr) > 0)
+ r5l_wake_reclaim(conf->log, 0);
+}
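To put numbers on the thresholds in the comment above (illustrative, using 256 as conf->min_nr_stripes, which is the usual default): the moderate-pressure condition fires once total_cached exceeds 128, and the high-pressure description corresponds to total_cached above 192 or a non-empty empty_inactive_list_nr; the code wakes reclaim at the moderate threshold.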
+
+/*
+ * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
+ * stripes in the cache
+ */
+void r5c_check_cached_full_stripe(struct r5conf *conf)
+{
+ if (!r5c_is_writeback(conf->log))
+ return;
+
+ /*
+ * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
+ * or a full stripe (chunk size / 4k stripes).
+ */
+ if (atomic_read(&conf->r5c_cached_full_stripes) >=
+ min(R5C_FULL_STRIPE_FLUSH_BATCH,
+ conf->chunk_sectors >> STRIPE_SHIFT))
+ r5l_wake_reclaim(conf->log, 0);
+}
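As an example of the min() above (assuming 4 KiB pages, so STRIPE_SHIFT is 3): a 64 KiB chunk is 128 sectors, i.e. 16 stripes, so reclaim wakes after 16 cached full stripes; only with chunks of 1 MiB and larger does the R5C_FULL_STRIPE_FLUSH_BATCH limit of 256 take over.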
+
+/*
+ * Total log space (in sectors) needed to flush all data in cache
+ *
+ * Currently, writing-out phase automatically includes all pending writes
+ * to the same sector. So the reclaim of each stripe takes up to
+ * (conf->raid_disks + 1) pages of log space.
+ *
+ * To totally avoid deadlock due to log space, the code reserves
+ * (conf->raid_disks + 1) pages for each stripe in cache, which is not
+ * necessary in most cases.
+ *
+ * To improve this, we will need writing-out phase to be able to NOT include
+ * pending writes, which will reduce the requirement to
+ * (conf->max_degraded + 1) pages per stripe in cache.
+ */
+static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
+{
+ struct r5l_log *log = conf->log;
+
+ if (!r5c_is_writeback(log))
+ return 0;
+
+ return BLOCK_SECTORS * (conf->raid_disks + 1) *
+ atomic_read(&log->stripe_in_journal_count);
+}
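Worked example for the reservation above (BLOCK_SECTORS is 8, i.e. 4 KiB blocks): with conf->raid_disks == 5 and 100 stripes currently in the journal, the function returns 8 * 6 * 100 = 4800 sectors, roughly 2.3 MiB of log space reserved for flushing the cache.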
+
+/*
+ * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
+ *
+ * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
+ * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
+ * device is less than 2x of reclaim_required_space.
+ */
+static inline void r5c_update_log_state(struct r5l_log *log)
+{
+ struct r5conf *conf = log->rdev->mddev->private;
+ sector_t free_space;
+ sector_t reclaim_space;
+ bool wake_reclaim = false;
+
+ if (!r5c_is_writeback(log))
+ return;
+
+ free_space = r5l_ring_distance(log, log->log_start,
+ log->last_checkpoint);
+ reclaim_space = r5c_log_required_to_flush_cache(conf);
+ if (free_space < 2 * reclaim_space)
+ set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+ else {
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+ wake_reclaim = true;
+ clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+ }
+ if (free_space < 3 * reclaim_space)
+ set_bit(R5C_LOG_TIGHT, &conf->cache_state);
+ else
+ clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
+
+ if (wake_reclaim)
+ r5l_wake_reclaim(log, 0);
+}
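Continuing that example: if reclaim_space is 4800 sectors, R5C_LOG_CRITICAL is set once free_space drops below 9600 sectors and R5C_LOG_TIGHT below 14400 sectors (about 4.7 MiB and 7 MiB respectively).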
+
+/*
+ * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
+ * This function should only be called in write-back mode.
+ */
+void r5c_make_stripe_write_out(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct r5l_log *log = conf->log;
+
+ BUG_ON(!r5c_is_writeback(log));
+
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ clear_bit(STRIPE_R5C_CACHING, &sh->state);
+
+ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
+
+ if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
+ BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
+ atomic_dec(&conf->r5c_cached_partial_stripes);
+ }
+
+ if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
+ BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
+ atomic_dec(&conf->r5c_cached_full_stripes);
+ }
+}
+
+static void r5c_handle_data_cached(struct stripe_head *sh)
+{
+ int i;
+
+ for (i = sh->disks; i--; )
+ if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+ set_bit(R5_InJournal, &sh->dev[i].flags);
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ }
+ clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
+}
+
+/*
+ * this journal write must contain full parity,
+ * it may also contain some data pages
+ */
+static void r5c_handle_parity_cached(struct stripe_head *sh)
+{
+ int i;
+
+ for (i = sh->disks; i--; )
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
+}
+
+/*
+ * Setting proper flags after writing (or flushing) data and/or parity to the
+ * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
+ */
+static void r5c_finish_cache_stripe(struct stripe_head *sh)
+{
+ struct r5l_log *log = sh->raid_conf->log;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+ /*
+ * Set R5_InJournal for parity dev[pd_idx]. This means
+ * all data AND parity in the journal. For RAID 6, it is
+ * NOT necessary to set the flag for dev[qd_idx], as the
+ * two parities are written out together.
+ */
+ set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+ } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ r5c_handle_data_cached(sh);
+ } else {
+ r5c_handle_parity_cached(sh);
+ set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+ }
+}
+
static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
struct stripe_head *sh, *next;
list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
list_del_init(&sh->log_list);
+
+ r5c_finish_cache_stripe(sh);
+
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
}
@@ -209,9 +516,11 @@ static void r5l_move_to_end_ios(struct r5l_log *log)
}
}
+static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
struct r5l_io_unit *io = bio->bi_private;
+ struct r5l_io_unit *io_deferred;
struct r5l_log *log = io->log;
unsigned long flags;
@@ -227,18 +536,89 @@ static void r5l_log_endio(struct bio *bio)
r5l_move_to_end_ios(log);
else
r5l_log_run_stripes(log);
+ if (!list_empty(&log->running_ios)) {
+ /*
+ * FLUSH/FUA io_unit is deferred because of ordering, now we
+ * can dispatch it
+ */
+ io_deferred = list_first_entry(&log->running_ios,
+ struct r5l_io_unit, log_sibling);
+ if (io_deferred->io_deferred)
+ schedule_work(&log->deferred_io_work);
+ }
+
spin_unlock_irqrestore(&log->io_list_lock, flags);
if (log->need_cache_flush)
md_wakeup_thread(log->rdev->mddev->thread);
+
+ if (io->has_null_flush) {
+ struct bio *bi;
+
+ WARN_ON(bio_list_empty(&io->flush_barriers));
+ while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
+ bio_endio(bi);
+ atomic_dec(&io->pending_stripe);
+ }
+ if (atomic_read(&io->pending_stripe) == 0)
+ __r5l_stripe_write_finished(io);
+ }
+}
+
+static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+ if (io->has_flush)
+ io->current_bio->bi_opf |= REQ_PREFLUSH;
+ if (io->has_fua)
+ io->current_bio->bi_opf |= REQ_FUA;
+ submit_bio(io->current_bio);
+
+ if (!io->split_bio)
+ return;
+
+ if (io->has_flush)
+ io->split_bio->bi_opf |= REQ_PREFLUSH;
+ if (io->has_fua)
+ io->split_bio->bi_opf |= REQ_FUA;
+ submit_bio(io->split_bio);
+}
+
+/* deferred io_unit will be dispatched here */
+static void r5l_submit_io_async(struct work_struct *work)
+{
+ struct r5l_log *log = container_of(work, struct r5l_log,
+ deferred_io_work);
+ struct r5l_io_unit *io = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ if (!list_empty(&log->running_ios)) {
+ io = list_first_entry(&log->running_ios, struct r5l_io_unit,
+ log_sibling);
+ if (!io->io_deferred)
+ io = NULL;
+ else
+ io->io_deferred = 0;
+ }
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+ if (io)
+ r5l_do_submit_io(log, io);
}
static void r5l_submit_current_io(struct r5l_log *log)
{
struct r5l_io_unit *io = log->current_io;
+ struct bio *bio;
struct r5l_meta_block *block;
unsigned long flags;
u32 crc;
+ bool do_submit = true;
if (!io)
return;
@@ -247,13 +627,20 @@ static void r5l_submit_current_io(struct r5l_log *log)
block->meta_size = cpu_to_le32(io->meta_offset);
crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
block->checksum = cpu_to_le32(crc);
+ bio = io->current_bio;
log->current_io = NULL;
spin_lock_irqsave(&log->io_list_lock, flags);
- __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+ if (io->has_flush || io->has_fua) {
+ if (io != list_first_entry(&log->running_ios,
+ struct r5l_io_unit, log_sibling)) {
+ io->io_deferred = 1;
+ do_submit = false;
+ }
+ }
spin_unlock_irqrestore(&log->io_list_lock, flags);
-
- submit_bio(io->current_bio);
+ if (do_submit)
+ r5l_do_submit_io(log, io);
}
static struct bio *r5l_bio_alloc(struct r5l_log *log)
@@ -271,6 +658,7 @@ static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
+ r5c_update_log_state(log);
/*
* If we filled up the log device start from the beginning again,
* which will require a new bio.
@@ -297,6 +685,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
io->log = log;
INIT_LIST_HEAD(&io->log_sibling);
INIT_LIST_HEAD(&io->stripe_list);
+ bio_list_init(&io->flush_barriers);
io->state = IO_UNIT_RUNNING;
io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
@@ -367,12 +756,11 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
struct r5l_io_unit *io = log->current_io;
if (io->need_split_bio) {
- struct bio *prev = io->current_bio;
-
+ BUG_ON(io->split_bio);
+ io->split_bio = io->current_bio;
io->current_bio = r5l_bio_alloc(log);
- bio_chain(io->current_bio, prev);
-
- submit_bio(prev);
+ bio_chain(io->current_bio, io->split_bio);
+ io->need_split_bio = false;
}
if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
@@ -401,50 +789,85 @@ static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
io = log->current_io;
+ if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
+ io->has_flush = 1;
+
for (i = 0; i < sh->disks; i++) {
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+ test_bit(R5_InJournal, &sh->dev[i].flags))
continue;
if (i == sh->pd_idx || i == sh->qd_idx)
continue;
+ if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
+ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+ io->has_fua = 1;
+ /*
+ * we need to flush journal to make sure recovery can
+ * reach the data with fua flag
+ */
+ io->has_flush = 1;
+ }
r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
raid5_compute_blocknr(sh, i, 0),
sh->dev[i].log_checksum, 0, false);
r5l_append_payload_page(log, sh->dev[i].page);
}
- if (sh->qd_idx >= 0) {
+ if (parity_pages == 2) {
r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
sh->sector, sh->dev[sh->pd_idx].log_checksum,
sh->dev[sh->qd_idx].log_checksum, true);
r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
- } else {
+ } else if (parity_pages == 1) {
r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
sh->sector, sh->dev[sh->pd_idx].log_checksum,
0, false);
r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
- }
+ } else /* Just writing data, not parity, in caching phase */
+ BUG_ON(parity_pages != 0);
list_add_tail(&sh->log_list, &io->stripe_list);
atomic_inc(&io->pending_stripe);
sh->log_io = io;
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return 0;
+
+ if (sh->log_start == MaxSector) {
+ BUG_ON(!list_empty(&sh->r5c));
+ sh->log_start = io->log_start;
+ spin_lock_irq(&log->stripe_in_journal_lock);
+ list_add_tail(&sh->r5c,
+ &log->stripe_in_journal_list);
+ spin_unlock_irq(&log->stripe_in_journal_lock);
+ atomic_inc(&log->stripe_in_journal_count);
+ }
return 0;
}
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+/* add stripe to no_space_stripes, and then wake up reclaim */
+static inline void r5l_add_no_space_stripe(struct r5l_log *log,
+ struct stripe_head *sh)
+{
+ spin_lock(&log->no_space_stripes_lock);
+ list_add_tail(&sh->log_list, &log->no_space_stripes);
+ spin_unlock(&log->no_space_stripes_lock);
+}
+
/*
* running in raid5d, where reclaim could wait for raid5d too (when it flushes
* data from log to raid disks), so we shouldn't wait for reclaim here
*/
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
+ struct r5conf *conf = sh->raid_conf;
int write_disks = 0;
int data_pages, parity_pages;
- int meta_size;
int reserve;
int i;
int ret = 0;
+ bool wake_reclaim = false;
if (!log)
return -EAGAIN;
@@ -456,11 +879,15 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
return -EAGAIN;
}
+ WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+
for (i = 0; i < sh->disks; i++) {
void *addr;
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+ test_bit(R5_InJournal, &sh->dev[i].flags))
continue;
+
write_disks++;
/* checksum is already calculated in last run */
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
@@ -473,15 +900,6 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
parity_pages = 1 + !!(sh->qd_idx >= 0);
data_pages = write_disks - parity_pages;
- meta_size =
- ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
- * data_pages) +
- sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) * parity_pages;
- /* Doesn't work with very big raid array */
- if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
- return -EINVAL;
-
set_bit(STRIPE_LOG_TRAPPED, &sh->state);
/*
* The stripe must enter state machine again to finish the write, so
@@ -493,22 +911,49 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
mutex_lock(&log->io_mutex);
/* meta + data */
reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
- if (!r5l_has_free_space(log, reserve)) {
- spin_lock(&log->no_space_stripes_lock);
- list_add_tail(&sh->log_list, &log->no_space_stripes);
- spin_unlock(&log->no_space_stripes_lock);
- r5l_wake_reclaim(log, reserve);
- } else {
- ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
- if (ret) {
- spin_lock_irq(&log->io_list_lock);
- list_add_tail(&sh->log_list, &log->no_mem_stripes);
- spin_unlock_irq(&log->io_list_lock);
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ if (!r5l_has_free_space(log, reserve)) {
+ r5l_add_no_space_stripe(log, sh);
+ wake_reclaim = true;
+ } else {
+ ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list,
+ &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
+ } else { /* R5C_JOURNAL_MODE_WRITE_BACK */
+ /*
+ * log space critical, do not process stripes that are
+ * not in cache yet (sh->log_start == MaxSector).
+ */
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+ sh->log_start == MaxSector) {
+ r5l_add_no_space_stripe(log, sh);
+ wake_reclaim = true;
+ reserve = 0;
+ } else if (!r5l_has_free_space(log, reserve)) {
+ if (sh->log_start == log->last_checkpoint)
+ BUG();
+ else
+ r5l_add_no_space_stripe(log, sh);
+ } else {
+ ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list,
+ &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
}
}
mutex_unlock(&log->io_mutex);
+ if (wake_reclaim)
+ r5l_wake_reclaim(log, reserve);
return 0;
}
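For the log-space reservation in this function (reserve = (1 + write_disks) << (PAGE_SHIFT - 9)), an illustrative calculation assuming 4 KiB pages: each page occupies 8 sectors in the log, so a stripe writing five pages of data and parity reserves (1 + 5) * 8 = 48 sectors, the extra page being the meta block.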
@@ -525,17 +970,34 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
if (!log)
return -ENODEV;
- /*
- * we flush log disk cache first, then write stripe data to raid disks.
- * So if bio is finished, the log disk cache is flushed already. The
- * recovery guarantees we can recovery the bio from log disk, so we
- * don't need to flush again
- */
- if (bio->bi_iter.bi_size == 0) {
- bio_endio(bio);
- return 0;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ /*
+ * in write through (journal only)
+ * we flush log disk cache first, then write stripe data to
+ * raid disks. So if bio is finished, the log disk cache is
+ * flushed already. The recovery guarantees we can recover
+ * the bio from log disk, so we don't need to flush again
+ */
+ if (bio->bi_iter.bi_size == 0) {
+ bio_endio(bio);
+ return 0;
+ }
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ } else {
+ /* write back (with cache) */
+ if (bio->bi_iter.bi_size == 0) {
+ mutex_lock(&log->io_mutex);
+ r5l_get_meta(log, 0);
+ bio_list_add(&log->current_io->flush_barriers, bio);
+ log->current_io->has_flush = 1;
+ log->current_io->has_null_flush = 1;
+ atomic_inc(&log->current_io->pending_stripe);
+ r5l_submit_current_io(log);
+ mutex_unlock(&log->io_mutex);
+ return 0;
+ }
}
- bio->bi_opf &= ~REQ_PREFLUSH;
return -EAGAIN;
}
@@ -555,10 +1017,40 @@ static void r5l_run_no_space_stripes(struct r5l_log *log)
spin_unlock(&log->no_space_stripes_lock);
}
+/*
+ * calculate new last_checkpoint
+ * for write through mode, returns log->next_checkpoint
+ * for write back, returns log_start of first sh in stripe_in_journal_list
+ */
+static sector_t r5c_calculate_new_cp(struct r5conf *conf)
+{
+ struct stripe_head *sh;
+ struct r5l_log *log = conf->log;
+ sector_t new_cp;
+ unsigned long flags;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return log->next_checkpoint;
+
+ spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+ if (list_empty(&conf->log->stripe_in_journal_list)) {
+ /* all stripes flushed */
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ return log->next_checkpoint;
+ }
+ sh = list_first_entry(&conf->log->stripe_in_journal_list,
+ struct stripe_head, r5c);
+ new_cp = sh->log_start;
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ return new_cp;
+}
+
static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
+ struct r5conf *conf = log->rdev->mddev->private;
+
return r5l_ring_distance(log, log->last_checkpoint,
- log->next_checkpoint);
+ r5c_calculate_new_cp(conf));
}
static void r5l_run_no_mem_stripe(struct r5l_log *log)
@@ -589,7 +1081,6 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
break;
log->next_checkpoint = io->log_start;
- log->next_cp_seq = io->seq;
list_del(&io->log_sibling);
mempool_free(io, log->io_pool);
@@ -604,6 +1095,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
struct r5l_log *log = io->log;
+ struct r5conf *conf = log->rdev->mddev->private;
unsigned long flags;
spin_lock_irqsave(&log->io_list_lock, flags);
@@ -614,7 +1106,8 @@ static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
return;
}
- if (r5l_reclaimable_space(log) > log->max_free_space)
+ if (r5l_reclaimable_space(log) > log->max_free_space ||
+ test_bit(R5C_LOG_TIGHT, &conf->cache_state))
r5l_wake_reclaim(log, 0);
spin_unlock_irqrestore(&log->io_list_lock, flags);
@@ -713,8 +1206,8 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
* there is a deadlock. We workaround this issue with a trylock.
* FIXME: we could miss discard if we can't take reconfig mutex
*/
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
if (!mddev_trylock(mddev))
return;
md_update_sb(mddev, 1);
@@ -735,15 +1228,148 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
}
}
+/*
+ * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
+ * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
+ *
+ * must hold conf->device_lock
+ */
+static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
+{
+ BUG_ON(list_empty(&sh->lru));
+ BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
+
+ /*
+ * The stripe is not ON_RELEASE_LIST, so it is safe to call
+ * raid5_release_stripe() while holding conf->device_lock
+ */
+ BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
+ assert_spin_locked(&conf->device_lock);
+
+ list_del_init(&sh->lru);
+ atomic_inc(&sh->count);
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ atomic_inc(&conf->active_stripes);
+ r5c_make_stripe_write_out(sh);
+
+ raid5_release_stripe(sh);
+}
+
+/*
+ * if num == 0, flush all full stripes
+ * if num > 0, flush all full stripes. If less than num full stripes are
+ * flushed, flush some partial stripes until a total of num stripes are
+ * flushed, or there are no more cached stripes.
+ */
+void r5c_flush_cache(struct r5conf *conf, int num)
+{
+ int count;
+ struct stripe_head *sh, *next;
+
+ assert_spin_locked(&conf->device_lock);
+ if (!conf->log)
+ return;
+
+ count = 0;
+ list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
+ r5c_flush_stripe(conf, sh);
+ count++;
+ }
+
+ if (count >= num)
+ return;
+ list_for_each_entry_safe(sh, next,
+ &conf->r5c_partial_stripe_list, lru) {
+ r5c_flush_stripe(conf, sh);
+ if (++count >= num)
+ break;
+ }
+}
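A minimal usage sketch of r5c_flush_cache() (illustrative only, mirroring the call made from r5c_do_reclaim() below; 'flags' is assumed to be a local unsigned long): the caller must hold conf->device_lock, as the assert above requires.

	/* illustrative only; 'flags' is a local unsigned long */
	spin_lock_irqsave(&conf->device_lock, flags);
	r5c_flush_cache(conf, 0);	/* num == 0: flush all full stripes */
	spin_unlock_irqrestore(&conf->device_lock, flags);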
+
+static void r5c_do_reclaim(struct r5conf *conf)
+{
+ struct r5l_log *log = conf->log;
+ struct stripe_head *sh;
+ int count = 0;
+ unsigned long flags;
+ int total_cached;
+ int stripes_to_flush;
+
+ if (!r5c_is_writeback(log))
+ return;
+
+ total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+ atomic_read(&conf->r5c_cached_full_stripes);
+
+ if (total_cached > conf->min_nr_stripes * 3 / 4 ||
+ atomic_read(&conf->empty_inactive_list_nr) > 0)
+ /*
+ * if stripe cache pressure high, flush all full stripes and
+ * some partial stripes
+ */
+ stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
+ else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ atomic_read(&conf->r5c_cached_full_stripes) >
+ R5C_FULL_STRIPE_FLUSH_BATCH)
+ /*
+ * if stripe cache pressure is moderate, or if there are many full
+ * stripes, flush all full stripes
+ */
+ stripes_to_flush = 0;
+ else
+ /* no need to flush */
+ stripes_to_flush = -1;
+
+ if (stripes_to_flush >= 0) {
+ spin_lock_irqsave(&conf->device_lock, flags);
+ r5c_flush_cache(conf, stripes_to_flush);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
+
+ /* if log space is tight, flush stripes on stripe_in_journal_list */
+ if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
+ spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+ spin_lock(&conf->device_lock);
+ list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
+ /*
+ * stripes on stripe_in_journal_list could be in any
+ * state of the stripe_cache state machine. In this
+ * case, we only want to flush stripe on
+ * r5c_cached_full/partial_stripes. The following
+ * condition makes sure the stripe is on one of the
+ * two lists.
+ */
+ if (!list_empty(&sh->lru) &&
+ !test_bit(STRIPE_HANDLE, &sh->state) &&
+ atomic_read(&sh->count) == 0) {
+ r5c_flush_stripe(conf, sh);
+ }
+ if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
+ break;
+ }
+ spin_unlock(&conf->device_lock);
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ }
+
+ if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+ r5l_run_no_space_stripes(log);
+
+ md_wakeup_thread(conf->mddev->thread);
+}
static void r5l_do_reclaim(struct r5l_log *log)
{
+ struct r5conf *conf = log->rdev->mddev->private;
sector_t reclaim_target = xchg(&log->reclaim_target, 0);
sector_t reclaimable;
sector_t next_checkpoint;
- u64 next_cp_seq;
+ bool write_super;
spin_lock_irq(&log->io_list_lock);
+ write_super = r5l_reclaimable_space(log) > log->max_free_space ||
+ reclaim_target != 0 || !list_empty(&log->no_space_stripes);
/*
* move proper io_unit to reclaim list. We should not change the order.
* reclaimable/unreclaimable io_unit can be mixed in the list, we
@@ -764,12 +1390,12 @@ static void r5l_do_reclaim(struct r5l_log *log)
log->io_list_lock);
}
- next_checkpoint = log->next_checkpoint;
- next_cp_seq = log->next_cp_seq;
+ next_checkpoint = r5c_calculate_new_cp(conf);
spin_unlock_irq(&log->io_list_lock);
BUG_ON(reclaimable < 0);
- if (reclaimable == 0)
+
+ if (reclaimable == 0 || !write_super)
return;
/*
@@ -781,7 +1407,7 @@ static void r5l_do_reclaim(struct r5l_log *log)
mutex_lock(&log->io_mutex);
log->last_checkpoint = next_checkpoint;
- log->last_cp_seq = next_cp_seq;
+ r5c_update_log_state(log);
mutex_unlock(&log->io_mutex);
r5l_run_no_space_stripes(log);
@@ -795,14 +1421,17 @@ static void r5l_reclaim_thread(struct md_thread *thread)
if (!log)
return;
+ r5c_do_reclaim(conf);
r5l_do_reclaim(log);
}
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
+void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
unsigned long target;
unsigned long new = (unsigned long)space; /* overflow in theory */
+ if (!log)
+ return;
do {
target = log->reclaim_target;
if (new < target)
@@ -816,22 +1445,14 @@ void r5l_quiesce(struct r5l_log *log, int state)
struct mddev *mddev;
if (!log || state == 2)
return;
- if (state == 0) {
- /*
- * This is a special case for hotadd. In suspend, the array has
- * no journal. In resume, journal is initialized as well as the
- * reclaim thread.
- */
- if (log->reclaim_thread)
- return;
- log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
- log->rdev->mddev, "reclaim");
- } else if (state == 1) {
+ if (state == 0)
+ kthread_unpark(log->reclaim_thread->tsk);
+ else if (state == 1) {
/* make sure r5l_write_super_and_discard_space exits */
mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
- r5l_wake_reclaim(log, -1L);
- md_unregister_thread(&log->reclaim_thread);
+ kthread_park(log->reclaim_thread->tsk);
+ r5l_wake_reclaim(log, MaxSector);
r5l_do_reclaim(log);
}
}
@@ -857,10 +1478,13 @@ struct r5l_recovery_ctx {
sector_t meta_total_blocks; /* total size of current meta and data */
sector_t pos; /* recovery position */
u64 seq; /* recovery position seq */
+ int data_parity_stripes; /* number of data_parity stripes */
+ int data_only_stripes; /* number of data_only stripes */
+ struct list_head cached_list;
};
-static int r5l_read_meta_block(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+static int r5l_recovery_read_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
{
struct page *page = ctx->meta_page;
struct r5l_meta_block *mb;
@@ -892,170 +1516,618 @@ static int r5l_read_meta_block(struct r5l_log *log,
return 0;
}
-static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx,
- sector_t stripe_sect,
- int *offset, sector_t *log_offset)
+static void
+r5l_recovery_create_empty_meta_block(struct r5l_log *log,
+ struct page *page,
+ sector_t pos, u64 seq)
{
- struct r5conf *conf = log->rdev->mddev->private;
- struct stripe_head *sh;
- struct r5l_payload_data_parity *payload;
- int disk_index;
+ struct r5l_meta_block *mb;
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
- while (1) {
- payload = page_address(ctx->meta_page) + *offset;
+ mb = page_address(page);
+ clear_page(mb);
+ mb->magic = cpu_to_le32(R5LOG_MAGIC);
+ mb->version = R5LOG_VERSION;
+ mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
+ mb->seq = cpu_to_le64(seq);
+ mb->position = cpu_to_le64(pos);
+}
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
- raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0,
- &disk_index, sh);
+static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
+ u64 seq)
+{
+ struct page *page;
+ struct r5l_meta_block *mb;
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_READ, 0,
- false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
- ctx->meta_total_blocks += BLOCK_SECTORS;
- } else {
- disk_index = sh->pd_idx;
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_READ, 0,
- false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
-
- if (sh->qd_idx >= 0) {
- disk_index = sh->qd_idx;
- sync_page_io(log->rdev,
- r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
- PAGE_SIZE, sh->dev[disk_index].page,
- REQ_OP_READ, 0, false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[1]);
- set_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags);
- }
- ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
- }
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ r5l_recovery_create_empty_meta_block(log, page, pos, seq);
+ mb = page_address(page);
+ mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+ mb, PAGE_SIZE));
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+ REQ_FUA, false)) {
+ __free_page(page);
+ return -EIO;
+ }
+ __free_page(page);
+ return 0;
+}
- *log_offset = r5l_ring_add(log, *log_offset,
- le32_to_cpu(payload->size));
- *offset += sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) *
- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
- break;
+/*
+ * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
+ * to mark valid (potentially not flushed) data in the journal.
+ *
+ * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
+ * so there should not be any mismatch here.
+ */
+static void r5l_recovery_load_data(struct r5l_log *log,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx,
+ struct r5l_payload_data_parity *payload,
+ sector_t log_offset)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ int dd_idx;
+
+ raid5_compute_sector(conf,
+ le64_to_cpu(payload->location), 0,
+ &dd_idx, sh);
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ sh->dev[dd_idx].page, REQ_OP_READ, 0, false);
+ sh->dev[dd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ ctx->meta_total_blocks += BLOCK_SECTORS;
+
+ set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
+ set_bit(STRIPE_R5C_CACHING, &sh->state);
+}
+
+static void r5l_recovery_load_parity(struct r5l_log *log,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx,
+ struct r5l_payload_data_parity *payload,
+ sector_t log_offset)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+
+ ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ sh->dev[sh->pd_idx].page, REQ_OP_READ, 0, false);
+ sh->dev[sh->pd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
+
+ if (sh->qd_idx >= 0) {
+ sync_page_io(log->rdev,
+ r5l_ring_add(log, log_offset, BLOCK_SECTORS),
+ PAGE_SIZE, sh->dev[sh->qd_idx].page,
+ REQ_OP_READ, 0, false);
+ sh->dev[sh->qd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[1]);
+ set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
}
+ clear_bit(STRIPE_R5C_CACHING, &sh->state);
+}
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- void *addr;
- u32 checksum;
+static void r5l_recovery_reset_stripe(struct stripe_head *sh)
+{
+ int i;
+ sh->state = 0;
+ sh->log_start = MaxSector;
+ for (i = sh->disks; i--; )
+ sh->dev[i].flags = 0;
+}
+
+static void
+r5l_recovery_replay_one_stripe(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct md_rdev *rdev, *rrdev;
+ int disk_index;
+ int data_count = 0;
+
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
continue;
- addr = kmap_atomic(sh->dev[disk_index].page);
- checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
- kunmap_atomic(addr);
- if (checksum != sh->dev[disk_index].log_checksum)
- goto error;
+ if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
+ continue;
+ data_count++;
}
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- struct md_rdev *rdev, *rrdev;
+ /*
+ * stripes that only have parity must have been flushed
+ * before the crash that we are now recovering from, so
+ * there is nothing more to recover.
+ */
+ if (data_count == 0)
+ goto out;
- if (!test_and_clear_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags))
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+ if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
continue;
/* in case device is broken */
+ rcu_read_lock();
rdev = rcu_dereference(conf->disks[disk_index].rdev);
- if (rdev)
- sync_page_io(rdev, stripe_sect, PAGE_SIZE,
+ if (rdev) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ sync_page_io(rdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0,
false);
+ rdev_dec_pending(rdev, rdev->mddev);
+ rcu_read_lock();
+ }
rrdev = rcu_dereference(conf->disks[disk_index].replacement);
- if (rrdev)
- sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
+ if (rrdev) {
+ atomic_inc(&rrdev->nr_pending);
+ rcu_read_unlock();
+ sync_page_io(rrdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0,
false);
+ rdev_dec_pending(rrdev, rrdev->mddev);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
}
- raid5_release_stripe(sh);
+ ctx->data_parity_stripes++;
+out:
+ r5l_recovery_reset_stripe(sh);
+}
+
+static struct stripe_head *
+r5c_recovery_alloc_stripe(struct r5conf *conf,
+ sector_t stripe_sect,
+ sector_t log_start)
+{
+ struct stripe_head *sh;
+
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+ if (!sh)
+ return NULL; /* no more stripe available */
+
+ r5l_recovery_reset_stripe(sh);
+ sh->log_start = log_start;
+
+ return sh;
+}
+
+static struct stripe_head *
+r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
+{
+ struct stripe_head *sh;
+
+ list_for_each_entry(sh, list, lru)
+ if (sh->sector == sect)
+ return sh;
+ return NULL;
+}
+
+static void
+r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh, *next;
+
+ list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
+ r5l_recovery_reset_stripe(sh);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+}
+
+static void
+r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh, *next;
+
+ list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+}
+
+/* if matches return 0; otherwise return -EINVAL */
+static int
+r5l_recovery_verify_data_checksum(struct r5l_log *log, struct page *page,
+ sector_t log_offset, __le32 log_checksum)
+{
+ void *addr;
+ u32 checksum;
+
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ page, REQ_OP_READ, 0, false);
+ addr = kmap_atomic(page);
+ checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
+}
+
+/*
+ * before loading data into the stripe cache, we need to verify the checksums
+ * of all data; if any data page mismatches, we drop all data in the meta block
+ */
+static int
+r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ struct r5l_meta_block *mb = page_address(ctx->meta_page);
+ sector_t mb_offset = sizeof(struct r5l_meta_block);
+ sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+ struct page *page;
+ struct r5l_payload_data_parity *payload;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ while (mb_offset < le32_to_cpu(mb->meta_size)) {
+ payload = (void *)mb + mb_offset;
+
+ if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (r5l_recovery_verify_data_checksum(
+ log, page, log_offset,
+ payload->checksum[0]) < 0)
+ goto mismatch;
+ } else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
+ if (r5l_recovery_verify_data_checksum(
+ log, page, log_offset,
+ payload->checksum[0]) < 0)
+ goto mismatch;
+ if (conf->max_degraded == 2 && /* q for RAID 6 */
+ r5l_recovery_verify_data_checksum(
+ log, page,
+ r5l_ring_add(log, log_offset,
+ BLOCK_SECTORS),
+ payload->checksum[1]) < 0)
+ goto mismatch;
+ } else /* not R5LOG_PAYLOAD_DATA or R5LOG_PAYLOAD_PARITY */
+ goto mismatch;
+
+ log_offset = r5l_ring_add(log, log_offset,
+ le32_to_cpu(payload->size));
+
+ mb_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ }
+
+ put_page(page);
return 0;
-error:
- for (disk_index = 0; disk_index < sh->disks; disk_index++)
- sh->dev[disk_index].flags = 0;
- raid5_release_stripe(sh);
+mismatch:
+ put_page(page);
return -EINVAL;
}
-static int r5l_recovery_flush_one_meta(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+/*
+ * Analyze all data/parity pages in one meta block
+ * Returns:
+ * 0 for success
+ * -EINVAL for unknown payload type
+ * -EAGAIN for checksum mismatch of data page
+ * -ENOMEM for run out of memory (alloc_page failed or run out of stripes)
+ */
+static int
+r5c_recovery_analyze_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx,
+ struct list_head *cached_stripe_list)
{
- struct r5conf *conf = log->rdev->mddev->private;
- struct r5l_payload_data_parity *payload;
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
struct r5l_meta_block *mb;
- int offset;
+ struct r5l_payload_data_parity *payload;
+ int mb_offset;
sector_t log_offset;
- sector_t stripe_sector;
+ sector_t stripe_sect;
+ struct stripe_head *sh;
+ int ret;
+
+ /*
+ * For a mismatch in data blocks, we will drop all data in this mb, but
+ * we will still read the next mb for other data with the FLUSH flag, as
+ * io_units could finish out of order.
+ */
+ ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
+ if (ret == -EINVAL)
+ return -EAGAIN;
+ else if (ret)
+ return ret; /* -ENOMEM due to alloc_page() failure */
mb = page_address(ctx->meta_page);
- offset = sizeof(struct r5l_meta_block);
+ mb_offset = sizeof(struct r5l_meta_block);
log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
- while (offset < le32_to_cpu(mb->meta_size)) {
+ while (mb_offset < le32_to_cpu(mb->meta_size)) {
int dd;
- payload = (void *)mb + offset;
- stripe_sector = raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0, &dd, NULL);
- if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
- &offset, &log_offset))
+ payload = (void *)mb + mb_offset;
+ stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ?
+ raid5_compute_sector(
+ conf, le64_to_cpu(payload->location), 0, &dd,
+ NULL)
+ : le64_to_cpu(payload->location);
+
+ sh = r5c_recovery_lookup_stripe(cached_stripe_list,
+ stripe_sect);
+
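+ /*
+ * stripe not in the cached list yet: allocate one; if allocation
+ * fails, replay cached stripes or grow the stripe cache and retry
+ */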
+ if (!sh) {
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+ /*
+ * cannot get a stripe from raid5_get_active_stripe;
+ * try replaying some stripes
+ */
+ if (!sh) {
+ r5c_recovery_replay_stripes(
+ cached_stripe_list, ctx);
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, ctx->pos);
+ }
+ if (!sh) {
+ pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
+ mdname(mddev),
+ conf->min_nr_stripes * 2);
+ raid5_set_cache_size(mddev,
+ conf->min_nr_stripes * 2);
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, ctx->pos);
+ }
+ if (!sh) {
+ pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
+ mdname(mddev));
+ return -ENOMEM;
+ }
+ list_add_tail(&sh->lru, cached_stripe_list);
+ }
+
+ if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+ test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
+ r5l_recovery_replay_one_stripe(conf, sh, ctx);
+ sh->log_start = ctx->pos;
+ list_move_tail(&sh->lru, cached_stripe_list);
+ }
+ r5l_recovery_load_data(log, sh, ctx, payload,
+ log_offset);
+ } else if (payload->header.type == R5LOG_PAYLOAD_PARITY)
+ r5l_recovery_load_parity(log, sh, ctx, payload,
+ log_offset);
+ else
return -EINVAL;
+
+ log_offset = r5l_ring_add(log, log_offset,
+ le32_to_cpu(payload->size));
+
+ mb_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
}
+
return 0;
}
-/* copy data/parity from log to raid disks */
-static void r5l_recovery_flush_log(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+/*
+ * Load the stripe into cache. The stripe will be written out later by
+ * the stripe cache state machine.
+ */
+static void r5c_recovery_load_one_stripe(struct r5l_log *log,
+ struct stripe_head *sh)
{
+ struct r5dev *dev;
+ int i;
+
+ for (i = sh->disks; i--; ) {
+ dev = sh->dev + i;
+ if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
+ set_bit(R5_InJournal, &dev->flags);
+ set_bit(R5_UPTODATE, &dev->flags);
+ }
+ }
+ list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+ atomic_inc(&log->stripe_in_journal_count);
+}
+
+/*
+ * Scan through the log for all to-be-flushed data
+ *
+ * For stripes with data and parity, namely Data-Parity stripe
+ * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
+ *
+ * For stripes with only data, namely Data-Only stripe
+ * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
+ *
+ * For a stripe, if we see data after parity, we should discard all previous
+ * data and parity for this stripe, as these data are already flushed to
+ * the array.
+ *
+ * At the end of the scan, we return the new journal_tail, which points to
+ * the first data-only stripe on the journal device, or to the next invalid meta block.
+ */
+static int r5c_recovery_flush_log(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh;
+ int ret = 0;
+
+ /* scan through the log */
while (1) {
- if (r5l_read_meta_block(log, ctx))
- return;
- if (r5l_recovery_flush_one_meta(log, ctx))
- return;
+ if (r5l_recovery_read_meta_block(log, ctx))
+ break;
+
+ ret = r5c_recovery_analyze_meta_block(log, ctx,
+ &ctx->cached_list);
+ /*
+ * -EAGAIN means a mismatch in a data block; in this case, we still
+ * try to scan the next meta block
+ */
+ if (ret && ret != -EAGAIN)
+ break; /* ret == -EINVAL or -ENOMEM */
ctx->seq++;
ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
}
+
+ if (ret == -ENOMEM) {
+ r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
+ return ret;
+ }
+
+ /* replay data-parity stripes */
+ r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
+
+ /* load data-only stripes to stripe cache */
+ list_for_each_entry(sh, &ctx->cached_list, lru) {
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ r5c_recovery_load_one_stripe(log, sh);
+ ctx->data_only_stripes++;
+ }
+
+ return 0;
}
-static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
- u64 seq)
+/*
+ * We did a recovery. Now ctx.pos points to an invalid meta block. The new
+ * log will start here. But we can't let the superblock point to the last valid
+ * meta block. The log might look like:
+ * | meta 1| meta 2| meta 3|
+ * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
+ * superblock points to meta 1, we write a new valid meta 2n. If a crash
+ * happens again, the new recovery will start from meta 1. Since meta 2n is
+ * valid now, recovery will think meta 3 is valid, which is wrong.
+ * The solution is to create a new meta block at meta 2's position with seq == meta
+ * 1's seq + 10000 and let the superblock point to meta 2. The same recovery
+ * will not think meta 3 is a valid meta block, because its seq doesn't match
+ */
+
+/*
+ * Before recovery, the log looks like the following
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^
+ * |- log->last_checkpoint
+ * |- log->last_cp_seq
+ *
+ * Now we scan through the log until we see invalid entry
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos
+ * |- log->last_cp_seq |- ctx->seq
+ *
+ * From this point, we need to increase the seq number by 10000 to avoid
+ * confusing the next recovery.
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+1
+ * |- log->last_cp_seq |- ctx->seq+10001
+ *
+ * However, it is not safe to start the state machine yet, because data-only
+ * stripes are not yet secured in RAID. To save these data-only stripes, we
+ * rewrite them starting from seq+10001.
+ *
+ * -----------------------------------------------------------------
+ * | valid log | data only stripes | invalid log |
+ * -----------------------------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+n
+ * |- log->last_cp_seq |- ctx->seq+10000+n
+ *
+ * If a failure happens again during this process, the recovery can safely start
+ * again from log->last_checkpoint.
+ *
+ * Once data only stripes are rewritten to journal, we move log_tail
+ *
+ * -----------------------------------------------------------------
+ * | old log | data only stripes | invalid log |
+ * -----------------------------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+n
+ * |- log->last_cp_seq |- ctx->seq+10000+n
+ *
+ * Then we can safely start the state machine. If failure happens from this
+ * point on, the recovery will start from the new log->last_checkpoint.
+ */
+static int
+r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
{
+ struct stripe_head *sh, *next;
+ struct mddev *mddev = log->rdev->mddev;
struct page *page;
- struct r5l_meta_block *mb;
- u32 crc;
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
+ mdname(mddev));
return -ENOMEM;
- mb = page_address(page);
- mb->magic = cpu_to_le32(R5LOG_MAGIC);
- mb->version = R5LOG_VERSION;
- mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
- mb->seq = cpu_to_le64(seq);
- mb->position = cpu_to_le64(pos);
- crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
- mb->checksum = cpu_to_le32(crc);
+ }
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
- REQ_FUA, false)) {
- __free_page(page);
- return -EIO;
+ list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ struct r5l_meta_block *mb;
+ int i;
+ int offset;
+ sector_t write_pos;
+
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ r5l_recovery_create_empty_meta_block(log, page,
+ ctx->pos, ctx->seq);
+ mb = page_address(page);
+ offset = le32_to_cpu(mb->meta_size);
+ write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+
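+ /* append one data payload (and write its page) for every device that still has data in the journal */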
+ for (i = sh->disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ struct r5l_payload_data_parity *payload;
+ void *addr;
+
+ if (test_bit(R5_InJournal, &dev->flags)) {
+ payload = (void *)mb + offset;
+ payload->header.type = cpu_to_le16(
+ R5LOG_PAYLOAD_DATA);
+ payload->size = cpu_to_le32(BLOCK_SECTORS);
+ payload->location = cpu_to_le64(
+ raid5_compute_blocknr(sh, i, 0));
+ addr = kmap_atomic(dev->page);
+ payload->checksum[0] = cpu_to_le32(
+ crc32c_le(log->uuid_checksum, addr,
+ PAGE_SIZE));
+ kunmap_atomic(addr);
+ sync_page_io(log->rdev, write_pos, PAGE_SIZE,
+ dev->page, REQ_OP_WRITE, 0, false);
+ write_pos = r5l_ring_add(log, write_pos,
+ BLOCK_SECTORS);
+ offset += sizeof(__le32) +
+ sizeof(struct r5l_payload_data_parity);
+
+ }
+ }
+ mb->meta_size = cpu_to_le32(offset);
+ mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+ mb, PAGE_SIZE));
+ sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
+ REQ_OP_WRITE, REQ_FUA, false);
+ sh->log_start = ctx->pos;
+ ctx->pos = write_pos;
+ ctx->seq += 1;
+
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
}
__free_page(page);
return 0;
@@ -1063,45 +2135,60 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
static int r5l_recovery_log(struct r5l_log *log)
{
+ struct mddev *mddev = log->rdev->mddev;
struct r5l_recovery_ctx ctx;
+ int ret;
+ sector_t pos;
+ struct stripe_head *sh;
ctx.pos = log->last_checkpoint;
ctx.seq = log->last_cp_seq;
ctx.meta_page = alloc_page(GFP_KERNEL);
+ ctx.data_only_stripes = 0;
+ ctx.data_parity_stripes = 0;
+ INIT_LIST_HEAD(&ctx.cached_list);
+
if (!ctx.meta_page)
return -ENOMEM;
- r5l_recovery_flush_log(log, &ctx);
+ ret = r5c_recovery_flush_log(log, &ctx);
__free_page(ctx.meta_page);
- /*
- * we did a recovery. Now ctx.pos points to an invalid meta block. New
- * log will start here. but we can't let superblock point to last valid
- * meta block. The log might looks like:
- * | meta 1| meta 2| meta 3|
- * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If
- * superblock points to meta 1, we write a new valid meta 2n. if crash
- * happens again, new recovery will start from meta 1. Since meta 2n is
- * valid now, recovery will think meta 3 is valid, which is wrong.
- * The solution is we create a new meta in meta2 with its seq == meta
- * 1's seq + 10 and let superblock points to meta2. The same recovery will
- * not think meta 3 is a valid meta, because its seq doesn't match
- */
- if (ctx.seq > log->last_cp_seq) {
- int ret;
-
- ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
- if (ret)
- return ret;
- log->seq = ctx.seq + 11;
- log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- r5l_write_super(log, ctx.pos);
- log->last_checkpoint = ctx.pos;
+ if (ret)
+ return ret;
+
+ pos = ctx.pos;
+ ctx.seq += 10000;
+
+ if (ctx.data_only_stripes == 0) {
log->next_checkpoint = ctx.pos;
+ r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+ ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
} else {
- log->log_start = ctx.pos;
- log->seq = ctx.seq;
+ sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
+ log->next_checkpoint = sh->log_start;
}
+
+ if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
+ pr_debug("md/raid:%s: starting from clean shutdown\n",
+ mdname(mddev));
+ else {
+ pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n",
+ mdname(mddev), ctx.data_only_stripes,
+ ctx.data_parity_stripes);
+
+ if (ctx.data_only_stripes > 0)
+ if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+ pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+ mdname(mddev));
+ return -EIO;
+ }
+ }
+
+ log->log_start = ctx.pos;
+ log->seq = ctx.seq;
+ log->last_checkpoint = pos;
+ r5l_write_super(log, pos);
return 0;
}
@@ -1110,7 +2197,293 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp)
struct mddev *mddev = log->rdev->mddev;
log->rdev->journal_tail = cp;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+}
+
+static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ int ret;
+
+ if (!conf->log)
+ return 0;
+
+ switch (conf->log->r5c_journal_mode) {
+ case R5C_JOURNAL_MODE_WRITE_THROUGH:
+ ret = snprintf(
+ page, PAGE_SIZE, "[%s] %s\n",
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+ break;
+ case R5C_JOURNAL_MODE_WRITE_BACK:
+ ret = snprintf(
+ page, PAGE_SIZE, "%s [%s]\n",
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+ break;
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+static ssize_t r5c_journal_mode_store(struct mddev *mddev,
+ const char *page, size_t length)
+{
+ struct r5conf *conf = mddev->private;
+ struct r5l_log *log = conf->log;
+ int val = -1, i;
+ int len = length;
+
+ if (!log)
+ return -ENODEV;
+
+ if (len && page[len - 1] == '\n')
+ len -= 1;
+ for (i = 0; i < ARRAY_SIZE(r5c_journal_mode_str); i++)
+ if (strlen(r5c_journal_mode_str[i]) == len &&
+ strncmp(page, r5c_journal_mode_str[i], len) == 0) {
+ val = i;
+ break;
+ }
+ if (val < R5C_JOURNAL_MODE_WRITE_THROUGH ||
+ val > R5C_JOURNAL_MODE_WRITE_BACK)
+ return -EINVAL;
+
+ mddev_suspend(mddev);
+ conf->log->r5c_journal_mode = val;
+ mddev_resume(mddev);
+
+ pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
+ mdname(mddev), val, r5c_journal_mode_str[val]);
+ return length;
+}
+
+struct md_sysfs_entry
+r5c_journal_mode = __ATTR(journal_mode, 0644,
+ r5c_journal_mode_show, r5c_journal_mode_store);
+
+/*
+ * Try to handle the write operation in the caching phase. This function should only
+ * be called in write-back mode.
+ *
+ * If all outstanding writes can be handled in the caching phase, returns 0.
+ * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
+ * and returns -EAGAIN
+ */
+int r5c_try_caching_write(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
+{
+ struct r5l_log *log = conf->log;
+ int i;
+ struct r5dev *dev;
+ int to_cache = 0;
+
+ BUG_ON(!r5c_is_writeback(log));
+
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ /*
+ * There are two different scenarios here:
+ * 1. The stripe has some data cached, and it is sent to
+ * write-out phase for reclaim
+ * 2. The stripe is clean, and this is the first write
+ *
+ * For 1, return -EAGAIN, so we continue with
+ * handle_stripe_dirtying().
+ *
+ * For 2, set STRIPE_R5C_CACHING and continue with caching
+ * write.
+ */
+
+ /* case 1: anything in journal (s->injournal) or anything written (s->written) */
+ if (s->injournal > 0 || s->written > 0)
+ return -EAGAIN;
+ /* case 2 */
+ set_bit(STRIPE_R5C_CACHING, &sh->state);
+ }
+
+ for (i = disks; i--; ) {
+ dev = &sh->dev[i];
+ /* if non-overwrite, use writing-out phase */
+ if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
+ !test_bit(R5_InJournal, &dev->flags)) {
+ r5c_make_stripe_write_out(sh);
+ return -EAGAIN;
+ }
+ }
+
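+ /* every outstanding write can be cached: mark the blocks for draining into the journal */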
+ for (i = disks; i--; ) {
+ dev = &sh->dev[i];
+ if (dev->towrite) {
+ set_bit(R5_Wantwrite, &dev->flags);
+ set_bit(R5_Wantdrain, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ to_cache++;
+ }
+ }
+
+ if (to_cache) {
+ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+ /*
+ * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
+ * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
+ * r5c_handle_data_cached()
+ */
+ set_bit(STRIPE_LOG_TRAPPED, &sh->state);
+ }
+
+ return 0;
+}
+
+/*
+ * free extra pages (orig_page) we allocated for prexor
+ */
+void r5c_release_extra_page(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int i;
+ bool using_disk_info_extra_page;
+
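+ /*
+ * r5c_use_extra_page() assigns conf->disks[i].extra_page to every device in
+ * the stripe, so checking dev[0] tells us whether the extra pages are in use
+ */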
+ using_disk_info_extra_page =
+ sh->dev[0].orig_page == conf->disks[0].extra_page;
+
+ for (i = sh->disks; i--; )
+ if (sh->dev[i].page != sh->dev[i].orig_page) {
+ struct page *p = sh->dev[i].orig_page;
+
+ sh->dev[i].orig_page = sh->dev[i].page;
+ if (!using_disk_info_extra_page)
+ put_page(p);
+ }
+
+ if (using_disk_info_extra_page) {
+ clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
+ md_wakeup_thread(conf->mddev->thread);
+ }
+}
+
+void r5c_use_extra_page(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int i;
+ struct r5dev *dev;
+
+ for (i = sh->disks; i--; ) {
+ dev = &sh->dev[i];
+ if (dev->orig_page != dev->page)
+ put_page(dev->orig_page);
+ dev->orig_page = conf->disks[i].extra_page;
+ }
+}
+
+/*
+ * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
+ * stripe is committed to RAID disks.
+ */
+void r5c_finish_stripe_write_out(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s)
+{
+ int i;
+ int do_wakeup = 0;
+
+ if (!conf->log ||
+ !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
+ return;
+
+ WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+ clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+
+ if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+
+ for (i = sh->disks; i--; ) {
+ clear_bit(R5_InJournal, &sh->dev[i].flags);
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ do_wakeup = 1;
+ }
+
+ /*
+ * analyse_stripe() runs before r5c_finish_stripe_write_out().
+ * We updated R5_InJournal, so we also update s->injournal here.
+ */
+ s->injournal = 0;
+
+ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
+ if (atomic_dec_and_test(&conf->pending_full_writes))
+ md_wakeup_thread(conf->mddev->thread);
+
+ if (do_wakeup)
+ wake_up(&conf->wait_for_overlap);
+
+ if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+
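+ /* write-back only: drop the stripe from the in-journal accounting */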
+ spin_lock_irq(&conf->log->stripe_in_journal_lock);
+ list_del_init(&sh->r5c);
+ spin_unlock_irq(&conf->log->stripe_in_journal_lock);
+ sh->log_start = MaxSector;
+ atomic_dec(&conf->log->stripe_in_journal_count);
+ r5c_update_log_state(conf->log);
+}
+
+int
+r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+ struct stripe_head_state *s)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int pages = 0;
+ int reserve;
+ int i;
+ int ret = 0;
+
+ BUG_ON(!log);
+
+ for (i = 0; i < sh->disks; i++) {
+ void *addr;
+
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ continue;
+ addr = kmap_atomic(sh->dev[i].page);
+ sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
+ addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ pages++;
+ }
+ WARN_ON(pages == 0);
+
+ /*
+ * The stripe must enter state machine again to call endio, so
+ * don't delay.
+ */
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ atomic_inc(&sh->count);
+
+ mutex_lock(&log->io_mutex);
+ /* meta + data */
+ reserve = (1 + pages) << (PAGE_SHIFT - 9);
+
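+ /* under log space pressure, park the stripe on the no-space list instead of reserving more journal space */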
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+ sh->log_start == MaxSector)
+ r5l_add_no_space_stripe(log, sh);
+ else if (!r5l_has_free_space(log, reserve)) {
+ if (sh->log_start == log->last_checkpoint)
+ BUG();
+ else
+ r5l_add_no_space_stripe(log, sh);
+ } else {
+ ret = r5l_log_stripe(log, sh, pages, 0);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list, &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
+
+ mutex_unlock(&log->io_mutex);
+ return 0;
}
static int r5l_load_log(struct r5l_log *log)
@@ -1121,7 +2494,7 @@ static int r5l_load_log(struct r5l_log *log)
sector_t cp = log->rdev->journal_tail;
u32 stored_crc, expected_crc;
bool create_super = false;
- int ret;
+ int ret = 0;
/* Make sure it's valid */
if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
@@ -1171,11 +2544,18 @@ create:
if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
log->max_free_space = RECLAIM_MAX_FREE_SPACE;
log->last_checkpoint = cp;
- log->next_checkpoint = cp;
__free_page(page);
- return r5l_recovery_log(log);
+ if (create_super) {
+ log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
+ log->seq = log->last_cp_seq + 1;
+ log->next_checkpoint = cp;
+ } else
+ ret = r5l_recovery_log(log);
+
+ r5c_update_log_state(log);
+ return ret;
ioerr:
__free_page(page);
return ret;
@@ -1188,6 +2568,22 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
if (PAGE_SIZE != 4096)
return -EINVAL;
+
+ /*
+ * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
+ * raid_disks r5l_payload_data_parity entries.
+ *
+ * The write journal and cache do not work for very big arrays
+ * (raid_disks > 203)
+ */
+ if (sizeof(struct r5l_meta_block) +
+ ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
+ conf->raid_disks) > PAGE_SIZE) {
+ pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
+ mdname(conf->mddev), conf->raid_disks);
+ return -EINVAL;
+ }
+
log = kzalloc(sizeof(*log), GFP_KERNEL);
if (!log)
return -ENOMEM;
@@ -1227,6 +2623,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
log->rdev->mddev, "reclaim");
if (!log->reclaim_thread)
goto reclaim_thread;
+ log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
+
init_waitqueue_head(&log->iounit_wait);
INIT_LIST_HEAD(&log->no_mem_stripes);
@@ -1234,6 +2632,13 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->no_space_stripes);
spin_lock_init(&log->no_space_stripes_lock);
+ INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ INIT_LIST_HEAD(&log->stripe_in_journal_list);
+ spin_lock_init(&log->stripe_in_journal_lock);
+ atomic_set(&log->stripe_in_journal_count, 0);
+
if (r5l_load_log(log))
goto error;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5f9e28443c8a..06d7279bdd04 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -70,19 +70,6 @@ module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
"Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;
-/*
- * Stripe cache
- */
-
-#define NR_STRIPES 256
-#define STRIPE_SIZE PAGE_SIZE
-#define STRIPE_SHIFT (PAGE_SHIFT - 9)
-#define STRIPE_SECTORS (STRIPE_SIZE>>9)
-#define IO_THRESHOLD 1
-#define BYPASS_THRESHOLD 1
-#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
-#define HASH_MASK (NR_HASH - 1)
-#define MAX_STRIPE_BATCH 8
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
@@ -126,64 +113,6 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
local_irq_enable();
}
-/* bio's attached to a stripe+device for I/O are linked together in bi_sector
- * order without overlap. There may be several bio's per stripe+device, and
- * a bio could span several devices.
- * When walking this list for a particular stripe+device, we must never proceed
- * beyond a bio that extends past this device, as the next bio might no longer
- * be valid.
- * This function is used to determine the 'next' bio in the list, given the sector
- * of the current stripe+device
- */
-static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
-{
- int sectors = bio_sectors(bio);
- if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
- return bio->bi_next;
- else
- return NULL;
-}
-
-/*
- * We maintain a biased count of active stripes in the bottom 16 bits of
- * bi_phys_segments, and a count of processed stripes in the upper 16 bits
- */
-static inline int raid5_bi_processed_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- return (atomic_read(segments) >> 16) & 0xffff;
-}
-
-static inline int raid5_dec_bi_active_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- return atomic_sub_return(1, segments) & 0xffff;
-}
-
-static inline void raid5_inc_bi_active_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- atomic_inc(segments);
-}
-
-static inline void raid5_set_bi_processed_stripes(struct bio *bio,
- unsigned int cnt)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- int old, new;
-
- do {
- old = atomic_read(segments);
- new = (old & 0xffff) | (cnt << 16);
- } while (atomic_cmpxchg(segments, old, new) != old);
-}
-
-static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- atomic_set(segments, cnt);
-}
-
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
@@ -289,8 +218,27 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
struct list_head *temp_inactive_list)
{
+ int i;
+ int injournal = 0; /* number of data pages with R5_InJournal */
+
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
+
+ if (r5c_is_writeback(conf->log))
+ for (i = sh->disks; i--; )
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ injournal++;
+ /*
+ * When quiescing in r5c write-back mode, set STRIPE_HANDLE for stripes with
+ * data in the journal, so they are not released to the cached lists
+ */
+ if (conf->quiesce && r5c_is_writeback(conf->log) &&
+ !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) {
+ if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+ r5c_make_stripe_write_out(sh);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state) &&
!test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
@@ -316,8 +264,30 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
< IO_THRESHOLD)
md_wakeup_thread(conf->mddev->thread);
atomic_dec(&conf->active_stripes);
- if (!test_bit(STRIPE_EXPANDING, &sh->state))
- list_add_tail(&sh->lru, temp_inactive_list);
+ if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+ if (!r5c_is_writeback(conf->log))
+ list_add_tail(&sh->lru, temp_inactive_list);
+ else {
+ WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
+ if (injournal == 0)
+ list_add_tail(&sh->lru, temp_inactive_list);
+ else if (injournal == conf->raid_disks - conf->max_degraded) {
+ /* full stripe */
+ if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
+ atomic_inc(&conf->r5c_cached_full_stripes);
+ if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
+ atomic_dec(&conf->r5c_cached_partial_stripes);
+ list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
+ r5c_check_cached_full_stripe(conf);
+ } else {
+ /* partial stripe */
+ if (!test_and_set_bit(STRIPE_R5C_PARTIAL_STRIPE,
+ &sh->state))
+ atomic_inc(&conf->r5c_cached_partial_stripes);
+ list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
+ }
+ }
+ }
}
}
@@ -541,7 +511,7 @@ retry:
if (dev->toread || dev->read || dev->towrite || dev->written ||
test_bit(R5_LOCKED, &dev->flags)) {
- printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
+ pr_err("sector=%llx i=%d %p %p %p %p %d\n",
(unsigned long long)sh->sector, i, dev->toread,
dev->read, dev->towrite, dev->written,
test_bit(R5_LOCKED, &dev->flags));
@@ -680,9 +650,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
}
if (noblock && sh == NULL)
break;
+
+ r5c_check_stripe_cache_usage(conf);
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
wait_event_lock_irq(
conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
@@ -901,8 +874,19 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
might_sleep();
- if (r5l_write_stripe(conf->log, sh) == 0)
- return;
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ /* writing out phase */
+ if (s->waiting_extra_page)
+ return;
+ if (r5l_write_stripe(conf->log, sh) == 0)
+ return;
+ } else { /* caching phase */
+ if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) {
+ r5c_cache_data(conf->log, sh, s);
+ return;
+ }
+ }
+
for (i = disks; i--; ) {
int op, op_flags = 0;
int replace_only = 0;
@@ -977,7 +961,7 @@ again:
if (bad < 0) {
set_bit(BlockedBadBlocks, &rdev->flags);
if (!conf->mddev->external &&
- conf->mddev->flags) {
+ conf->mddev->sb_flags) {
/* It is very unlikely, but we might
* still need to write out the
* bad block log - better give it
@@ -1115,7 +1099,7 @@ again:
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page **page,
sector_t sector, struct dma_async_tx_descriptor *tx,
- struct stripe_head *sh)
+ struct stripe_head *sh, int no_skipcopy)
{
struct bio_vec bvl;
struct bvec_iter iter;
@@ -1155,7 +1139,8 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
if (frombio) {
if (sh->raid_conf->skip_copy &&
b_offset == 0 && page_offset == 0 &&
- clen == STRIPE_SIZE)
+ clen == STRIPE_SIZE &&
+ !no_skipcopy)
*page = bio_page;
else
tx = async_memcpy(*page, bio_page, page_offset,
@@ -1237,7 +1222,7 @@ static void ops_run_biofill(struct stripe_head *sh)
while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(0, rbi, &dev->page,
- dev->sector, tx, sh);
+ dev->sector, tx, sh, 0);
rbi = r5_next_bio(rbi, dev->sector);
}
}
@@ -1364,10 +1349,15 @@ static int set_syndrome_sources(struct page **srcs,
if (i == sh->qd_idx || i == sh->pd_idx ||
(srctype == SYNDROME_SRC_ALL) ||
(srctype == SYNDROME_SRC_WANT_DRAIN &&
- test_bit(R5_Wantdrain, &dev->flags)) ||
+ (test_bit(R5_Wantdrain, &dev->flags) ||
+ test_bit(R5_InJournal, &dev->flags))) ||
(srctype == SYNDROME_SRC_WRITTEN &&
- dev->written))
- srcs[slot] = sh->dev[i].page;
+ dev->written)) {
+ if (test_bit(R5_InJournal, &dev->flags))
+ srcs[slot] = sh->dev[i].orig_page;
+ else
+ srcs[slot] = sh->dev[i].page;
+ }
i = raid6_next_disk(i, disks);
} while (i != d0_idx);
@@ -1546,6 +1536,13 @@ static void ops_complete_prexor(void *stripe_head_ref)
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
+
+ if (r5c_is_writeback(sh->raid_conf->log))
+ /*
+ * raid5-cache write back uses orig_page during prexor.
+ * After prexor, it is time to free orig_page
+ */
+ r5c_release_extra_page(sh);
}
static struct dma_async_tx_descriptor *
@@ -1567,7 +1564,9 @@ ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
/* Only process blocks that are known to be uptodate */
- if (test_bit(R5_Wantdrain, &dev->flags))
+ if (test_bit(R5_InJournal, &dev->flags))
+ xor_srcs[count++] = dev->orig_page;
+ else if (test_bit(R5_Wantdrain, &dev->flags))
xor_srcs[count++] = dev->page;
}
@@ -1601,6 +1600,7 @@ ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
+ struct r5conf *conf = sh->raid_conf;
int disks = sh->disks;
int i;
struct stripe_head *head_sh = sh;
@@ -1618,6 +1618,11 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
again:
dev = &sh->dev[i];
+ /*
+ * clear R5_InJournal, so when rewriting a page in
+ * journal, it is not skipped by r5l_log_stripe()
+ */
+ clear_bit(R5_InJournal, &dev->flags);
spin_lock_irq(&sh->stripe_lock);
chosen = dev->towrite;
dev->towrite = NULL;
@@ -1637,8 +1642,10 @@ again:
set_bit(R5_Discard, &dev->flags);
else {
tx = async_copy_data(1, wbi, &dev->page,
- dev->sector, tx, sh);
- if (dev->page != dev->orig_page) {
+ dev->sector, tx, sh,
+ r5c_is_writeback(conf->log));
+ if (dev->page != dev->orig_page &&
+ !r5c_is_writeback(conf->log)) {
set_bit(R5_SkipCopy, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
clear_bit(R5_OVERWRITE, &dev->flags);
@@ -1746,7 +1753,8 @@ again:
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (head_sh->dev[i].written)
+ if (head_sh->dev[i].written ||
+ test_bit(R5_InJournal, &head_sh->dev[i].flags))
xor_srcs[count++] = dev->page;
}
} else {
@@ -2000,7 +2008,10 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
spin_lock_init(&sh->batch_lock);
INIT_LIST_HEAD(&sh->batch_list);
INIT_LIST_HEAD(&sh->lru);
+ INIT_LIST_HEAD(&sh->r5c);
+ INIT_LIST_HEAD(&sh->log_list);
atomic_set(&sh->count, 1);
+ sh->log_start = MaxSector;
for (i = 0; i < disks; i++) {
struct r5dev *dev = &sh->dev[i];
@@ -2240,10 +2251,24 @@ static int resize_stripes(struct r5conf *conf, int newsize)
*/
ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
if (ndisks) {
- for (i=0; i<conf->raid_disks; i++)
+ for (i = 0; i < conf->pool_size; i++)
ndisks[i] = conf->disks[i];
- kfree(conf->disks);
- conf->disks = ndisks;
+
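+ /* newly added disk slots also need an extra_page for r5c prexor */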
+ for (i = conf->pool_size; i < newsize; i++) {
+ ndisks[i].extra_page = alloc_page(GFP_NOIO);
+ if (!ndisks[i].extra_page)
+ err = -ENOMEM;
+ }
+
+ if (err) {
+ for (i = conf->pool_size; i < newsize; i++)
+ if (ndisks[i].extra_page)
+ put_page(ndisks[i].extra_page);
+ kfree(ndisks);
+ } else {
+ kfree(conf->disks);
+ conf->disks = ndisks;
+ }
} else
err = -ENOMEM;
@@ -2342,10 +2367,8 @@ static void raid5_end_read_request(struct bio * bi)
* replacement device. We just fail those on
* any error
*/
- printk_ratelimited(
- KERN_INFO
- "md/raid:%s: read error corrected"
- " (%lu sectors at %llu on %s)\n",
+ pr_info_ratelimited(
+ "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
(unsigned long long)s,
bdevname(rdev->bdev, b));
@@ -2365,36 +2388,29 @@ static void raid5_end_read_request(struct bio * bi)
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error on replacement device "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
else if (conf->mddev->degraded >= conf->max_degraded) {
set_bad = 1;
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error not correctable "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error not correctable (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
set_bad = 1;
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error NOT corrected!! "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
} else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
- printk(KERN_WARNING
- "md/raid:%s: Too many read errors, failing device %s.\n",
+ pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
mdname(conf->mddev), bdn);
else
retry = 1;
@@ -2526,15 +2542,14 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- printk(KERN_ALERT
- "md/raid:%s: Disk failure on %s, disabling device.\n"
- "md/raid:%s: Operation continuing on %d devices.\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- mdname(mddev),
- conf->raid_disks - mddev->degraded);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
+ "md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ mdname(mddev),
+ conf->raid_disks - mddev->degraded);
}
/*
@@ -2856,8 +2871,8 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
previous, &dummy1, &sh2);
if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
|| sh2.qd_idx != sh->qd_idx) {
- printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
- mdname(conf->mddev));
+ pr_warn("md/raid:%s: compute_blocknr: map not correct\n",
+ mdname(conf->mddev));
return 0;
}
return r_sector;
@@ -2872,6 +2887,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int level = conf->level;
if (rcw) {
+ /*
+ * In some cases, handle_stripe_dirtying initially decides to
+ * run rmw and allocates an extra page for prexor. However, rcw turns
+ * out to be cheaper later on. We need to free the extra page now,
+ * because we won't be able to do that in ops_complete_prexor().
+ */
+ r5c_release_extra_page(sh);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -2882,6 +2904,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
if (!expand)
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
+ } else if (test_bit(R5_InJournal, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ s->locked++;
}
}
/* if we are not expanding this is a proper write request, and
@@ -2921,6 +2946,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
set_bit(R5_LOCKED, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
+ } else if (test_bit(R5_InJournal, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ s->locked++;
}
}
if (!s->locked)
@@ -3564,10 +3592,10 @@ unhash:
break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
}
-static void handle_stripe_dirtying(struct r5conf *conf,
- struct stripe_head *sh,
- struct stripe_head_state *s,
- int disks)
+static int handle_stripe_dirtying(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
{
int rmw = 0, rcw = 0, i;
sector_t recovery_cp = conf->mddev->recovery_cp;
@@ -3592,9 +3620,12 @@ static void handle_stripe_dirtying(struct r5conf *conf,
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+ test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
+ !((test_bit(R5_UPTODATE, &dev->flags) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ dev->page != dev->orig_page)) ||
test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rmw++;
@@ -3606,13 +3637,15 @@ static void handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags))) {
+ test_bit(R5_InJournal, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rcw++;
else
rcw += 2*disks;
}
}
+
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -3624,10 +3657,44 @@ static void handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+ if (test_bit(R5_InJournal, &dev->flags) &&
+ dev->page == dev->orig_page &&
+ !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) {
+ /* alloc page for prexor */
+ struct page *p = alloc_page(GFP_NOIO);
+
+ if (p) {
+ dev->orig_page = p;
+ continue;
+ }
+
+ /*
+ * alloc_page() failed, try to use
+ * disk_info->extra_page
+ */
+ if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE,
+ &conf->cache_state)) {
+ r5c_use_extra_page(sh);
+ break;
+ }
+
+ /* extra_page in use, add to delayed_list */
+ set_bit(STRIPE_DELAYED, &sh->state);
+ s->waiting_extra_page = 1;
+ return -EAGAIN;
+ }
+ }
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if ((dev->towrite ||
+ i == sh->pd_idx || i == sh->qd_idx ||
+ test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags)) &&
+ !((test_bit(R5_UPTODATE, &dev->flags) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ dev->page != dev->orig_page)) ||
+ test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {
if (test_bit(STRIPE_PREREAD_ACTIVE,
&sh->state)) {
@@ -3653,6 +3720,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_InJournal, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
rcw++;
if (test_bit(R5_Insync, &dev->flags) &&
@@ -3692,8 +3760,9 @@ static void handle_stripe_dirtying(struct r5conf *conf,
*/
if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
(s->locked == 0 && (rcw == 0 || rmw == 0) &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+ !test_bit(STRIPE_BIT_DELAY, &sh->state)))
schedule_reconstruction(sh, s, rcw == 0, 0);
+ return 0;
}
static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
@@ -3777,7 +3846,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
case check_state_compute_run:
break;
default:
- printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
+ pr_err("%s: unknown check_state: %d sector: %llu\n",
__func__, sh->check_state,
(unsigned long long) sh->sector);
BUG();
@@ -3941,9 +4010,9 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
case check_state_compute_run:
break;
default:
- printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
- __func__, sh->check_state,
- (unsigned long long) sh->sector);
+ pr_warn("%s: unknown check_state: %d sector: %llu\n",
+ __func__, sh->check_state,
+ (unsigned long long) sh->sector);
BUG();
}
}
@@ -4183,6 +4252,11 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
}
+
+ if (test_bit(R5_InJournal, &dev->flags))
+ s->injournal++;
+ if (test_bit(R5_InJournal, &dev->flags) && dev->written)
+ s->just_cached++;
}
if (test_bit(STRIPE_SYNCING, &sh->state)) {
/* If there is a failed device being replaced,
@@ -4411,7 +4485,8 @@ static void handle_stripe(struct stripe_head *sh)
struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_LOCKED, &dev->flags) &&
(i == sh->pd_idx || i == sh->qd_idx ||
- dev->written)) {
+ dev->written || test_bit(R5_InJournal,
+ &dev->flags))) {
pr_debug("Writing block %d\n", i);
set_bit(R5_Wantwrite, &dev->flags);
if (prexor)
@@ -4451,6 +4526,10 @@ static void handle_stripe(struct stripe_head *sh)
test_bit(R5_Discard, &qdev->flags))))))
handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+ if (s.just_cached)
+ r5c_handle_cached_data_endio(conf, sh, disks, &s.return_bi);
+ r5l_stripe_write_finished(sh);
+
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
* or to load a block that is being partially written.
@@ -4462,14 +4541,51 @@ static void handle_stripe(struct stripe_head *sh)
|| s.expanding)
handle_stripe_fill(sh, &s, disks);
- /* Now to consider new write requests and what else, if anything
- * should be read. We do not handle new writes when:
+ /*
+ * When the stripe finishes full journal write cycle (write to journal
+ * and raid disk), this is the clean up procedure so it is ready for
+ * next operation.
+ */
+ r5c_finish_stripe_write_out(conf, sh, &s);
+
+ /*
+ * Now to consider new write requests, cache write back and what else,
+ * if anything should be read. We do not handle new writes when:
* 1/ A 'write' operation (copy+xor) is already in flight.
* 2/ A 'check' operation is in flight, as it may clobber the parity
* block.
+ * 3/ A r5c cache log write is in flight.
*/
- if (s.to_write && !sh->reconstruct_state && !sh->check_state)
- handle_stripe_dirtying(conf, sh, &s, disks);
+
+ if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) {
+ if (!r5c_is_writeback(conf->log)) {
+ if (s.to_write)
+ handle_stripe_dirtying(conf, sh, &s, disks);
+ } else { /* write back cache */
+ int ret = 0;
+
+ /* First, try handle writes in caching phase */
+ if (s.to_write)
+ ret = r5c_try_caching_write(conf, sh, &s,
+ disks);
+ /*
+ * If caching phase failed: ret == -EAGAIN
+ * OR
+ * stripe under reclaim: !caching && injournal
+ *
+ * fall back to handle_stripe_dirtying()
+ */
+ if (ret == -EAGAIN ||
+ /* stripe under reclaim: !caching && injournal */
+ (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+ s.injournal > 0)) {
+ ret = handle_stripe_dirtying(conf, sh, &s,
+ disks);
+ if (ret == -EAGAIN)
+ goto finish;
+ }
+ }
+ }
/* maybe we need to check and possibly fix the parity for this stripe
* Any reads will already have been scheduled, so we just see if enough
@@ -4640,9 +4756,7 @@ finish:
}
if (!bio_list_empty(&s.return_bi)) {
- if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
- (s.failed <= conf->max_degraded ||
- conf->mddev->external == 0)) {
+ if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->return_bi, &s.return_bi);
spin_unlock_irq(&conf->device_lock);
@@ -4698,6 +4812,10 @@ static int raid5_congested(struct mddev *mddev, int bits)
if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return 1;
+
+ /* Also checks whether there is pressure on r5cache log space */
+ if (test_bit(R5C_LOG_TIGHT, &conf->cache_state))
+ return 1;
if (conf->quiesce)
return 1;
if (atomic_read(&conf->empty_inactive_list_nr))
@@ -5167,6 +5285,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
int remaining;
DEFINE_WAIT(w);
bool do_prepare;
+ bool do_flush = false;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = r5l_handle_flush_request(conf->log, bi);
@@ -5178,6 +5297,11 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
return;
}
/* ret == -EAGAIN, fallback */
+ /*
+ * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
+ * we need to flush journal device
+ */
+ do_flush = bi->bi_opf & REQ_PREFLUSH;
}
md_write_start(mddev, bi);
@@ -5188,6 +5312,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
* data on failed drives.
*/
if (rw == READ && mddev->degraded == 0 &&
+ !r5c_is_writeback(conf->log) &&
mddev->reshape_position == MaxSector) {
bi = chunk_aligned_read(mddev, bi);
if (!bi)
@@ -5316,6 +5441,12 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
do_prepare = true;
goto retry;
}
+ if (do_flush) {
+ set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
+ /* we only need flush for one stripe */
+ do_flush = false;
+ }
+
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
@@ -5481,9 +5612,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->flags == 0 ||
+ wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
return 0;
@@ -5579,10 +5710,10 @@ finish:
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
|| test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto ret;
@@ -5857,10 +5988,10 @@ static void raid5d(struct md_thread *thread)
md_check_recovery(mddev);
if (!bio_list_empty(&conf->return_bi) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
struct bio_list tmp = BIO_EMPTY_LIST;
spin_lock_irq(&conf->device_lock);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
bio_list_merge(&tmp, &conf->return_bi);
bio_list_init(&conf->return_bi);
}
@@ -5907,7 +6038,7 @@ static void raid5d(struct md_thread *thread)
break;
handled += batch_size;
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+ if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
@@ -6237,6 +6368,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_group_thread_cnt.attr,
&raid5_skip_copy.attr,
&raid5_rmw_level.attr,
+ &r5c_journal_mode.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -6363,6 +6495,8 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ int i;
+
if (conf->log)
r5l_exit_log(conf->log);
if (conf->shrinker.nr_deferred)
@@ -6371,6 +6505,9 @@ static void free_conf(struct r5conf *conf)
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
+ for (i = 0; i < conf->pool_size; i++)
+ if (conf->disks[i].extra_page)
+ put_page(conf->disks[i].extra_page);
kfree(conf->disks);
kfree(conf->stripe_hashtbl);
kfree(conf);
@@ -6382,8 +6519,8 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
if (alloc_scratch_buffer(conf, percpu)) {
- pr_err("%s: failed memory allocation for cpu%u\n",
- __func__, cpu);
+ pr_warn("%s: failed memory allocation for cpu%u\n",
+ __func__, cpu);
return -ENOMEM;
}
return 0;
@@ -6453,29 +6590,29 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (mddev->new_level != 5
&& mddev->new_level != 4
&& mddev->new_level != 6) {
- printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
- mdname(mddev), mddev->new_level);
+ pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n",
+ mdname(mddev), mddev->new_level);
return ERR_PTR(-EIO);
}
if ((mddev->new_level == 5
&& !algorithm_valid_raid5(mddev->new_layout)) ||
(mddev->new_level == 6
&& !algorithm_valid_raid6(mddev->new_layout))) {
- printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
- mdname(mddev), mddev->new_layout);
+ pr_warn("md/raid:%s: layout %d not supported\n",
+ mdname(mddev), mddev->new_layout);
return ERR_PTR(-EIO);
}
if (mddev->new_level == 6 && mddev->raid_disks < 4) {
- printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
- mdname(mddev), mddev->raid_disks);
+ pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n",
+ mdname(mddev), mddev->raid_disks);
return ERR_PTR(-EINVAL);
}
if (!mddev->new_chunk_sectors ||
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
!is_power_of_2(mddev->new_chunk_sectors)) {
- printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
- mdname(mddev), mddev->new_chunk_sectors << 9);
+ pr_warn("md/raid:%s: invalid chunk size %d\n",
+ mdname(mddev), mddev->new_chunk_sectors << 9);
return ERR_PTR(-EINVAL);
}
@@ -6517,9 +6654,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
GFP_KERNEL);
+
if (!conf->disks)
goto abort;
+ for (i = 0; i < max_disks; i++) {
+ conf->disks[i].extra_page = alloc_page(GFP_KERNEL);
+ if (!conf->disks[i].extra_page)
+ goto abort;
+ }
+
conf->mddev = mddev;
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
@@ -6540,6 +6684,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
INIT_LIST_HEAD(conf->temp_inactive_list + i);
+ atomic_set(&conf->r5c_cached_full_stripes, 0);
+ INIT_LIST_HEAD(&conf->r5c_full_stripe_list);
+ atomic_set(&conf->r5c_cached_partial_stripes, 0);
+ INIT_LIST_HEAD(&conf->r5c_partial_stripe_list);
+
conf->level = mddev->new_level;
conf->chunk_sectors = mddev->new_chunk_sectors;
if (raid5_alloc_percpu(conf) != 0)
@@ -6566,9 +6715,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md/raid:%s: device %s operational as raid"
- " disk %d\n",
- mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
+ pr_info("md/raid:%s: device %s operational as raid disk %d\n",
+ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
} else if (rdev->saved_raid_disk != raid_disk)
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -6602,21 +6750,18 @@ static struct r5conf *setup_conf(struct mddev *mddev)
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
conf->min_nr_stripes = max(NR_STRIPES, stripes);
if (conf->min_nr_stripes != NR_STRIPES)
- printk(KERN_INFO
- "md/raid:%s: force stripe size %d for reshape\n",
+ pr_info("md/raid:%s: force stripe size %d for reshape\n",
mdname(mddev), conf->min_nr_stripes);
}
memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
if (grow_stripes(conf, conf->min_nr_stripes)) {
- printk(KERN_ERR
- "md/raid:%s: couldn't allocate %dkB for buffers\n",
- mdname(mddev), memory);
+ pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n",
+ mdname(mddev), memory);
goto abort;
} else
- printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
- mdname(mddev), memory);
+ pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
/*
* Losing a stripe head costs more than the time to refill it,
* it reduces the queue depth and so can hurt throughput.
@@ -6628,18 +6773,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
if (register_shrinker(&conf->shrinker)) {
- printk(KERN_ERR
- "md/raid:%s: couldn't register shrinker.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: couldn't register shrinker.\n",
+ mdname(mddev));
goto abort;
}
sprintf(pers_name, "raid%d", mddev->new_level);
conf->thread = md_register_thread(raid5d, mddev, pers_name);
if (!conf->thread) {
- printk(KERN_ERR
- "md/raid:%s: couldn't allocate thread.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: couldn't allocate thread.\n",
+ mdname(mddev));
goto abort;
}
@@ -6692,9 +6835,8 @@ static int raid5_run(struct mddev *mddev)
int first = 1;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
+ pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
rdev_for_each(rdev, mddev) {
long long diff;
@@ -6737,15 +6879,14 @@ static int raid5_run(struct mddev *mddev)
int new_data_disks;
if (journal_dev) {
- printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
if (mddev->new_level != mddev->level) {
- printk(KERN_ERR "md/raid:%s: unsupported reshape "
- "required - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
old_disks = mddev->raid_disks - mddev->delta_disks;
@@ -6760,8 +6901,8 @@ static int raid5_run(struct mddev *mddev)
chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
new_data_disks = mddev->raid_disks - max_degraded;
if (sector_div(here_new, chunk_sectors * new_data_disks)) {
- printk(KERN_ERR "md/raid:%s: reshape_position not "
- "on a stripe boundary\n", mdname(mddev));
+ pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
+ mdname(mddev));
return -EINVAL;
}
reshape_offset = here_new * chunk_sectors;
@@ -6782,10 +6923,8 @@ static int raid5_run(struct mddev *mddev)
abs(min_offset_diff) >= mddev->new_chunk_sectors)
/* not really in-place - so OK */;
else if (mddev->ro == 0) {
- printk(KERN_ERR "md/raid:%s: in-place reshape "
- "must be started in read-only mode "
- "- aborting\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
+ mdname(mddev));
return -EINVAL;
}
} else if (mddev->reshape_backwards
@@ -6794,13 +6933,11 @@ static int raid5_run(struct mddev *mddev)
: (here_new * chunk_sectors >=
here_old * chunk_sectors + (-min_offset_diff))) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "md/raid:%s: reshape_position too early for "
- "auto-recovery - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
- printk(KERN_INFO "md/raid:%s: reshape will continue\n",
- mdname(mddev));
+ pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
/* OK, we should be able to continue; */
} else {
BUG_ON(mddev->level != mddev->new_level);
@@ -6819,8 +6956,8 @@ static int raid5_run(struct mddev *mddev)
if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
if (!journal_dev) {
- pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: journal disk is missing, force array readonly\n",
+ mdname(mddev));
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
} else if (mddev->recovery_cp == MaxSector)
@@ -6847,8 +6984,7 @@ static int raid5_run(struct mddev *mddev)
if (conf->disks[i].replacement &&
conf->reshape_progress != MaxSector) {
/* replacements and reshape simply do not mix. */
- printk(KERN_ERR "md: cannot handle concurrent "
- "replacement and reshape.\n");
+ pr_warn("md: cannot handle concurrent replacement and reshape.\n");
goto abort;
}
if (test_bit(In_sync, &rdev->flags)) {
@@ -6890,8 +7026,7 @@ static int raid5_run(struct mddev *mddev)
mddev->degraded = calc_degraded(conf);
if (has_failed(conf)) {
- printk(KERN_ERR "md/raid:%s: not enough operational devices"
- " (%d/%d failed)\n",
+ pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
mdname(mddev), mddev->degraded, conf->raid_disks);
goto abort;
}
@@ -6903,29 +7038,19 @@ static int raid5_run(struct mddev *mddev)
if (mddev->degraded > dirty_parity_disks &&
mddev->recovery_cp != MaxSector) {
if (mddev->ok_start_degraded)
- printk(KERN_WARNING
- "md/raid:%s: starting dirty degraded array"
- " - data corruption possible.\n",
- mdname(mddev));
+ pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n",
+ mdname(mddev));
else {
- printk(KERN_ERR
- "md/raid:%s: cannot start dirty degraded array.\n",
- mdname(mddev));
+ pr_crit("md/raid:%s: cannot start dirty degraded array.\n",
+ mdname(mddev));
goto abort;
}
}
- if (mddev->degraded == 0)
- printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
- " devices, algorithm %d\n", mdname(mddev), conf->level,
- mddev->raid_disks-mddev->degraded, mddev->raid_disks,
- mddev->new_layout);
- else
- printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
- " out of %d devices, algorithm %d\n",
- mdname(mddev), conf->level,
- mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, mddev->new_layout);
+ pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n",
+ mdname(mddev), conf->level,
+ mddev->raid_disks-mddev->degraded, mddev->raid_disks,
+ mddev->new_layout);
print_raid5_conf(conf);
@@ -6945,9 +7070,8 @@ static int raid5_run(struct mddev *mddev)
mddev->to_remove = NULL;
else if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
- printk(KERN_WARNING
- "raid5: failed to create sysfs attributes for %s\n",
- mdname(mddev));
+ pr_warn("raid5: failed to create sysfs attributes for %s\n",
+ mdname(mddev));
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
if (mddev->queue) {
@@ -6979,6 +7103,15 @@ static int raid5_run(struct mddev *mddev)
stripe = (stripe | (stripe-1)) + 1;
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
+
+ /*
+ * We use 16-bit counter of active stripes in bi_phys_segments
+ * (minus one for over-loaded initialization)
+ */
+ blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
+ blk_queue_max_discard_sectors(mddev->queue,
+ 0xfffe * STRIPE_SECTORS);
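	/*
	 * Editorial note, not part of this patch: assuming 4 KiB pages,
	 * STRIPE_SECTORS is PAGE_SIZE >> 9 = 8, so the cap above is
	 * 0xfffe * 8 = 524272 sectors (~256 MiB per request). A request of
	 * that size touches at most 0xfffe stripes, which still fits in the
	 * 16-bit active-stripe counter once one value is reserved for the
	 * over-loaded initialization.
	 */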
+
/*
* unaligned part of discard request will be ignored, so can't
* guarantee discard_zeroes_data
@@ -7035,9 +7168,10 @@ static int raid5_run(struct mddev *mddev)
if (journal_dev) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
- mdname(mddev), bdevname(journal_dev->bdev, b));
- r5l_init_log(conf, journal_dev);
+ pr_debug("md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(journal_dev->bdev, b));
+ if (r5l_init_log(conf, journal_dev))
+ goto abort;
}
return 0;
@@ -7046,7 +7180,7 @@ abort:
print_raid5_conf(conf);
free_conf(conf);
mddev->private = NULL;
- printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
+ pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
return -EIO;
}
@@ -7080,12 +7214,12 @@ static void print_raid5_conf (struct r5conf *conf)
int i;
struct disk_info *tmp;
- printk(KERN_DEBUG "RAID conf printout:\n");
+ pr_debug("RAID conf printout:\n");
if (!conf) {
- printk("(conf==NULL)\n");
+ pr_debug("(conf==NULL)\n");
return;
}
- printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
+ pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level,
conf->raid_disks,
conf->raid_disks - conf->mddev->degraded);
@@ -7093,7 +7227,7 @@ static void print_raid5_conf (struct r5conf *conf)
char b[BDEVNAME_SIZE];
tmp = conf->disks + i;
if (tmp->rdev)
- printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, o:%d, dev:%s\n",
i, !test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev, b));
}
@@ -7241,8 +7375,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* write requests running. We should be safe
*/
r5l_init_log(conf, rdev);
- printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
- mdname(mddev), bdevname(rdev->bdev, b));
+ pr_debug("md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(rdev->bdev, b));
return 0;
}
if (mddev->recovery_disabled == conf->recovery_disabled)
@@ -7346,10 +7480,10 @@ static int check_stripe_cache(struct mddev *mddev)
> conf->min_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->min_nr_stripes) {
- printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
- mdname(mddev),
- ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
- / STRIPE_SIZE)*4);
+ pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n",
+ mdname(mddev),
+ ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
+ / STRIPE_SIZE)*4);
return 0;
}
return 1;
@@ -7430,8 +7564,8 @@ static int raid5_start_reshape(struct mddev *mddev)
*/
if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
< mddev->array_sectors) {
- printk(KERN_ERR "md/raid:%s: array size must be reduced "
- "before number of disks\n", mdname(mddev));
+ pr_warn("md/raid:%s: array size must be reduced before number of disks\n",
+ mdname(mddev));
return -EINVAL;
}
@@ -7501,7 +7635,7 @@ static int raid5_start_reshape(struct mddev *mddev)
}
mddev->raid_disks = conf->raid_disks;
mddev->reshape_position = conf->reshape_progress;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -7619,6 +7753,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
*/
+ r5c_flush_cache(conf, INT_MAX);
conf->quiesce = 2;
wait_event_cmd(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0 &&
@@ -7649,8 +7784,8 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
/* for raid0 takeover only one zone is supported */
if (raid0_conf->nr_strip_zones > 1) {
- printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -7671,6 +7806,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
static void *raid5_takeover_raid1(struct mddev *mddev)
{
int chunksect;
+ void *ret;
if (mddev->raid_disks != 2 ||
mddev->degraded > 1)
@@ -7692,7 +7828,10 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
mddev->new_chunk_sectors = chunksect;
- return setup_conf(mddev);
+ ret = setup_conf(mddev);
+ if (!IS_ERR_VALUE(ret))
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ return ret;
}
static void *raid5_takeover_raid6(struct mddev *mddev)
@@ -7762,7 +7901,7 @@ static int raid5_check_reshape(struct mddev *mddev)
conf->chunk_sectors = new_chunk ;
mddev->chunk_sectors = new_chunk;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
return check_reshape(mddev);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 57ec49f0839e..ed8e1362ab36 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -226,6 +226,8 @@ struct stripe_head {
struct r5l_io_unit *log_io;
struct list_head log_list;
+ sector_t log_start; /* first meta block on the journal */
+ struct list_head r5c; /* for r5c_cache->stripe_in_journal */
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -264,6 +266,7 @@ struct stripe_head_state {
int syncing, expanding, expanded, replacing;
int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite;
+ int injournal, just_cached;
int failed_num[2];
int p_failed, q_failed;
int dec_preread_active;
@@ -273,6 +276,7 @@ struct stripe_head_state {
struct md_rdev *blocked_rdev;
int handle_bad_blocks;
int log_failed;
+ int waiting_extra_page;
};
/* Flags for struct r5dev.flags */
@@ -313,6 +317,11 @@ enum r5dev_flags {
*/
R5_Discard, /* Discard the stripe */
R5_SkipCopy, /* Don't copy data from bio to stripe cache */
+ R5_InJournal, /* data being written is in the journal device.
+ * if R5_InJournal is set for parity pd_idx, all the
+ * data and parity being written are in the journal
+ * device
+ */
};
/*
@@ -345,7 +354,30 @@ enum {
STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
* to batch yet.
*/
- STRIPE_LOG_TRAPPED, /* trapped into log */
+ STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
+ * this bit is used in two scenarios:
+ *
+ * 1. write-out phase
+ * set in first entry of r5l_write_stripe
+ * clear in second entry of r5l_write_stripe
+ * used to bypass logic in handle_stripe
+ *
+ * 2. caching phase
+ * set in r5c_try_caching_write()
+ * clear when journal write is done
+ * used to initiate r5c_cache_data()
+ * also used to bypass logic in handle_stripe
+ */
+ STRIPE_R5C_CACHING, /* the stripe is in caching phase
+ * see more detail in the raid5-cache.c
+ */
+ STRIPE_R5C_PARTIAL_STRIPE, /* in r5c cache (to-be/being handled or
+ * in conf->r5c_partial_stripe_list)
+ */
+ STRIPE_R5C_FULL_STRIPE, /* in r5c cache (to-be/being handled or
+ * in conf->r5c_full_stripe_list)
+ */
+ STRIPE_R5C_PREFLUSH, /* need to flush journal device */
};
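/*
 * Illustrative note, not part of this patch: sh->state is an unsigned long
 * bitmap, so a write-back stripe is typically recognized with something like
 * test_bit(STRIPE_R5C_CACHING, &sh->state), and clearing that bit is what
 * moves the stripe from the caching phase back to normal write-out.
 */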
#define STRIPE_EXPAND_SYNC_FLAGS \
@@ -408,8 +440,86 @@ enum {
struct disk_info {
struct md_rdev *rdev, *replacement;
+ struct page *extra_page; /* extra page to use in prexor */
};
+/*
+ * Stripe cache
+ */
+
+#define NR_STRIPES 256
+#define STRIPE_SIZE PAGE_SIZE
+#define STRIPE_SHIFT (PAGE_SHIFT - 9)
+#define STRIPE_SECTORS (STRIPE_SIZE>>9)
+#define IO_THRESHOLD 1
+#define BYPASS_THRESHOLD 1
+#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
+#define HASH_MASK (NR_HASH - 1)
+#define MAX_STRIPE_BATCH 8
+
+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap. There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This function is used to determine the 'next' bio in the list, given the
+ * sector of the current stripe+device
+ */
+static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
+{
+ int sectors = bio_sectors(bio);
+
+ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
+ return bio->bi_next;
+ else
+ return NULL;
+}
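/*
 * Illustrative sketch, not part of this patch: a caller typically walks a
 * per-device bio list with r5_next_bio() as below. 'first_bio' and
 * 'dev_sector' stand in for a stripe device's bio list head and start
 * sector, and process_bio() is a hypothetical placeholder.
 */
static inline void walk_stripe_dev_bios(struct bio *first_bio, sector_t dev_sector)
{
	struct bio *bio = first_bio;

	while (bio && bio->bi_iter.bi_sector < dev_sector + STRIPE_SECTORS) {
		process_bio(bio);			/* hypothetical */
		bio = r5_next_bio(bio, dev_sector);
	}
}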
+
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_processed_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ return (atomic_read(segments) >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ return atomic_sub_return(1, segments) & 0xffff;
+}
+
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ atomic_inc(segments);
+}
+
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+ unsigned int cnt)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+ int old, new;
+
+ do {
+ old = atomic_read(segments);
+ new = (old & 0xffff) | (cnt << 16);
+ } while (atomic_cmpxchg(segments, old, new) != old);
+}
+
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ atomic_set(segments, cnt);
+}
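/*
 * Illustrative sketch, not part of this patch: the intended counting
 * pattern, roughly as the raid5 submission/completion paths use these
 * helpers. attach_bio_to_stripe() is a hypothetical placeholder.
 */
static inline void bi_active_stripes_example(struct bio *bi, int nr_stripes)
{
	int i;

	raid5_set_bi_stripes(bi, 1);	/* bias: keeps the bio from completing early */
	for (i = 0; i < nr_stripes; i++) {
		raid5_inc_bi_active_stripes(bi);
		attach_bio_to_stripe(bi, i);	/* hypothetical */
	}
	/*
	 * Each stripe calls raid5_dec_bi_active_stripes() when it finishes;
	 * the submitter drops the bias the same way, and whoever brings the
	 * count to zero completes the bio.
	 */
	if (raid5_dec_bi_active_stripes(bi) == 0)
		bio_endio(bi);
}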
+
/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
* This is because we sometimes take all the spinlocks
* and creating that much locking depth can cause
@@ -432,6 +542,30 @@ struct r5worker_group {
int stripes_cnt;
};
+enum r5_cache_state {
+ R5_INACTIVE_BLOCKED, /* release of inactive stripes blocked,
+ * waiting for 25% to be free
+ */
+ R5_ALLOC_MORE, /* It might help to allocate another
+ * stripe.
+ */
+ R5_DID_ALLOC, /* A stripe was allocated, don't allocate
+ * more until at least one has been
+ * released. This avoids flooding
+ * the cache.
+ */
+ R5C_LOG_TIGHT, /* log device space tight, need to
+ * prioritize stripes at last_checkpoint
+ */
+ R5C_LOG_CRITICAL, /* log device is running out of space,
+ * only process stripes that are already
+ * occupying the log
+ */
+ R5C_EXTRA_PAGE_IN_USE, /* a stripe is using disk_info.extra_page
+ * for prexor
+ */
+};
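/*
 * Illustrative sketch, not part of this patch: cache_state is likewise a
 * plain unsigned long bitmap (see struct r5conf below), so these enum values
 * are used as bit numbers with the usual bitop helpers, e.g.:
 *
 *	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
 *		return;		/* only service stripes already in the log */
 *	set_bit(R5C_LOG_TIGHT, &conf->cache_state);
 *	clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
 */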
+
struct r5conf {
struct hlist_head *stripe_hashtbl;
/* only protect corresponding hash list and inactive_list */
@@ -519,23 +653,18 @@ struct r5conf {
*/
atomic_t active_stripes;
struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
+
+ atomic_t r5c_cached_full_stripes;
+ struct list_head r5c_full_stripe_list;
+ atomic_t r5c_cached_partial_stripes;
+ struct list_head r5c_partial_stripe_list;
+
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
-#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
- * waiting for 25% to be free
- */
-#define R5_ALLOC_MORE 2 /* It might help to allocate another
- * stripe.
- */
-#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate
- * more until at least one has been
- * released. This avoids flooding
- * the cache.
- */
struct shrinker shrinker;
int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock;
@@ -633,4 +762,23 @@ extern void r5l_stripe_write_finished(struct stripe_head *sh);
extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
extern void r5l_quiesce(struct r5l_log *log, int state);
extern bool r5l_log_disk_error(struct r5conf *conf);
+extern bool r5c_is_writeback(struct r5l_log *log);
+extern int
+r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s, int disks);
+extern void
+r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s);
+extern void r5c_release_extra_page(struct stripe_head *sh);
+extern void r5c_use_extra_page(struct stripe_head *sh);
+extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+extern void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks, struct bio_list *return_bi);
+extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+ struct stripe_head_state *s);
+extern void r5c_make_stripe_write_out(struct stripe_head *sh);
+extern void r5c_flush_cache(struct r5conf *conf, int num);
+extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
+extern void r5c_check_cached_full_stripe(struct r5conf *conf);
+extern struct md_sysfs_entry r5c_journal_mode;
#endif
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 941ceff9b268..29011dfabb11 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1455,7 +1455,7 @@ static const struct usb_device_id af9015_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU,
&af9015_props, "Conceptronic USB2.0 DVB-T CTVDIGRCU V3.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810,
- &af9015_props, "KWorld Digial MC-810", NULL) },
+ &af9015_props, "KWorld Digital MC-810", NULL) },
{ DVB_USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03,
&af9015_props, "Genius TVGo DVB-T03", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2,
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 89c7ed16b4df..1e73064b0fb2 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -2585,10 +2585,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
(void) GetLanConfigPages(ioc);
a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "LanAddr = %02X:%02X:%02X"
- ":%02X:%02X:%02X\n",
- ioc->name, a[5], a[4],
- a[3], a[2], a[1], a[0]));
+ "LanAddr = %pMR\n", ioc->name, a));
}
break;
@@ -2868,21 +2865,21 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
printk(KERN_INFO "%s: ", ioc->name);
if (ioc->prod_name)
- printk("%s: ", ioc->prod_name);
- printk("Capabilities={");
+ pr_cont("%s: ", ioc->prod_name);
+ pr_cont("Capabilities={");
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
- printk("Initiator");
+ pr_cont("Initiator");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
- printk("%sTarget", i ? "," : "");
+ pr_cont("%sTarget", i ? "," : "");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
- printk("%sLAN", i ? "," : "");
+ pr_cont("%sLAN", i ? "," : "");
i++;
}
@@ -2891,12 +2888,12 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
* This would probably evoke more questions than it's worth
*/
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
- printk("%sLogBusAddr", i ? "," : "");
+ pr_cont("%sLogBusAddr", i ? "," : "");
i++;
}
#endif
- printk("}\n");
+ pr_cont("}\n");
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -6783,8 +6780,7 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
if (ioc->bus_type == FC) {
if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- seq_printf(m, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ seq_printf(m, " LanAddr = %pMR\n", a);
}
seq_printf(m, " WWN = %08X%08X:%08X%08X\n",
ioc->fc_port_page0[p].WWNN.High,
@@ -6861,8 +6857,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- y += sprintf(buffer+len+y, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ y += sprintf(buffer+len+y, ", LanAddr=%pMR", a);
}
y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);
@@ -6896,8 +6891,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- seq_printf(m, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ seq_printf(m, ", LanAddr=%pMR", a);
}
seq_printf(m, ", IRQ=%d", ioc->pci_irq);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6c9fc11efb87..08a807d6a44f 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1366,15 +1366,10 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt)
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
- if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
- && (SCpnt->device->tagged_supported)) {
+ if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES) &&
+ SCpnt->device->tagged_supported)
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
- if (SCpnt->request && SCpnt->request->ioprio) {
- if (((SCpnt->request->ioprio & 0x7) == 1) ||
- !(SCpnt->request->ioprio & 0x7))
- scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
- }
- } else
+ else
scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 258757e216c4..b1700b5fa640 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -461,7 +461,7 @@ static int max77620_probe(struct i2c_client *client,
chip->rmap = devm_regmap_init_i2c(client, rmap_config);
if (IS_ERR(chip->rmap)) {
ret = PTR_ERR(chip->rmap);
- dev_err(chip->dev, "Failed to intialise regmap: %d\n", ret);
+ dev_err(chip->dev, "Failed to initialise regmap: %d\n", ret);
return ret;
}
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 6b3bf9ab051d..c5a456b0a564 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -170,7 +170,7 @@ static void ibmasm_remove_one(struct pci_dev *pdev)
ibmasm_unregister_uart(sp);
dbg("Sending OS down message\n");
if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN))
- err("failed to get repsonse to 'Send OS State' command\n");
+ err("failed to get response to 'Send OS State' command\n");
dbg("Disabling heartbeats\n");
ibmasm_heartbeat_exit(sp);
dbg("Disabling interrupts\n");
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38efa65..7e803fc454d1 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -23,8 +23,6 @@ if MMC
source "drivers/mmc/core/Kconfig"
-source "drivers/mmc/card/Kconfig"
-
source "drivers/mmc/host/Kconfig"
endif # MMC
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 400756ec7c49..416b6d1c9ec6 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -5,5 +5,4 @@
subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
obj-$(CONFIG_MMC) += core/
-obj-$(CONFIG_MMC) += card/
obj-$(subst m,y,$(CONFIG_MMC)) += host/
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
deleted file mode 100644
index 5562308699bc..000000000000
--- a/drivers/mmc/card/Kconfig
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-# MMC/SD card drivers
-#
-
-comment "MMC/SD/SDIO Card Drivers"
-
-config MMC_BLOCK
- tristate "MMC block device driver"
- depends on BLOCK
- default y
- help
- Say Y here to enable the MMC block device driver support.
- This provides a block device driver, which you can use to
- mount the filesystem. Almost everyone wishing MMC support
- should say Y or M here.
-
-config MMC_BLOCK_MINORS
- int "Number of minors per block device"
- depends on MMC_BLOCK
- range 4 256
- default 8
- help
- Number of minors per block device. One is needed for every
- partition on the disk (plus one for the whole disk).
-
- Number of total MMC minors available is 256, so your number
- of supported block devices will be limited to 256 divided
- by this number.
-
- Default is 8 to be backwards compatible with previous
- hardwired device numbering.
-
- If unsure, say 8 here.
-
-config MMC_BLOCK_BOUNCE
- bool "Use bounce buffer for simple hosts"
- depends on MMC_BLOCK
- default y
- help
- SD/MMC is a high latency protocol where it is crucial to
- send large requests in order to get high performance. Many
- controllers, however, are restricted to continuous memory
- (i.e. they can't do scatter-gather), something the kernel
- rarely can provide.
-
- Say Y here to help these restricted hosts by bouncing
- requests back and forth from a large buffer. You will get
- a big performance gain at the cost of up to 64 KiB of
- physical memory.
-
- If unsure, say Y here.
-
-config SDIO_UART
- tristate "SDIO UART/GPS class support"
- depends on TTY
- help
- SDIO function driver for SDIO cards that implements the UART
- class, as well as the GPS class which appears like a UART.
-
-config MMC_TEST
- tristate "MMC host test driver"
- help
- Development driver that performs a series of reads and writes
- to a memory card in order to expose certain well known bugs
- in host controllers. The tests are executed by writing to the
- "test" file in debugfs under each card. Note that whatever is
- on your card will be overwritten by these tests.
-
- This driver is only of interest to those developing or
- testing a host driver. Most people should say N here.
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
deleted file mode 100644
index c73b406a06cd..000000000000
--- a/drivers/mmc/card/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for MMC/SD card drivers
-#
-
-obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
-mmc_block-objs := block.o queue.o
-obj-$(CONFIG_MMC_TEST) += mmc_test.o
-
-obj-$(CONFIG_SDIO_UART) += sdio_uart.o
-
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 250f223aaa80..cdfa8520a4b1 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -22,3 +22,69 @@ config PWRSEQ_SIMPLE
This driver can also be built as a module. If so, the module
will be called pwrseq_simple.
+
+config MMC_BLOCK
+ tristate "MMC block device driver"
+ depends on BLOCK
+ default y
+ help
+ Say Y here to enable the MMC block device driver support.
+ This provides a block device driver, which you can use to
+ mount the filesystem. Almost everyone wishing MMC support
+ should say Y or M here.
+
+config MMC_BLOCK_MINORS
+ int "Number of minors per block device"
+ depends on MMC_BLOCK
+ range 4 256
+ default 8
+ help
+ Number of minors per block device. One is needed for every
+ partition on the disk (plus one for the whole disk).
+
+ Number of total MMC minors available is 256, so your number
+ of supported block devices will be limited to 256 divided
+ by this number.
+
+ Default is 8 to be backwards compatible with previous
+ hardwired device numbering.
+
+ If unsure, say 8 here.
+
+config MMC_BLOCK_BOUNCE
+ bool "Use bounce buffer for simple hosts"
+ depends on MMC_BLOCK
+ default y
+ help
+ SD/MMC is a high latency protocol where it is crucial to
+ send large requests in order to get high performance. Many
+ controllers, however, are restricted to continuous memory
+ (i.e. they can't do scatter-gather), something the kernel
+ rarely can provide.
+
+ Say Y here to help these restricted hosts by bouncing
+ requests back and forth from a large buffer. You will get
+ a big performance gain at the cost of up to 64 KiB of
+ physical memory.
+
+ If unsure, say Y here.
+
+config SDIO_UART
+ tristate "SDIO UART/GPS class support"
+ depends on TTY
+ help
+ SDIO function driver for SDIO cards that implements the UART
+ class, as well as the GPS class which appears like a UART.
+
+config MMC_TEST
+ tristate "MMC host test driver"
+ help
+ Development driver that performs a series of reads and writes
+ to a memory card in order to expose certain well known bugs
+ in host controllers. The tests are executed by writing to the
+ "test" file in debugfs under each card. Note that whatever is
+ on your card will be overwritten by these tests.
+
+ This driver is only of interest to those developing or
+ testing a host driver. Most people should say N here.
+
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index f007151dfdc6..b2a257dc644f 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -12,3 +12,7 @@ mmc_core-$(CONFIG_OF) += pwrseq.o
obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
+mmc_block-objs := block.o queue.o
+obj-$(CONFIG_MMC_TEST) += mmc_test.o
+obj-$(CONFIG_SDIO_UART) += sdio_uart.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/core/block.c
index bab3f07b1117..bab3f07b1117 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/core/block.c
diff --git a/drivers/mmc/card/block.h b/drivers/mmc/core/block.h
index cdabb2ee74be..cdabb2ee74be 100644
--- a/drivers/mmc/card/block.h
+++ b/drivers/mmc/core/block.h
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/core/mmc_test.c
index ec1d1c46eb90..3ab6e52d106c 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/mmc/card/mmc_test.c
- *
* Copyright 2007-2008 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/core/queue.c
index 6ae6bfb8b221..a6496d8027bc 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/mmc/card/queue.c
- *
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright 2006-2007 Pierre Ossman
*
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/core/queue.h
index dac8c3d010dd..dac8c3d010dd 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/core/queue.h
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index 491c187744f5..d3c91f412b69 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/card/sdio_uart.c - SDIO UART/GPS driver
+ * SDIO UART/GPS driver
*
* Based on drivers/serial/8250.c and drivers/serial/serial_core.c
* by Russell King.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 3f77d0863543..6fad22adbbb9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -585,7 +585,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
mcast.mcast_list_len = mc_num;
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
if (rc)
- BNX2X_ERR("Faled to set multicasts\n");
+ BNX2X_ERR("Failed to set multicasts\n");
} else {
/* clear existing mcasts */
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index bb74e1c10ffe..c68dbf7092b1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1377,7 +1377,7 @@ static const char *attn_master_to_str(u8 master)
case 9: return "DBU";
case 10: return "DMAE";
default:
- return "Unkown";
+ return "Unknown";
}
}
@@ -1555,7 +1555,7 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
DORQ_REG_DB_DROP_DETAILS);
DP_INFO(p_hwfn->cdev,
- "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
+ "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
DORQ_REG_DB_DROP_DETAILS_ADDRESS),
(u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index d0a58282f2a8..a39ef2e7a9a6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -369,7 +369,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
- DP_NOTICE(p_hwfn, "Unkown personality %d\n",
+ DP_NOTICE(p_hwfn, "Unknown personality %d\n",
p_hwfn->hw_info.personality);
p_ramrod->personality = PERSONALITY_ETH;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0457e315d336..b541a1c74488 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2091,7 +2091,7 @@ int ath10k_pci_init_config(struct ath10k *ar)
ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
if (ret != 0) {
- ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
+ ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 4ac9ba04afed..c1b4bb03e997 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -338,7 +338,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
if (skb_headroom(skb) < rtap_len &&
pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
- wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+ wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index 1186755e55b8..e5505387260b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -134,7 +134,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wating too long for FW read clear HMEBox(%d)!\n",
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
boxnum);
break;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 35b3fee5a453..b40cfb076f02 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -263,21 +263,6 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_MQ_RQ_QUEUE_OK;
}
-static inline void nvme_setup_write_zeroes(struct nvme_ns *ns,
- struct request *req, struct nvme_command *cmnd)
-{
- struct nvme_write_zeroes_cmd *write_zeroes = &cmnd->write_zeroes;
-
- memset(cmnd, 0, sizeof(*cmnd));
- write_zeroes->opcode = nvme_cmd_write_zeroes;
- write_zeroes->nsid = cpu_to_le32(ns->ns_id);
- write_zeroes->slba =
- cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
- write_zeroes->length =
- cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
- write_zeroes->control = 0;
-}
-
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
{
@@ -330,8 +315,6 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
nvme_setup_flush(ns, cmd);
else if (req_op(req) == REQ_OP_DISCARD)
ret = nvme_setup_discard(ns, req, cmd);
- else if (req_op(req) == REQ_OP_WRITE_ZEROES)
- nvme_setup_write_zeroes(ns, req, cmd);
else
nvme_setup_rw(ns, req, cmd);
@@ -952,10 +935,6 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
nvme_config_discard(ns);
- if (ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES)
- blk_queue_max_write_zeroes_sectors(ns->queue,
- ((u32)(USHRT_MAX + 1) * bs) >> 9);
-
blk_mq_unfreeze_queue(disk->queue);
}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d0f60c36d576..6f5074153dcd 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -480,7 +480,7 @@ out_free_link:
return ret;
}
-static int nvmet_port_subsys_drop_link(struct config_item *parent,
+static void nvmet_port_subsys_drop_link(struct config_item *parent,
struct config_item *target)
{
struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
@@ -493,7 +493,7 @@ static int nvmet_port_subsys_drop_link(struct config_item *parent,
goto found;
}
up_write(&nvmet_config_sem);
- return -EINVAL;
+ return;
found:
list_del(&p->entry);
@@ -502,7 +502,6 @@ found:
nvmet_disable_port(port);
up_write(&nvmet_config_sem);
kfree(p);
- return 0;
}
static struct configfs_item_operations nvmet_port_subsys_item_ops = {
@@ -556,7 +555,7 @@ out_free_link:
return ret;
}
-static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
+static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
struct config_item *target)
{
struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
@@ -569,14 +568,13 @@ static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
goto found;
}
up_write(&nvmet_config_sem);
- return -EINVAL;
+ return;
found:
list_del(&p->entry);
nvmet_genctr++;
up_write(&nvmet_config_sem);
kfree(p);
- return 0;
}
static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 2bd5ce43a724..d505d98cf5f8 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -141,11 +141,8 @@ struct ufs_qcom_phy_specific_ops {
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
int ufs_qcom_phy_power_on(struct phy *generic_phy);
int ufs_qcom_phy_power_off(struct phy *generic_phy);
-int ufs_qcom_phy_exit(struct phy *generic_phy);
-int ufs_qcom_phy_init_clks(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common);
-int ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common);
int ufs_qcom_phy_remove(struct phy *generic_phy,
struct ufs_qcom_phy *ufs_qcom_phy);
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index 6ee51490f786..c71c84734916 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -44,30 +44,12 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
{
- struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy);
- struct ufs_qcom_phy *phy_common = &phy->common_cfg;
- int err;
-
- err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
- __func__, err);
- goto out;
- }
- phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
- phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-
- ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
+ return 0;
+}
-out:
- return err;
+static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
+{
+ return 0;
}
static
@@ -117,7 +99,7 @@ static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
.init = ufs_qcom_phy_qmp_14nm_init,
- .exit = ufs_qcom_phy_exit,
+ .exit = ufs_qcom_phy_qmp_14nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.owner = THIS_MODULE,
@@ -136,6 +118,7 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct ufs_qcom_phy_qmp_14nm *phy;
+ struct ufs_qcom_phy *phy_common;
int err = 0;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -143,8 +126,9 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out;
}
+ phy_common = &phy->common_cfg;
- generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
&ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
if (!generic_phy) {
@@ -154,39 +138,43 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
goto out;
}
- phy_set_drvdata(generic_phy, phy);
+ err = ufs_qcom_phy_init_clks(phy_common);
+ if (err) {
+ dev_err(phy_common->dev,
+ "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
- strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
- sizeof(phy->common_cfg.name));
+ err = ufs_qcom_phy_init_vregulators(phy_common);
+ if (err) {
+ dev_err(phy_common->dev,
+ "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+ phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
+ phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-out:
- return err;
-}
+ ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
-static int ufs_qcom_phy_qmp_14nm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = to_phy(dev);
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int err = 0;
+ phy_set_drvdata(generic_phy, phy);
- err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
- if (err)
- dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
- __func__, err);
+ strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
+out:
return err;
}
static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = {
{.compatible = "qcom,ufs-phy-qmp-14nm"},
+ {.compatible = "qcom,msm8996-ufs-phy-qmp-14nm"},
{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match);
static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
.probe = ufs_qcom_phy_qmp_14nm_probe,
- .remove = ufs_qcom_phy_qmp_14nm_remove,
.driver = {
.of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
.name = "ufs_qcom_phy_qmp_14nm",
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
index 770087ab05e2..1a26a64e06d3 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -63,28 +63,12 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
{
- struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy);
- struct ufs_qcom_phy *phy_common = &phy->common_cfg;
- int err = 0;
-
- err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
- __func__, err);
- goto out;
- }
-
- ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
+ return 0;
+}
-out:
- return err;
+static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
+{
+ return 0;
}
static
@@ -173,7 +157,7 @@ static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
.init = ufs_qcom_phy_qmp_20nm_init,
- .exit = ufs_qcom_phy_exit,
+ .exit = ufs_qcom_phy_qmp_20nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.owner = THIS_MODULE,
@@ -192,6 +176,7 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct ufs_qcom_phy_qmp_20nm *phy;
+ struct ufs_qcom_phy *phy_common;
int err = 0;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -199,8 +184,9 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out;
}
+ phy_common = &phy->common_cfg;
- generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
&ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
if (!generic_phy) {
@@ -210,27 +196,27 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
goto out;
}
- phy_set_drvdata(generic_phy, phy);
+ err = ufs_qcom_phy_init_clks(phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
- strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
- sizeof(phy->common_cfg.name));
+ err = ufs_qcom_phy_init_vregulators(phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
-out:
- return err;
-}
+ ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
-static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = to_phy(dev);
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int err = 0;
+ phy_set_drvdata(generic_phy, phy);
- err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
- if (err)
- dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
- __func__, err);
+ strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
+out:
return err;
}
@@ -242,7 +228,6 @@ MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
.probe = ufs_qcom_phy_qmp_20nm_probe,
- .remove = ufs_qcom_phy_qmp_20nm_remove,
.driver = {
.of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
.name = "ufs_qcom_phy_qmp_20nm",
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 18a5b495ad65..c69568b8543d 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -22,13 +22,6 @@
#define VDDP_REF_CLK_MIN_UV 1200000
#define VDDP_REF_CLK_MAX_UV 1200000
-static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
- const char *, bool);
-static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
- const char *);
-static int ufs_qcom_phy_base_init(struct platform_device *pdev,
- struct ufs_qcom_phy *phy_common);
-
int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
struct ufs_qcom_phy_calibration *tbl_A,
int tbl_size_A,
@@ -75,45 +68,6 @@ out:
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
-struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
- struct ufs_qcom_phy *common_cfg,
- const struct phy_ops *ufs_qcom_phy_gen_ops,
- struct ufs_qcom_phy_specific_ops *phy_spec_ops)
-{
- int err;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = NULL;
- struct phy_provider *phy_provider;
-
- err = ufs_qcom_phy_base_init(pdev, common_cfg);
- if (err) {
- dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
- goto out;
- }
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider)) {
- err = PTR_ERR(phy_provider);
- dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
- goto out;
- }
-
- generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
- if (IS_ERR(generic_phy)) {
- err = PTR_ERR(generic_phy);
- dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
- generic_phy = NULL;
- goto out;
- }
-
- common_cfg->phy_spec_ops = phy_spec_ops;
- common_cfg->dev = dev;
-
-out:
- return generic_phy;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
-
/*
* This assumes the embedded phy structure inside generic_phy is of type
* struct ufs_qcom_phy. In order to function properly it's crucial
@@ -154,13 +108,50 @@ int ufs_qcom_phy_base_init(struct platform_device *pdev,
return 0;
}
-static int __ufs_qcom_phy_clk_get(struct phy *phy,
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+ struct ufs_qcom_phy *common_cfg,
+ const struct phy_ops *ufs_qcom_phy_gen_ops,
+ struct ufs_qcom_phy_specific_ops *phy_spec_ops)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy = NULL;
+ struct phy_provider *phy_provider;
+
+ err = ufs_qcom_phy_base_init(pdev, common_cfg);
+ if (err) {
+ dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
+ goto out;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ err = PTR_ERR(phy_provider);
+ dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
+ goto out;
+ }
+
+ generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ err = PTR_ERR(generic_phy);
+ dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
+ generic_phy = NULL;
+ goto out;
+ }
+
+ common_cfg->phy_spec_ops = phy_spec_ops;
+ common_cfg->dev = dev;
+
+out:
+ return generic_phy;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
+
+static int __ufs_qcom_phy_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool err_print)
{
struct clk *clk;
int err = 0;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
clk = devm_clk_get(dev, name);
if (IS_ERR(clk)) {
@@ -174,42 +165,44 @@ static int __ufs_qcom_phy_clk_get(struct phy *phy,
return err;
}
-static
-int ufs_qcom_phy_clk_get(struct phy *phy,
+static int ufs_qcom_phy_clk_get(struct device *dev,
const char *name, struct clk **clk_out)
{
- return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
+ return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
}
-int
-ufs_qcom_phy_init_clks(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common)
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
{
int err;
- err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+ if (of_device_is_compatible(phy_common->dev->of_node,
+ "qcom,msm8996-ufs-phy-qmp-14nm"))
+ goto skip_txrx_clk;
+
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
&phy_common->tx_iface_clk);
if (err)
goto out;
- err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
&phy_common->rx_iface_clk);
if (err)
goto out;
- err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
&phy_common->ref_clk_src);
if (err)
goto out;
+skip_txrx_clk:
/*
* "ref_clk_parent" is optional hence don't abort init if it's not
* found.
*/
- __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
+ __ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
&phy_common->ref_clk_parent, false);
- err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
&phy_common->ref_clk);
out:
@@ -217,41 +210,14 @@ out:
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
-int
-ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common)
-{
- int err;
-
- err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
- "vdda-pll");
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
- "vdda-phy");
-
- if (err)
- goto out;
-
- /* vddp-ref-clk-* properties are optional */
- __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
- "vddp-ref-clk", true);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
-
-static int __ufs_qcom_phy_init_vreg(struct phy *phy,
+static int __ufs_qcom_phy_init_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
{
int err = 0;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
char prop_name[MAX_PROP_NAME];
- vreg->name = kstrdup(name, GFP_KERNEL);
+ vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
if (!vreg->name) {
err = -ENOMEM;
goto out;
@@ -304,14 +270,36 @@ out:
return err;
}
-static int ufs_qcom_phy_init_vreg(struct phy *phy,
+static int ufs_qcom_phy_init_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, const char *name)
{
- return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
+ return __ufs_qcom_phy_init_vreg(dev, vreg, name, false);
}
-static
-int ufs_qcom_phy_cfg_vreg(struct phy *phy,
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
+{
+ int err;
+
+ err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
+ "vdda-pll");
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
+ "vdda-phy");
+
+ if (err)
+ goto out;
+
+ /* vddp-ref-clk-* properties are optional */
+ __ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
+ "vddp-ref-clk", true);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
+
+static int ufs_qcom_phy_cfg_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, bool on)
{
int ret = 0;
@@ -319,10 +307,6 @@ int ufs_qcom_phy_cfg_vreg(struct phy *phy,
const char *name = vreg->name;
int min_uV;
int uA_load;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
-
- BUG_ON(!vreg);
if (regulator_count_voltages(reg) > 0) {
min_uV = on ? vreg->min_uV : 0;
@@ -350,18 +334,15 @@ out:
return ret;
}
-static
-int ufs_qcom_phy_enable_vreg(struct phy *phy,
+static int ufs_qcom_phy_enable_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
int ret = 0;
if (!vreg || vreg->enabled)
goto out;
- ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
+ ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
if (ret) {
dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
__func__, ret);
@@ -380,10 +361,9 @@ out:
return ret;
}
-int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
{
int ret = 0;
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
if (phy->is_ref_clk_enabled)
goto out;
@@ -430,14 +410,10 @@ out_disable_src:
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
-static
-int ufs_qcom_phy_disable_vreg(struct phy *phy,
+static int ufs_qcom_phy_disable_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
int ret = 0;
if (!vreg || !vreg->enabled || vreg->is_always_on)
@@ -447,7 +423,7 @@ int ufs_qcom_phy_disable_vreg(struct phy *phy,
if (!ret) {
/* ignore errors on applying disable config */
- ufs_qcom_phy_cfg_vreg(phy, vreg, false);
+ ufs_qcom_phy_cfg_vreg(dev, vreg, false);
vreg->enabled = false;
} else {
dev_err(dev, "%s: %s disable failed, err=%d\n",
@@ -457,10 +433,8 @@ out:
return ret;
}
-void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
+static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
if (phy->is_ref_clk_enabled) {
clk_disable_unprepare(phy->ref_clk);
/*
@@ -473,7 +447,6 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
phy->is_ref_clk_enabled = false;
}
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
#define UFS_REF_CLK_EN (1 << 5)
@@ -526,9 +499,8 @@ void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
/* Turn ON M-PHY RMMI interface clocks */
-int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
int ret = 0;
if (phy->is_iface_clk_enabled)
@@ -552,20 +524,16 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
/* Turn OFF M-PHY RMMI interface clocks */
-void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
+void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
if (phy->is_iface_clk_enabled) {
clk_disable_unprepare(phy->tx_iface_clk);
clk_disable_unprepare(phy->rx_iface_clk);
phy->is_iface_clk_enabled = false;
}
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
@@ -634,29 +602,6 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
-int ufs_qcom_phy_remove(struct phy *generic_phy,
- struct ufs_qcom_phy *ufs_qcom_phy)
-{
- phy_power_off(generic_phy);
-
- kfree(ufs_qcom_phy->vdda_pll.name);
- kfree(ufs_qcom_phy->vdda_phy.name);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);
-
-int ufs_qcom_phy_exit(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
- if (ufs_qcom_phy->is_powered_on)
- phy_power_off(generic_phy);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);
-
int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
@@ -678,7 +623,10 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
struct device *dev = phy_common->dev;
int err;
- err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
+ if (phy_common->is_powered_on)
+ return 0;
+
+ err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
if (err) {
dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
__func__, err);
@@ -688,23 +636,30 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
phy_common->phy_spec_ops->power_control(phy_common, true);
/* vdda_pll also enables ref clock LDOs so enable it first */
- err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
+ err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
if (err) {
dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
__func__, err);
goto out_disable_phy;
}
- err = ufs_qcom_phy_enable_ref_clk(generic_phy);
+ err = ufs_qcom_phy_enable_iface_clk(phy_common);
if (err) {
- dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+ dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
__func__, err);
goto out_disable_pll;
}
+ err = ufs_qcom_phy_enable_ref_clk(phy_common);
+ if (err) {
+ dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+ __func__, err);
+ goto out_disable_iface_clk;
+ }
+
/* enable device PHY ref_clk pad rail */
if (phy_common->vddp_ref_clk.reg) {
- err = ufs_qcom_phy_enable_vreg(generic_phy,
+ err = ufs_qcom_phy_enable_vreg(dev,
&phy_common->vddp_ref_clk);
if (err) {
dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
@@ -717,11 +672,13 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
goto out;
out_disable_ref_clk:
- ufs_qcom_phy_disable_ref_clk(generic_phy);
+ ufs_qcom_phy_disable_ref_clk(phy_common);
+out_disable_iface_clk:
+ ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
+ ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+ ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
return err;
}
@@ -731,15 +688,19 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ if (!phy_common->is_powered_on)
+ return 0;
+
phy_common->phy_spec_ops->power_control(phy_common, false);
if (phy_common->vddp_ref_clk.reg)
- ufs_qcom_phy_disable_vreg(generic_phy,
+ ufs_qcom_phy_disable_vreg(phy_common->dev,
&phy_common->vddp_ref_clk);
- ufs_qcom_phy_disable_ref_clk(generic_phy);
+ ufs_qcom_phy_disable_ref_clk(phy_common);
+ ufs_qcom_phy_disable_iface_clk(phy_common);
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+ ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
+ ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
phy_common->is_powered_on = false;
return 0;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index c74c3f67b8da..abeb77217a21 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -104,6 +104,16 @@ config POWER_RESET_MSM
help
Power off and restart support for Qualcomm boards.
+config POWER_RESET_PIIX4_POWEROFF
+ tristate "Intel PIIX4 power-off driver"
+ depends on PCI
+ depends on MIPS || COMPILE_TEST
+ help
+ This driver supports powering off a system using the Intel PIIX4
+ southbridge, for example the MIPS Malta development board. The
+ southbridge SOff state is entered in response to a request to
+ power off the system.
+
config POWER_RESET_LTC2952
bool "LTC2952 PowerPath power-off driver"
depends on OF_GPIO
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 1be307c7fc25..11dae3b56ff9 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index e9e24df35f26..a85dd4d233af 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -169,6 +169,7 @@ static const struct of_device_id at91_poweroff_of_match[] = {
{ .compatible = "atmel,at91sam9x5-shdwc", },
{ /*sentinel*/ }
};
+MODULE_DEVICE_TABLE(of, at91_poweroff_of_match);
static struct platform_driver at91_poweroff_driver = {
.remove = __exit_p(at91_poweroff_remove),
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 1b5d450586d1..568580cf0655 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -175,6 +175,7 @@ static const struct of_device_id at91_reset_of_match[] = {
{ .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, at91_reset_of_match);
static struct notifier_block at91_restart_nb = {
.priority = 192,
@@ -242,6 +243,7 @@ static const struct platform_device_id at91_reset_plat_match[] = {
{ "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, at91_reset_plat_match);
static struct platform_driver at91_reset_driver = {
.remove = __exit_p(at91_reset_remove),
diff --git a/drivers/power/reset/piix4-poweroff.c b/drivers/power/reset/piix4-poweroff.c
new file mode 100644
index 000000000000..bacfc95783f0
--- /dev/null
+++ b/drivers/power/reset/piix4-poweroff.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+
+static struct pci_dev *pm_dev;
+static resource_size_t io_offset;
+
+enum piix4_pm_io_reg {
+ PIIX4_FUNC3IO_PMSTS = 0x00,
+#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS BIT(8)
+ PIIX4_FUNC3IO_PMCNTRL = 0x04,
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN BIT(13)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10)
+};
+
+#define PIIX4_SUSPEND_MAGIC 0x00120002
+
+static const int piix4_pm_io_region = PCI_BRIDGE_RESOURCES;
+
+static void piix4_poweroff(void)
+{
+ int spec_devid;
+ u16 sts;
+
+ /* Ensure the power button status is clear */
+ while (1) {
+ sts = inw(io_offset + PIIX4_FUNC3IO_PMSTS);
+ if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
+ break;
+ outw(sts, io_offset + PIIX4_FUNC3IO_PMSTS);
+ }
+
+ /* Enable entry to suspend */
+ outw(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
+ io_offset + PIIX4_FUNC3IO_PMCNTRL);
+
+ /* If the special cycle occurs too soon this doesn't work... */
+ mdelay(10);
+
+ /*
+ * The PIIX4 will enter the suspend state only after seeing a special
+ * cycle with the correct magic data on the PCI bus. Generate that
+ * cycle now.
+ */
+ spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
+ pci_bus_write_config_dword(pm_dev->bus, spec_devid, 0,
+ PIIX4_SUSPEND_MAGIC);
+
+ /* Give the system some time to power down, then error */
+ mdelay(1000);
+ pr_emerg("Unable to poweroff system\n");
+}
+
+static int piix4_poweroff_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int res;
+
+ if (pm_dev)
+ return -EINVAL;
+
+ /* Request access to the PIIX4 PM IO registers */
+ res = pci_request_region(dev, piix4_pm_io_region,
+ "PIIX4 PM IO registers");
+ if (res) {
+ dev_err(&dev->dev, "failed to request PM IO registers: %d\n",
+ res);
+ return res;
+ }
+
+ pm_dev = dev;
+ io_offset = pci_resource_start(dev, piix4_pm_io_region);
+ pm_power_off = piix4_poweroff;
+
+ return 0;
+}
+
+static void piix4_poweroff_remove(struct pci_dev *dev)
+{
+ if (pm_power_off == piix4_poweroff)
+ pm_power_off = NULL;
+
+ pci_release_region(dev, piix4_pm_io_region);
+ pm_dev = NULL;
+}
+
+static const struct pci_device_id piix4_poweroff_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
+ { 0 },
+};
+
+static struct pci_driver piix4_poweroff_driver = {
+ .name = "piix4-poweroff",
+ .id_table = piix4_poweroff_ids,
+ .probe = piix4_poweroff_probe,
+ .remove = piix4_poweroff_remove,
+};
+
+module_pci_driver(piix4_poweroff_driver);
+MODULE_AUTHOR("Paul Burton <paul.burton@imgtec.com>");
+MODULE_LICENSE("GPL");
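
Note: the probe above wires the PIIX4 into the kernel's power-off path by assigning the global pm_power_off hook, and remove clears it again only if it still points at this driver. A minimal, self-contained sketch of that hook pattern (hypothetical handler, not tied to any hardware):

#include <linux/module.h>
#include <linux/pm.h>
#include <linux/printk.h>

static void example_power_off(void)
{
        /* A real handler would program the platform's power controller here. */
        pr_emerg("example: power-off requested\n");
}

static int __init example_init(void)
{
        pm_power_off = example_power_off;
        return 0;
}

static void __exit example_exit(void)
{
        /* Only unhook if no other driver has since taken over. */
        if (pm_power_off == example_power_off)
                pm_power_off = NULL;
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
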
diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
index 1ecb51d67149..c8c371b285b1 100644
--- a/drivers/power/reset/syscon-reboot-mode.c
+++ b/drivers/power/reset/syscon-reboot-mode.c
@@ -74,6 +74,7 @@ static const struct of_device_id syscon_reboot_mode_of_match[] = {
{ .compatible = "syscon-reboot-mode" },
{}
};
+MODULE_DEVICE_TABLE(of, syscon_reboot_mode_of_match);
static struct platform_driver syscon_reboot_mode_driver = {
.probe = syscon_reboot_mode_probe,
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index b0b1eb3a78c2..7549c7f74a3c 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -72,6 +72,7 @@ static const struct of_device_id zx_reboot_of_match[] = {
{ .compatible = "zte,sysctrl" },
{}
};
+MODULE_DEVICE_TABLE(of, zx_reboot_of_match);
static struct platform_driver zx_reboot_driver = {
.probe = zx_reboot_probe,
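
Note: the MODULE_DEVICE_TABLE() additions in these reset/poweroff drivers exist so the OF/platform ID tables are emitted as module aliases; without them udev/modprobe cannot autoload the module when the matching devicetree node or platform device appears. A small sketch of the pattern (hypothetical compatible string and driver name):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        return 0;
}

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-reboot" },
        { /* sentinel */ }
};
/* Emits a "of:...Cvendor,example-reboot" modalias so modprobe can find us. */
MODULE_DEVICE_TABLE(of, example_of_match);

static struct platform_driver example_driver = {
        .probe = example_probe,
        .driver = {
                .name = "example-reboot",
                .of_match_table = example_of_match,
        },
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
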
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 2199f673118c..c569f82a0071 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -1900,7 +1900,7 @@ static void ab8500_fg_low_bat_work(struct work_struct *work)
* ab8500_fg_battok_calc - calculate the bit pattern corresponding
* to the target voltage.
* @di: pointer to the ab8500_fg structure
- * @target target voltage
+ * @target: target voltage
*
* Returns bit pattern closest to the target voltage
* valid return values are 0-14. (0-BATT_OK_MAX_NR_INCREMENTS)
@@ -2391,7 +2391,7 @@ static void ab8500_fg_external_power_changed(struct power_supply *psy)
}
/**
- * abab8500_fg_reinit_work() - work to reset the FG algorithm
+ * ab8500_fg_reinit_work() - work to reset the FG algorithm
* @work: pointer to the work_struct structure
*
* Used to reset the current battery capacity to be able to
@@ -2528,7 +2528,7 @@ static struct kobj_type ab8500_fg_ktype = {
};
/**
- * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * ab8500_fg_sysfs_exit() - de-init of sysfs entry
* @di: pointer to the struct ab8500_chargalg
*
* This function removes the entry in sysfs.
@@ -2539,7 +2539,7 @@ static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
}
/**
- * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * ab8500_fg_sysfs_init() - init of sysfs entry
* @di: pointer to the struct ab8500_chargalg
*
* This function adds an entry in sysfs.
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 5bdde692f724..539eb41504bb 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -1120,6 +1120,7 @@ static const struct platform_device_id axp288_fg_id_table[] = {
{ .name = DEV_NAME },
{},
};
+MODULE_DEVICE_TABLE(platform, axp288_fg_id_table);
static int axp288_fuel_gauge_remove(struct platform_device *pdev)
{
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index f5746b9f4e83..e9584330aeed 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1141,7 +1141,7 @@ static int bq24190_battery_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
- pm_runtime_put_sync(bdi->dev);
+ pm_runtime_get_sync(bdi->dev);
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 3b0dbc689d72..08c36b8e04bd 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -164,6 +164,25 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_DCAP] = 0x3c,
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
},
+ [BQ27510] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x28,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = 0x1a,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x1e,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x20,
+ [BQ27XXX_REG_DCAP] = 0x2e,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ },
[BQ27530] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
@@ -302,6 +321,24 @@ static enum power_supply_property bq27500_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq27510_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
static enum power_supply_property bq27530_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -385,6 +422,7 @@ static struct {
BQ27XXX_PROP(BQ27000, bq27000_battery_props),
BQ27XXX_PROP(BQ27010, bq27010_battery_props),
BQ27XXX_PROP(BQ27500, bq27500_battery_props),
+ BQ27XXX_PROP(BQ27510, bq27510_battery_props),
BQ27XXX_PROP(BQ27530, bq27530_battery_props),
BQ27XXX_PROP(BQ27541, bq27541_battery_props),
BQ27XXX_PROP(BQ27545, bq27545_battery_props),
@@ -397,10 +435,11 @@ static LIST_HEAD(bq27xxx_battery_devices);
static int poll_interval_param_set(const char *val, const struct kernel_param *kp)
{
struct bq27xxx_device_info *di;
+ unsigned int prev_val = *(unsigned int *) kp->arg;
int ret;
ret = param_set_uint(val, kp);
- if (ret < 0)
+ if (ret < 0 || prev_val == *(unsigned int *) kp->arg)
return ret;
mutex_lock(&bq27xxx_list_lock);
@@ -635,7 +674,8 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
*/
static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
{
- if (di->chip == BQ27500 || di->chip == BQ27541 || di->chip == BQ27545)
+ if (di->chip == BQ27500 || di->chip == BQ27510 ||
+ di->chip == BQ27541 || di->chip == BQ27545)
return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD);
if (di->chip == BQ27530 || di->chip == BQ27421)
return flags & BQ27XXX_FLAG_OT;
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 85d4ea2a9c20..5c5c3a6f9923 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -149,8 +149,8 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27200", BQ27000 },
{ "bq27210", BQ27010 },
{ "bq27500", BQ27500 },
- { "bq27510", BQ27500 },
- { "bq27520", BQ27500 },
+ { "bq27510", BQ27510 },
+ { "bq27520", BQ27510 },
{ "bq27530", BQ27530 },
{ "bq27531", BQ27530 },
{ "bq27541", BQ27541 },
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 4af7b770f293..2fa6edd6e8b1 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -313,4 +313,4 @@ module_platform_driver(micro_batt_device_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for iPAQ Atmel micro battery");
-MODULE_ALIAS("platform:battery-ipaq-micro");
+MODULE_ALIAS("platform:ipaq-micro-battery");
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 7321b727d484..509e2b341bd6 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -384,9 +384,6 @@ static int lp8788_update_charger_params(struct platform_device *pdev,
for (i = 0; i < pdata->num_chg_params; i++) {
param = pdata->chg_params + i;
- if (!param)
- continue;
-
if (lp8788_is_valid_charger_register(param->addr)) {
ret = lp8788_write_byte(lp, param->addr, param->val);
if (ret)
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 8689c80202b5..e7c3649b31a0 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -21,18 +21,13 @@
#include <linux/max17040_battery.h>
#include <linux/slab.h>
-#define MAX17040_VCELL_MSB 0x02
-#define MAX17040_VCELL_LSB 0x03
-#define MAX17040_SOC_MSB 0x04
-#define MAX17040_SOC_LSB 0x05
-#define MAX17040_MODE_MSB 0x06
-#define MAX17040_MODE_LSB 0x07
-#define MAX17040_VER_MSB 0x08
-#define MAX17040_VER_LSB 0x09
-#define MAX17040_RCOMP_MSB 0x0C
-#define MAX17040_RCOMP_LSB 0x0D
-#define MAX17040_CMD_MSB 0xFE
-#define MAX17040_CMD_LSB 0xFF
+#define MAX17040_VCELL 0x02
+#define MAX17040_SOC 0x04
+#define MAX17040_MODE 0x06
+#define MAX17040_VER 0x08
+#define MAX17040_RCOMP 0x0C
+#define MAX17040_CMD 0xFE
+
#define MAX17040_DELAY 1000
#define MAX17040_BATTERY_FULL 95
@@ -78,11 +73,11 @@ static int max17040_get_property(struct power_supply *psy,
return 0;
}
-static int max17040_write_reg(struct i2c_client *client, int reg, u8 value)
+static int max17040_write_reg(struct i2c_client *client, int reg, u16 value)
{
int ret;
- ret = i2c_smbus_write_byte_data(client, reg, value);
+ ret = i2c_smbus_write_word_swapped(client, reg, value);
if (ret < 0)
dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -94,7 +89,7 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
{
int ret;
- ret = i2c_smbus_read_byte_data(client, reg);
+ ret = i2c_smbus_read_word_swapped(client, reg);
if (ret < 0)
dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -104,43 +99,36 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
static void max17040_reset(struct i2c_client *client)
{
- max17040_write_reg(client, MAX17040_CMD_MSB, 0x54);
- max17040_write_reg(client, MAX17040_CMD_LSB, 0x00);
+ max17040_write_reg(client, MAX17040_CMD, 0x0054);
}
static void max17040_get_vcell(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- u8 msb;
- u8 lsb;
+ u16 vcell;
- msb = max17040_read_reg(client, MAX17040_VCELL_MSB);
- lsb = max17040_read_reg(client, MAX17040_VCELL_LSB);
+ vcell = max17040_read_reg(client, MAX17040_VCELL);
- chip->vcell = (msb << 4) + (lsb >> 4);
+ chip->vcell = vcell;
}
static void max17040_get_soc(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- u8 msb;
- u8 lsb;
+ u16 soc;
- msb = max17040_read_reg(client, MAX17040_SOC_MSB);
- lsb = max17040_read_reg(client, MAX17040_SOC_LSB);
+ soc = max17040_read_reg(client, MAX17040_SOC);
- chip->soc = msb;
+ chip->soc = (soc >> 8);
}
static void max17040_get_version(struct i2c_client *client)
{
- u8 msb;
- u8 lsb;
+ u16 version;
- msb = max17040_read_reg(client, MAX17040_VER_MSB);
- lsb = max17040_read_reg(client, MAX17040_VER_LSB);
+ version = max17040_read_reg(client, MAX17040_VER);
- dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb);
+ dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver 0x%x\n", version);
}
static void max17040_get_online(struct i2c_client *client)
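
Note: the max17040 conversion above replaces paired MSB/LSB byte accesses with single 16-bit SMBus transfers; i2c_smbus_read_word_swapped()/i2c_smbus_write_word_swapped() byte-swap the word, matching the chip's big-endian registers. A small sketch of the read side (hypothetical register name):

#include <linux/i2c.h>

#define EXAMPLE_REG_VCELL       0x02    /* 16-bit register, MSB transmitted first */

static int example_read_vcell(struct i2c_client *client, u16 *vcell)
{
        s32 ret = i2c_smbus_read_word_swapped(client, EXAMPLE_REG_VCELL);

        if (ret < 0)
                return ret;             /* negative errno from the I2C core */

        *vcell = ret;                   /* already byte-swapped to CPU order */
        return 0;
}
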
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 0b2eab571528..290ddc12b040 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -184,6 +184,7 @@ static const struct platform_device_id max8997_battery_id[] = {
{ "max8997-battery", 0 },
{ }
};
+MODULE_DEVICE_TABLE(platform, max8997_battery_id);
static struct platform_driver max8997_battery_driver = {
.driver = {
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index a74d8ca383a1..1e0960b646e8 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -413,7 +413,7 @@ static int power_supply_match_device_node(struct device *dev, const void *data)
/**
* power_supply_get_by_phandle() - Search for a power supply and returns its ref
* @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a power supply name
+ * @property: Name of property holding a power supply name
*
* If power supply was found, it increases reference count for the
* internal power supply's device. The user should power_supply_put()
@@ -458,7 +458,7 @@ static void devm_power_supply_put(struct device *dev, void *res)
* devm_power_supply_get_by_phandle() - Resource managed version of
* power_supply_get_by_phandle()
* @dev: Pointer to device holding phandle property
- * @phandle_name: Name of property holding a power supply phandle
+ * @property: Name of property holding a power supply phandle
*
* Return: On success returns a reference to a power supply with
* matching name equals to value under @property, NULL or ERR_PTR otherwise.
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 5c5880664e09..a2740cf57ad3 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -182,7 +182,7 @@ static ssize_t charger_state_show(struct device *dev,
return sprintf(buf, "%s\n", charge);
}
-static DEVICE_ATTR(charger_state, 0444, charger_state_show, NULL);
+static DEVICE_ATTR_RO(charger_state);
static irqreturn_t wm8350_charger_handler(int irq, void *data)
{
diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c
index 6285626d142a..e3edb31ac880 100644
--- a/drivers/power/supply/wm97xx_battery.c
+++ b/drivers/power/supply/wm97xx_battery.c
@@ -30,8 +30,7 @@ static enum power_supply_property *prop;
static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
pdata->batt_aux) * pdata->batt_mult /
@@ -40,8 +39,7 @@ static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
static unsigned long wm97xx_read_temp(struct power_supply *bat_ps)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
pdata->temp_aux) * pdata->temp_mult /
@@ -52,8 +50,7 @@ static int wm97xx_bat_get_property(struct power_supply *bat_ps,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -103,8 +100,7 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps)
static void wm97xx_bat_update(struct power_supply *bat_ps)
{
int old_status = bat_status;
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
mutex_lock(&work_lock);
@@ -166,15 +162,15 @@ static int wm97xx_bat_probe(struct platform_device *dev)
int ret = 0;
int props = 1; /* POWER_SUPPLY_PROP_PRESENT */
int i = 0;
- struct wm97xx_pdata *wmdata = dev->dev.platform_data;
- struct wm97xx_batt_pdata *pdata;
+ struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
+ struct power_supply_config cfg = {};
- if (!wmdata) {
+ if (!pdata) {
dev_err(&dev->dev, "No platform data supplied\n");
return -EINVAL;
}
- pdata = wmdata->batt_pdata;
+ cfg.drv_data = pdata;
if (dev->id != -1)
return -EINVAL;
@@ -243,7 +239,7 @@ static int wm97xx_bat_probe(struct platform_device *dev)
bat_psy_desc.properties = prop;
bat_psy_desc.num_properties = props;
- bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, NULL);
+ bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, &cfg);
if (!IS_ERR(bat_psy)) {
schedule_work(&bat_work);
} else {
@@ -266,8 +262,7 @@ err:
static int wm97xx_bat_remove(struct platform_device *dev)
{
- struct wm97xx_pdata *wmdata = dev->dev.platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
if (pdata && gpio_is_valid(pdata->charge_gpio)) {
free_irq(gpio_to_irq(pdata->charge_gpio), dev);
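
Note: the wm97xx change above stops digging platform data out of the parent device and instead hands it to the power-supply core at registration time, retrieving it later with power_supply_get_drvdata(). A minimal sketch of that registration pattern (hypothetical supply and property set):

#include <linux/kernel.h>
#include <linux/power_supply.h>

struct example_priv {
        int online;
};

static enum power_supply_property example_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
};

static int example_get_property(struct power_supply *psy,
                                enum power_supply_property psp,
                                union power_supply_propval *val)
{
        struct example_priv *priv = power_supply_get_drvdata(psy);

        if (psp != POWER_SUPPLY_PROP_ONLINE)
                return -EINVAL;
        val->intval = priv->online;
        return 0;
}

static const struct power_supply_desc example_desc = {
        .name           = "example-battery",
        .type           = POWER_SUPPLY_TYPE_BATTERY,
        .properties     = example_props,
        .num_properties = ARRAY_SIZE(example_props),
        .get_property   = example_get_property,
};

static struct power_supply *example_register(struct device *parent,
                                             struct example_priv *priv)
{
        struct power_supply_config cfg = { .drv_data = priv };

        /* Whatever is passed as cfg.drv_data comes back from
         * power_supply_get_drvdata() in the callbacks above. */
        return power_supply_register(parent, &example_desc, &cfg);
}
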
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3314bf299a51..fb44d5215e30 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -120,7 +120,7 @@ static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
unsigned int val;
int ret;
@@ -193,7 +193,7 @@ static int rk808_buck1_2_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
unsigned int reg = rdev->desc->vsel_reg;
unsigned old_sel;
@@ -232,7 +232,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_selector)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
/* if there is no dvs1/2 pin, we don't need wait extra time here. */
@@ -245,8 +245,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
- unsigned int reg = rk808_buck_config_regs[rdev->desc->id -
- RK808_ID_DCDC1];
+ unsigned int reg = rk808_buck_config_regs[rdev_get_id(rdev)];
switch (ramp_delay) {
case 1 ... 2000:
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c8fed9fa1cca..968a0ab4b398 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -84,8 +84,8 @@ extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
-extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
-extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern int zfcp_fc_exec_bsg_job(struct bsg_job *);
+extern int zfcp_fc_timeout_bsg_job(struct bsg_job *);
extern void zfcp_fc_sym_name_update(struct work_struct *);
extern unsigned int zfcp_fc_port_scan_backoff(void);
extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 237688af179b..7331eea67435 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/random.h>
+#include <linux/bsg-lib.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
@@ -885,26 +886,30 @@ out_free:
static void zfcp_fc_ct_els_job_handler(void *data)
{
- struct fc_bsg_job *job = data;
+ struct bsg_job *job = data;
struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
struct fc_bsg_reply *jr = job->reply;
jr->reply_payload_rcv_len = job->reply_payload.payload_len;
jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
jr->result = zfcp_ct_els->status ? -EIO : 0;
- job->job_done(job);
+ bsg_job_done(job, jr->result, jr->reply_payload_rcv_len);
}
-static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
+static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job)
{
u32 preamble_word1;
u8 gs_type;
struct zfcp_adapter *adapter;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ struct Scsi_Host *shost;
- preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
+ preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1;
gs_type = (preamble_word1 & 0xff000000) >> 24;
- adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
+ shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
+ adapter = (struct zfcp_adapter *) shost->hostdata[0];
switch (gs_type) {
case FC_FST_ALIAS:
@@ -924,7 +929,7 @@ static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
static void zfcp_fc_ct_job_handler(void *data)
{
- struct fc_bsg_job *job = data;
+ struct bsg_job *job = data;
struct zfcp_fc_wka_port *wka_port;
wka_port = zfcp_fc_job_wka_port(job);
@@ -933,11 +938,12 @@ static void zfcp_fc_ct_job_handler(void *data)
zfcp_fc_ct_els_job_handler(data);
}
-static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_els_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
struct zfcp_fsf_ct_els *els = job->dd_data;
- struct fc_rport *rport = job->rport;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ struct fc_bsg_request *bsg_request = job->request;
struct zfcp_port *port;
u32 d_id;
@@ -949,13 +955,13 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
d_id = port->d_id;
put_device(&port->dev);
} else
- d_id = ntoh24(job->request->rqst_data.h_els.port_id);
+ d_id = ntoh24(bsg_request->rqst_data.h_els.port_id);
els->handler = zfcp_fc_ct_els_job_handler;
return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}
-static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_ct_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
int ret;
@@ -978,13 +984,15 @@ static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
return ret;
}
-int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_exec_bsg_job(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct zfcp_adapter *adapter;
struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
- shost = job->rport ? rport_to_shost(job->rport) : job->shost;
+ shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
adapter = (struct zfcp_adapter *)shost->hostdata[0];
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
@@ -994,7 +1002,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
ct_els->resp = job->reply_payload.sg_list;
ct_els->handler_data = job;
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
return zfcp_fc_exec_els_job(job, adapter);
@@ -1006,7 +1014,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
}
}
-int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_timeout_bsg_job(struct bsg_job *job)
{
/* hardware tracks timeout, reset bsg timeout to not interfere */
return -EAGAIN;
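
Note: this conversion moves zfcp from the FC-specific struct fc_bsg_job to the generic bsg-lib struct bsg_job: request and reply are reached through job->request and job->reply, the rport/host via the fc_bsg_to_rport()/fc_bsg_to_shost() helpers, and completion goes through bsg_job_done() instead of job->job_done(). A stripped-down sketch of a handler written against the new API (hypothetical LLD, no real hardware interaction):

#include <linux/bsg-lib.h>
#include <scsi/scsi_bsg_fc.h>

static int example_exec_bsg_job(struct bsg_job *job)
{
        struct fc_bsg_request *req = job->request;
        struct fc_bsg_reply *rep = job->reply;

        switch (req->msgcode) {
        case FC_BSG_HST_ELS_NOLOGIN:
                /* A real driver would send job->request_payload here and
                 * usually complete asynchronously from its response handler. */
                rep->reply_payload_rcv_len = 0;
                rep->result = 0;
                bsg_job_done(job, rep->result, rep->reply_payload_rcv_len);
                return 0;
        default:
                return -EINVAL;
        }
}
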
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3e2bdb90813c..dfa93347c752 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,7 @@ config SCSI_SPI_ATTRS
config SCSI_FC_ATTRS
tristate "FiberChannel Transport Attributes"
depends on SCSI && NET
+ select BLK_DEV_BSGLIB
select SCSI_NETLINK
help
If you wish to export transport-specific information about
@@ -743,40 +744,18 @@ config SCSI_ISCI
control unit found in the Intel(R) C600 series chipset.
config SCSI_GENERIC_NCR5380
- tristate "Generic NCR5380/53c400 SCSI PIO support"
- depends on ISA && SCSI
+ tristate "Generic NCR5380/53c400 SCSI ISA card support"
+ depends on ISA && SCSI && HAS_IOPORT_MAP
select SCSI_SPI_ATTRS
---help---
- This is a driver for the old NCR 53c80 series of SCSI controllers
- on boards using PIO. Most boards such as the Trantor T130 fit this
- category, along with a large number of ISA 8bit controllers shipped
- for free with SCSI scanners. If you have a PAS16, T128 or DMX3191
- you should select the specific driver for that card rather than
- generic 5380 support.
-
- It is explained in section 3.8 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/g_NCR5380.h>.
+ This is a driver for old ISA card SCSI controllers based on a
+ NCR 5380, 53C80, 53C400, 53C400A, or DTC 436 device.
+ Most boards such as the Trantor T130 fit this category, as do
+ various 8-bit and 16-bit ISA cards bundled with SCSI scanners.
To compile this driver as a module, choose M here: the
module will be called g_NCR5380.
-config SCSI_GENERIC_NCR5380_MMIO
- tristate "Generic NCR5380/53c400 SCSI MMIO support"
- depends on ISA && SCSI
- select SCSI_SPI_ATTRS
- ---help---
- This is a driver for the old NCR 53c80 series of SCSI controllers
- on boards using memory mapped I/O.
- It is explained in section 3.8 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/g_NCR5380.h>.
-
- To compile this driver as a module, choose M here: the
- module will be called g_NCR5380_mmio.
-
config SCSI_IPS
tristate "IBM ServeRAID support"
depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1520596f54a6..a2d03957cbe2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -74,7 +74,6 @@ obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o
obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 790babc5ef66..d849ffa378b1 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -121,9 +121,10 @@
*
* Either real DMA *or* pseudo DMA may be implemented
*
- * NCR5380_dma_write_setup(instance, src, count) - initialize
- * NCR5380_dma_read_setup(instance, dst, count) - initialize
- * NCR5380_dma_residual(instance); - residual count
+ * NCR5380_dma_xfer_len - determine size of DMA/PDMA transfer
+ * NCR5380_dma_send_setup - execute DMA/PDMA from memory to 5380
+ * NCR5380_dma_recv_setup - execute DMA/PDMA from 5380 to memory
+ * NCR5380_dma_residual - residual byte count
*
* The generic driver is initialized by calling NCR5380_init(instance),
* after setting the appropriate host specific fields and ID. If the
@@ -178,7 +179,7 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
/**
* NCR5380_poll_politely2 - wait for two chip register values
- * @instance: controller to poll
+ * @hostdata: host private data
* @reg1: 5380 register to poll
* @bit1: Bitmask to check
* @val1: Expected value
@@ -195,18 +196,14 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
* Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
*/
-static int NCR5380_poll_politely2(struct Scsi_Host *instance,
- int reg1, int bit1, int val1,
- int reg2, int bit2, int val2, int wait)
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata,
+ unsigned int reg1, u8 bit1, u8 val1,
+ unsigned int reg2, u8 bit2, u8 val2,
+ unsigned long wait)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ unsigned long n = hostdata->poll_loops;
unsigned long deadline = jiffies + wait;
- unsigned long n;
- /* Busy-wait for up to 10 ms */
- n = min(10000U, jiffies_to_usecs(wait));
- n *= hostdata->accesses_per_ms;
- n /= 2000;
do {
if ((NCR5380_read(reg1) & bit1) == val1)
return 0;
@@ -288,6 +285,7 @@ mrs[] = {
static void NCR5380_print(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char status, data, basr, mr, icr, i;
data = NCR5380_read(CURRENT_SCSI_DATA_REG);
@@ -337,6 +335,7 @@ static struct {
static void NCR5380_print_phase(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char status;
int i;
@@ -441,14 +440,14 @@ static void prepare_info(struct Scsi_Host *instance)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
snprintf(hostdata->info, sizeof(hostdata->info),
- "%s, io_port 0x%lx, n_io_port %d, "
- "base 0x%lx, irq %d, "
+ "%s, irq %d, "
+ "io_port 0x%lx, base 0x%lx, "
"can_queue %d, cmd_per_lun %d, "
"sg_tablesize %d, this_id %d, "
"flags { %s%s%s}, "
"options { %s} ",
- instance->hostt->name, instance->io_port, instance->n_io_port,
- instance->base, instance->irq,
+ instance->hostt->name, instance->irq,
+ hostdata->io_port, hostdata->base,
instance->can_queue, instance->cmd_per_lun,
instance->sg_tablesize, instance->this_id,
hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
@@ -482,6 +481,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int i;
unsigned long deadline;
+ unsigned long accesses_per_ms;
instance->max_lun = 7;
@@ -530,7 +530,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
++i;
cpu_relax();
} while (time_is_after_jiffies(deadline));
- hostdata->accesses_per_ms = i / 256;
+ accesses_per_ms = i / 256;
+ hostdata->poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2;
return 0;
}
@@ -560,7 +561,7 @@ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
case 3:
case 5:
shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n");
- NCR5380_poll_politely(instance,
+ NCR5380_poll_politely(hostdata,
STATUS_REG, SR_BSY, 0, 5 * HZ);
break;
case 2:
@@ -871,7 +872,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
+ transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
hostdata->dma_len = 0;
data = (unsigned char **)&hostdata->connected->SCp.ptr;
@@ -994,7 +995,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
}
handled = 1;
} else {
- shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
+ dsprintk(NDEBUG_INTR, instance, "interrupt without IRQ bit\n");
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -1075,7 +1076,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
*/
spin_unlock_irq(&hostdata->lock);
- err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0,
+ err = NCR5380_poll_politely2(hostdata, MODE_REG, MR_ARBITRATE, 0,
INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS,
ICR_ARBITRATION_PROGRESS, HZ);
spin_lock_irq(&hostdata->lock);
@@ -1201,7 +1202,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
* selection.
*/
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY,
+ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, SR_BSY,
msecs_to_jiffies(250));
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
@@ -1247,7 +1248,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Wait for start of REQ/ACK handshake */
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
if (err < 0) {
shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
@@ -1318,6 +1319,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
unsigned char *phase, int *count,
unsigned char **data)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char p = *phase, tmp;
int c = *count;
unsigned char *d = *data;
@@ -1336,7 +1338,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
* valid
*/
- if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
+ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1381,7 +1383,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
}
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
break;
@@ -1440,6 +1442,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
static void do_reset(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata __maybe_unused *hostdata = shost_priv(instance);
unsigned long flags;
local_irq_save(flags);
@@ -1462,6 +1465,7 @@ static void do_reset(struct Scsi_Host *instance)
static int do_abort(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *msgptr, phase, tmp;
int len;
int rc;
@@ -1479,7 +1483,7 @@ static int do_abort(struct Scsi_Host *instance)
* the target sees, so we just handshake.
*/
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
if (rc < 0)
goto timeout;
@@ -1490,7 +1494,7 @@ static int do_abort(struct Scsi_Host *instance)
if (tmp != PHASE_MSGOUT) {
NCR5380_write(INITIATOR_COMMAND_REG,
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
if (rc < 0)
goto timeout;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1575,9 +1579,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* starting the NCR. This is also the cleaner way for the TT.
*/
if (p & SR_IO)
- result = NCR5380_dma_recv_setup(instance, d, c);
+ result = NCR5380_dma_recv_setup(hostdata, d, c);
else
- result = NCR5380_dma_send_setup(instance, d, c);
+ result = NCR5380_dma_send_setup(hostdata, d, c);
}
/*
@@ -1609,9 +1613,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* NCR access, else the DMA setup gets trashed!
*/
if (p & SR_IO)
- result = NCR5380_dma_recv_setup(instance, d, c);
+ result = NCR5380_dma_recv_setup(hostdata, d, c);
else
- result = NCR5380_dma_send_setup(instance, d, c);
+ result = NCR5380_dma_send_setup(hostdata, d, c);
}
/* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */
@@ -1678,12 +1682,12 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* byte.
*/
- if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ, BASR_DRQ, HZ) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
}
- if (NCR5380_poll_politely(instance, STATUS_REG,
+ if (NCR5380_poll_politely(hostdata, STATUS_REG,
SR_REQ, 0, HZ) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
@@ -1694,7 +1698,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* Wait for the last byte to be sent. If REQ is being asserted for
* the byte we're interested, we'll ACK it and it will go false.
*/
- if (NCR5380_poll_politely2(instance,
+ if (NCR5380_poll_politely2(hostdata,
BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
result = -1;
@@ -1751,22 +1755,26 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
}
#ifdef CONFIG_SUN3
- if (phase == PHASE_CMDOUT) {
- void *d;
- unsigned long count;
+ if (phase == PHASE_CMDOUT &&
+ sun3_dma_setup_done != cmd) {
+ int count;
if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- count = cmd->SCp.buffer->length;
- d = sg_virt(cmd->SCp.buffer);
- } else {
- count = cmd->SCp.this_residual;
- d = cmd->SCp.ptr;
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
- if (sun3_dma_setup_done != cmd &&
- sun3scsi_dma_xfer_len(count, cmd) > 0) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(cmd->request));
+ count = sun3scsi_dma_xfer_len(hostdata, cmd);
+
+ if (count > 0) {
+ if (rq_data_dir(cmd->request))
+ sun3scsi_dma_send_setup(hostdata,
+ cmd->SCp.ptr, count);
+ else
+ sun3scsi_dma_recv_setup(hostdata,
+ cmd->SCp.ptr, count);
sun3_dma_setup_done = cmd;
}
#ifdef SUN3_SCSI_VME
@@ -1827,7 +1835,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
transfersize = 0;
if (!cmd->device->borken)
- transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
+ transfersize = NCR5380_dma_xfer_len(hostdata, cmd);
if (transfersize > 0) {
len = transfersize;
@@ -2073,7 +2081,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
} /* switch(phase) */
} else {
spin_unlock_irq(&hostdata->lock);
- NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
}
}
@@ -2119,7 +2127,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
*/
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
@@ -2130,7 +2138,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
* Wait for target to go into MSGIN.
*/
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
do_abort(instance);
return;
@@ -2204,22 +2212,25 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
}
#ifdef CONFIG_SUN3
- {
- void *d;
- unsigned long count;
+ if (sun3_dma_setup_done != tmp) {
+ int count;
if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
- count = tmp->SCp.buffer->length;
- d = sg_virt(tmp->SCp.buffer);
- } else {
- count = tmp->SCp.this_residual;
- d = tmp->SCp.ptr;
+ ++tmp->SCp.buffer;
+ --tmp->SCp.buffers_residual;
+ tmp->SCp.this_residual = tmp->SCp.buffer->length;
+ tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
}
- if (sun3_dma_setup_done != tmp &&
- sun3scsi_dma_xfer_len(count, tmp) > 0) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(tmp->request));
+ count = sun3scsi_dma_xfer_len(hostdata, tmp);
+
+ if (count > 0) {
+ if (rq_data_dir(tmp->request))
+ sun3scsi_dma_send_setup(hostdata,
+ tmp->SCp.ptr, count);
+ else
+ sun3scsi_dma_recv_setup(hostdata,
+ tmp->SCp.ptr, count);
sun3_dma_setup_done = tmp;
}
}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 965d92339455..3c6ce5434449 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -219,27 +219,32 @@
#define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */
struct NCR5380_hostdata {
- NCR5380_implementation_fields; /* implementation specific */
- struct Scsi_Host *host; /* Host backpointer */
- unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */
- unsigned char busy[8]; /* index = target, bit = lun */
- int dma_len; /* requested length of DMA */
- unsigned char last_message; /* last message OUT */
- struct scsi_cmnd *connected; /* currently connected cmnd */
- struct scsi_cmnd *selecting; /* cmnd to be connected */
- struct list_head unissued; /* waiting to be issued */
- struct list_head autosense; /* priority issue queue */
- struct list_head disconnected; /* waiting for reconnect */
- spinlock_t lock; /* protects this struct */
- int flags;
- struct scsi_eh_save ses;
- struct scsi_cmnd *sensing;
+ NCR5380_implementation_fields; /* Board-specific data */
+ u8 __iomem *io; /* Remapped 5380 address */
+ u8 __iomem *pdma_io; /* Remapped PDMA address */
+ unsigned long poll_loops; /* Register polling limit */
+ spinlock_t lock; /* Protects this struct */
+ struct scsi_cmnd *connected; /* Currently connected cmnd */
+ struct list_head disconnected; /* Waiting for reconnect */
+ struct Scsi_Host *host; /* SCSI host backpointer */
+ struct workqueue_struct *work_q; /* SCSI host work queue */
+ struct work_struct main_task; /* Work item for main loop */
+ int flags; /* Board-specific quirks */
+ int dma_len; /* Requested length of DMA */
+ int read_overruns; /* Transfer size reduction for DMA erratum */
+ unsigned long io_port; /* Device IO port */
+ unsigned long base; /* Device base address */
+ struct list_head unissued; /* Waiting to be issued */
+ struct scsi_cmnd *selecting; /* Cmnd to be connected */
+ struct list_head autosense; /* Priority cmnd queue */
+ struct scsi_cmnd *sensing; /* Cmnd needing autosense */
+ struct scsi_eh_save ses; /* Cmnd state saved for EH */
+ unsigned char busy[8]; /* Index = target, bit = lun */
+ unsigned char id_mask; /* 1 << Host ID */
+ unsigned char id_higher_mask; /* All bits above id_mask */
+ unsigned char last_message; /* Last Message Out */
+ unsigned long region_size; /* Size of address/port range */
char info[256];
- int read_overruns; /* number of bytes to cut from a
- * transfer to handle chip overruns */
- struct work_struct main_task;
- struct workqueue_struct *work_q;
- unsigned long accesses_per_ms; /* chip register accesses per ms */
};
#ifdef __KERNEL__
@@ -252,6 +257,9 @@ struct NCR5380_cmd {
#define NCR5380_PIO_CHUNK_SIZE 256
+/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
+#define NCR5380_REG_POLL_TIME 15
+
static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
{
return ((struct scsi_cmnd *)ncmd_ptr) - 1;
@@ -294,14 +302,45 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
+ unsigned int, u8, u8,
+ unsigned int, u8, u8, unsigned long);
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
- int reg, int bit, int val, int wait)
+static inline int NCR5380_poll_politely(struct NCR5380_hostdata *hostdata,
+ unsigned int reg, u8 bit, u8 val,
+ unsigned long wait)
{
- return NCR5380_poll_politely2(instance, reg, bit, val,
+ if ((NCR5380_read(reg) & bit) == val)
+ return 0;
+
+ return NCR5380_poll_politely2(hostdata, reg, bit, val,
reg, bit, val, wait);
}
+static int NCR5380_dma_xfer_len(struct NCR5380_hostdata *,
+ struct scsi_cmnd *);
+static int NCR5380_dma_send_setup(struct NCR5380_hostdata *,
+ unsigned char *, int);
+static int NCR5380_dma_recv_setup(struct NCR5380_hostdata *,
+ unsigned char *, int);
+static int NCR5380_dma_residual(struct NCR5380_hostdata *);
+
+static inline int NCR5380_dma_xfer_none(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
+{
+ return 0;
+}
+
+static inline int NCR5380_dma_setup_none(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return 0;
+}
+
+static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
+{
+ return 0;
+}
+
#endif /* __KERNEL__ */
#endif /* NCR5380_H */
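
Note: the polling rework above precomputes hostdata->poll_loops once in NCR5380_init() as NCR5380_REG_POLL_TIME * accesses_per_ms / 2 (roughly the number of paired register reads that fit in the 15 ms budget) and lets the inline NCR5380_poll_politely() take a fast-path single read before falling back to NCR5380_poll_politely2(). A simplified, generic sketch of the "polite polling" idea, using a plain MMIO register instead of the 5380 accessors (an illustration only, not the driver's implementation):

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

static int example_poll_politely(void __iomem *reg, u8 bit, u8 val,
                                 unsigned long busy_loops, unsigned long wait)
{
        unsigned long deadline = jiffies + wait;
        unsigned long n = busy_loops;

        /* Bounded busy-wait first, to catch fast devices cheaply. */
        do {
                if ((ioread8(reg) & bit) == val)
                        return 0;
                cpu_relax();
        } while (n--);

        /* Then back off and sleep between checks until the deadline. */
        while (time_is_after_jiffies(deadline)) {
                schedule_timeout_uninterruptible(1);
                if ((ioread8(reg) & bit) == val)
                        return 0;
        }
        return -ETIMEDOUT;
}
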
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 969c312de1be..f059c14efa0c 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1246,7 +1246,6 @@ struct aac_dev
u32 max_msix; /* max. MSI-X vectors */
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
- struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
u8 adapter_shutdown;
u32 handle_pci_error;
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 341ea327ae79..4f56b1003cc7 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -378,16 +378,12 @@ void aac_define_int_mode(struct aac_dev *dev)
if (msi_count > AAC_MAX_MSIX)
msi_count = AAC_MAX_MSIX;
- for (i = 0; i < msi_count; i++)
- dev->msixentry[i].entry = i;
-
if (msi_count > 1 &&
pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
min_msix = 2;
- i = pci_enable_msix_range(dev->pdev,
- dev->msixentry,
- min_msix,
- msi_count);
+ i = pci_alloc_irq_vectors(dev->pdev,
+ min_msix, msi_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (i > 0) {
dev->msi_enabled = 1;
msi_count = i;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0aeecec1f5ea..9e7551fe4b19 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2043,30 +2043,22 @@ int aac_acquire_irq(struct aac_dev *dev)
int i;
int j;
int ret = 0;
- int cpu;
- cpu = cpumask_first(cpu_online_mask);
if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
for (i = 0; i < dev->max_msix; i++) {
dev->aac_msix[i].vector_no = i;
dev->aac_msix[i].dev = dev;
- if (request_irq(dev->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(dev->pdev, i),
dev->a_ops.adapter_intr,
0, "aacraid", &(dev->aac_msix[i]))) {
printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
dev->name, dev->id, i);
for (j = 0 ; j < i ; j++)
- free_irq(dev->msixentry[j].vector,
+ free_irq(pci_irq_vector(dev->pdev, j),
&(dev->aac_msix[j]));
pci_disable_msix(dev->pdev);
ret = -1;
}
- if (irq_set_affinity_hint(dev->msixentry[i].vector,
- get_cpu_mask(cpu))) {
- printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
- dev->name, dev->id, cpu);
- }
- cpu = cpumask_next(cpu, cpu_online_mask);
}
} else {
dev->aac_msix[0].vector_no = 0;
@@ -2096,16 +2088,9 @@ void aac_free_irq(struct aac_dev *dev)
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) {
if (dev->max_msix > 1) {
- for (i = 0; i < dev->max_msix; i++) {
- if (irq_set_affinity_hint(
- dev->msixentry[i].vector, NULL)) {
- printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
- dev->name, dev->id, cpu);
- }
- cpu = cpumask_next(cpu, cpu_online_mask);
- free_irq(dev->msixentry[i].vector,
- &(dev->aac_msix[i]));
- }
+ for (i = 0; i < dev->max_msix; i++)
+ free_irq(pci_irq_vector(dev->pdev, i),
+ &(dev->aac_msix[i]));
} else {
free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 79871f3519ff..e4f3e22fcbd9 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1071,7 +1071,6 @@ static struct scsi_host_template aac_driver_template = {
static void __aac_shutdown(struct aac_dev * aac)
{
int i;
- int cpu;
aac_send_shutdown(aac);
@@ -1087,24 +1086,13 @@ static void __aac_shutdown(struct aac_dev * aac)
kthread_stop(aac->thread);
}
aac_adapter_disable_int(aac);
- cpu = cpumask_first(cpu_online_mask);
if (aac->pdev->device == PMC_DEVICE_S6 ||
aac->pdev->device == PMC_DEVICE_S7 ||
aac->pdev->device == PMC_DEVICE_S8 ||
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
- if (irq_set_affinity_hint(
- aac->msixentry[i].vector,
- NULL)) {
- printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
- aac->name,
- aac->id,
- cpu);
- }
- cpu = cpumask_next(cpu,
- cpu_online_mask);
- free_irq(aac->msixentry[i].vector,
+ free_irq(pci_irq_vector(aac->pdev, i),
&(aac->aac_msix[i]));
}
} else {
@@ -1350,7 +1338,7 @@ static void aac_release_resources(struct aac_dev *aac)
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++)
- free_irq(aac->msixentry[i].vector,
+ free_irq(pci_irq_vector(aac->pdev, i),
&(aac->aac_msix[i]));
} else {
free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
@@ -1396,13 +1384,13 @@ static int aac_acquire_resources(struct aac_dev *dev)
dev->aac_msix[i].vector_no = i;
dev->aac_msix[i].dev = dev;
- if (request_irq(dev->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(dev->pdev, i),
dev->a_ops.adapter_intr,
0, "aacraid", &(dev->aac_msix[i]))) {
printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
name, instance, i);
for (j = 0 ; j < i ; j++)
- free_irq(dev->msixentry[j].vector,
+ free_irq(pci_irq_vector(dev->pdev, j),
&(dev->aac_msix[j]));
pci_disable_msix(dev->pdev);
goto error_iounmap;
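
Note: the aacraid changes above (and the arcmsr conversion that follows) drop the driver-managed struct msix_entry array in favour of pci_alloc_irq_vectors(), which handles the MSI-X/MSI/legacy fallback and, with PCI_IRQ_AFFINITY, spreads vectors across CPUs so the explicit irq_set_affinity_hint() calls can go away; individual vectors are then looked up with pci_irq_vector(). A minimal sketch of the resulting request/free pattern (hypothetical driver context and handler):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_intr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_setup_irqs(struct pci_dev *pdev, void *ctx, int want)
{
        int nvec, i, err;

        nvec = pci_alloc_irq_vectors(pdev, 1, want,
                                     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (nvec < 0)
                return nvec;

        for (i = 0; i < nvec; i++) {
                err = request_irq(pci_irq_vector(pdev, i), example_intr,
                                  0, "example", ctx);
                if (err) {
                        while (--i >= 0)
                                free_irq(pci_irq_vector(pdev, i), ctx);
                        pci_free_irq_vectors(pdev);
                        return err;
                }
        }
        return nvec;    /* number of vectors actually in use */
}

static void example_teardown_irqs(struct pci_dev *pdev, void *ctx, int nvec)
{
        int i;

        for (i = 0; i < nvec; i++)
                free_irq(pci_irq_vector(pdev, i), ctx);
        pci_free_irq_vectors(pdev);
}
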
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index febbd83e2ecd..81dd0927246b 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -11030,6 +11030,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
ASC_DBG(2, "AdvInitGetConfig()\n");
ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
+#else
+ share_irq = 0;
+ ret = -ENODEV;
#endif /* CONFIG_PCI */
}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 2e3117aa382f..21ac265280bf 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -254,7 +254,7 @@ main(int argc, char *argv[])
argv += optind;
if (argc != 1) {
- fprintf(stderr, "%s: No input file specifiled\n", appname);
+ fprintf(stderr, "%s: No input file specified\n", appname);
usage();
/* NOTREACHED */
}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 7c713f797535..f2671a8fa7e3 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -228,8 +228,11 @@ static int asd_init_scbs(struct asd_ha_struct *asd_ha)
bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
- if (!asd_ha->seq.tc_index_bitmap)
+ if (!asd_ha->seq.tc_index_bitmap) {
+ kfree(asd_ha->seq.tc_index_array);
+ asd_ha->seq.tc_index_array = NULL;
return -ENOMEM;
+ }
spin_lock_init(&seq->tc_index_lock);
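
The asd_init_scbs() hunk is a small instance of a common unwind rule: when the second of two paired allocations fails, the first must be released (and cleared) before returning. A generic sketch of the same shape, with a hypothetical context structure:

/* Illustrative only; struct ctx and its fields a/b are hypothetical. */
struct ctx {
	u32 *a;
	u8 *b;
};

static int init_pair(struct ctx *c, size_t na, size_t nb)
{
	c->a = kcalloc(na, sizeof(*c->a), GFP_KERNEL);
	if (!c->a)
		return -ENOMEM;

	c->b = kzalloc(nb, GFP_KERNEL);
	if (!c->b) {
		kfree(c->a);	/* don't leak the first allocation */
		c->a = NULL;
		return -ENOMEM;
	}
	return 0;
}
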
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index cf99f8cf4cdd..a254b32eba39 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -629,7 +629,6 @@ struct AdapterControlBlock
struct pci_dev * pdev;
struct Scsi_Host * host;
unsigned long vir2phy_offset;
- struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
/* Offset is used in making arc cdb physical to virtual calculations */
uint32_t outbound_int_enable;
uint32_t cdb_phyaddr_hi32;
@@ -671,8 +670,6 @@ struct AdapterControlBlock
/* iop init */
#define ACB_F_ABORT 0x0200
#define ACB_F_FIRMWARE_TRAP 0x0400
- #define ACB_F_MSI_ENABLED 0x1000
- #define ACB_F_MSIX_ENABLED 0x2000
struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
struct list_head ccb_free_list;
@@ -725,7 +722,7 @@ struct AdapterControlBlock
atomic_t rq_map_token;
atomic_t ante_token_value;
uint32_t maxOutstanding;
- int msix_vector_count;
+ int vector_count;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f0cfb0451757..9e45749d55ed 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -720,51 +720,39 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
- int i, j, r;
- struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
-
- for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
- entries[i].entry = i;
- r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
- if (r < 0)
- goto msi_int;
- acb->msix_vector_count = r;
- for (i = 0; i < r; i++) {
- if (request_irq(entries[i].vector,
- arcmsr_do_interrupt, 0, "arcmsr", acb)) {
+ unsigned long flags;
+ int nvec, i;
+
+ nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
+ PCI_IRQ_MSIX);
+ if (nvec > 0) {
+ pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+ flags = 0;
+ } else {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (nvec < 1)
+ return FAILED;
+
+ flags = IRQF_SHARED;
+ }
+
+ acb->vector_count = nvec;
+ for (i = 0; i < nvec; i++) {
+ if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
+ flags, "arcmsr", acb)) {
pr_warn("arcmsr%d: request_irq =%d failed!\n",
- acb->host->host_no, entries[i].vector);
- for (j = 0 ; j < i ; j++)
- free_irq(entries[j].vector, acb);
- pci_disable_msix(pdev);
- goto msi_int;
+ acb->host->host_no, pci_irq_vector(pdev, i));
+ goto out_free_irq;
}
- acb->entries[i] = entries[i];
- }
- acb->acb_flags |= ACB_F_MSIX_ENABLED;
- pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
- return SUCCESS;
-msi_int:
- if (pci_enable_msi_exact(pdev, 1) < 0)
- goto legacy_int;
- if (request_irq(pdev->irq, arcmsr_do_interrupt,
- IRQF_SHARED, "arcmsr", acb)) {
- pr_warn("arcmsr%d: request_irq =%d failed!\n",
- acb->host->host_no, pdev->irq);
- pci_disable_msi(pdev);
- goto legacy_int;
- }
- acb->acb_flags |= ACB_F_MSI_ENABLED;
- pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
- return SUCCESS;
-legacy_int:
- if (request_irq(pdev->irq, arcmsr_do_interrupt,
- IRQF_SHARED, "arcmsr", acb)) {
- pr_warn("arcmsr%d: request_irq = %d failed!\n",
- acb->host->host_no, pdev->irq);
- return FAILED;
}
+
return SUCCESS;
+out_free_irq:
+ while (--i >= 0)
+ free_irq(pci_irq_vector(pdev, i), acb);
+ pci_free_irq_vectors(pdev);
+ return FAILED;
}
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -886,15 +874,9 @@ static void arcmsr_free_irq(struct pci_dev *pdev,
{
int i;
- if (acb->acb_flags & ACB_F_MSI_ENABLED) {
- free_irq(pdev->irq, acb);
- pci_disable_msi(pdev);
- } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
- for (i = 0; i < acb->msix_vector_count; i++)
- free_irq(acb->entries[i].vector, acb);
- pci_disable_msix(pdev);
- } else
- free_irq(pdev->irq, acb);
+ for (i = 0; i < acb->vector_count; i++)
+ free_irq(pci_irq_vector(pdev, i), acb);
+ pci_free_irq_vectors(pdev);
}
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
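
The rewritten arcmsr_request_irq() is the canonical pci_alloc_irq_vectors() pattern: try MSI-X first, fall back to a single MSI or legacy vector, and keep only the vector count so teardown becomes a uniform loop over pci_irq_vector() followed by pci_free_irq_vectors(). A condensed sketch of just the allocate-with-fallback step; MY_MAX_VECS and my_alloc_vectors() are illustrative:

/* Sketch of the allocate-with-fallback step; names and the cap are illustrative. */
#define MY_MAX_VECS	4

static int my_alloc_vectors(struct pci_dev *pdev, unsigned long *irqflags)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, MY_MAX_VECS, PCI_IRQ_MSIX);
	if (nvec > 0) {
		*irqflags = 0;			/* per-vector, no sharing needed */
		return nvec;
	}

	nvec = pci_alloc_irq_vectors(pdev, 1, 1,
				     PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 1)
		return -ENODEV;

	*irqflags = IRQF_SHARED;		/* legacy INTx may be shared */
	return nvec;
}
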
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 8e9cfe8f22f5..a87b99c7fb9a 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -14,49 +14,48 @@
#include <scsi/scsi_host.h>
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg) cumanascsi_read(instance, reg)
-#define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value)
+#define NCR5380_read(reg) cumanascsi_read(hostdata, reg)
+#define NCR5380_write(reg, value) cumanascsi_write(hostdata, reg, value)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
+#define NCR5380_dma_xfer_len cumanascsi_dma_xfer_len
#define NCR5380_dma_recv_setup cumanascsi_pread
#define NCR5380_dma_send_setup cumanascsi_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_intr cumanascsi_intr
#define NCR5380_queue_command cumanascsi_queue_command
#define NCR5380_info cumanascsi_info
#define NCR5380_implementation_fields \
- unsigned ctrl; \
- void __iomem *base; \
- void __iomem *dma
+ unsigned ctrl
-#include "../NCR5380.h"
+struct NCR5380_hostdata;
+static u8 cumanascsi_read(struct NCR5380_hostdata *, unsigned int);
+static void cumanascsi_write(struct NCR5380_hostdata *, unsigned int, u8);
-void cumanascsi_setup(char *str, int *ints)
-{
-}
+#include "../NCR5380.h"
#define CTRL 0x16fc
#define STAT 0x2004
#define L(v) (((v)<<16)|((v) & 0x0000ffff))
#define H(v) (((v)>>16)|((v) & 0xffff0000))
-static inline int cumanascsi_pwrite(struct Scsi_Host *host,
+static inline int cumanascsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
unsigned long *laddr;
- void __iomem *dma = priv(host)->dma + 0x2000;
+ u8 __iomem *base = hostdata->io;
+ u8 __iomem *dma = hostdata->pdma_io + 0x2000;
if(!len) return 0;
- writeb(0x02, priv(host)->base + CTRL);
+ writeb(0x02, base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
unsigned long v;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
@@ -75,12 +74,12 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
}
addr = (unsigned char *)laddr;
- writeb(0x12, priv(host)->base + CTRL);
+ writeb(0x12, base + CTRL);
while(len > 0)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -90,7 +89,7 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
break;
}
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -101,27 +100,28 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
}
}
end:
- writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ writeb(hostdata->ctrl | 0x40, base + CTRL);
if (len)
return -1;
return 0;
}
-static inline int cumanascsi_pread(struct Scsi_Host *host,
+static inline int cumanascsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
unsigned long *laddr;
- void __iomem *dma = priv(host)->dma + 0x2000;
+ u8 __iomem *base = hostdata->io;
+ u8 __iomem *dma = hostdata->pdma_io + 0x2000;
if(!len) return 0;
- writeb(0x00, priv(host)->base + CTRL);
+ writeb(0x00, base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
@@ -140,12 +140,12 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
}
addr = (unsigned char *)laddr;
- writeb(0x10, priv(host)->base + CTRL);
+ writeb(0x10, base + CTRL);
while(len > 0)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -155,7 +155,7 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
break;
}
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -166,37 +166,45 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
}
}
end:
- writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ writeb(hostdata->ctrl | 0x40, base + CTRL);
if (len)
return -1;
return 0;
}
-static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
+static int cumanascsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
+{
+ return cmd->transfersize;
+}
+
+static u8 cumanascsi_read(struct NCR5380_hostdata *hostdata,
+ unsigned int reg)
{
- void __iomem *base = priv(host)->base;
- unsigned char val;
+ u8 __iomem *base = hostdata->io;
+ u8 val;
writeb(0, base + CTRL);
val = readb(base + 0x2100 + (reg << 2));
- priv(host)->ctrl = 0x40;
+ hostdata->ctrl = 0x40;
writeb(0x40, base + CTRL);
return val;
}
-static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value)
+static void cumanascsi_write(struct NCR5380_hostdata *hostdata,
+ unsigned int reg, u8 value)
{
- void __iomem *base = priv(host)->base;
+ u8 __iomem *base = hostdata->io;
writeb(0, base + CTRL);
writeb(value, base + 0x2100 + (reg << 2));
- priv(host)->ctrl = 0x40;
+ hostdata->ctrl = 0x40;
writeb(0x40, base + CTRL);
}
@@ -235,11 +243,11 @@ static int cumanascsi1_probe(struct expansion_card *ec,
goto out_release;
}
- priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
- ecard_resource_len(ec, ECARD_RES_IOCSLOW));
- priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
- ecard_resource_len(ec, ECARD_RES_MEMC));
- if (!priv(host)->base || !priv(host)->dma) {
+ priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
+ ecard_resource_len(ec, ECARD_RES_IOCSLOW));
+ priv(host)->pdma_io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->io || !priv(host)->pdma_io) {
ret = -ENOMEM;
goto out_unmap;
}
@@ -253,7 +261,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
NCR5380_maybe_reset_bus(host);
priv(host)->ctrl = 0;
- writeb(0, priv(host)->base + CTRL);
+ writeb(0, priv(host)->io + CTRL);
ret = request_irq(host->irq, cumanascsi_intr, 0,
"CumanaSCSI-1", host);
@@ -275,8 +283,8 @@ static int cumanascsi1_probe(struct expansion_card *ec,
out_exit:
NCR5380_exit(host);
out_unmap:
- iounmap(priv(host)->base);
- iounmap(priv(host)->dma);
+ iounmap(priv(host)->io);
+ iounmap(priv(host)->pdma_io);
scsi_host_put(host);
out_release:
ecard_release_resources(ec);
@@ -287,15 +295,17 @@ static int cumanascsi1_probe(struct expansion_card *ec,
static void cumanascsi1_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
+ void __iomem *base = priv(host)->io;
+ void __iomem *dma = priv(host)->pdma_io;
ecard_set_drvdata(ec, NULL);
scsi_remove_host(host);
free_irq(host->irq, host);
NCR5380_exit(host);
- iounmap(priv(host)->base);
- iounmap(priv(host)->dma);
scsi_host_put(host);
+ iounmap(base);
+ iounmap(dma);
ecard_release_resources(ec);
}
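
As the cumana_1 conversion shows, the NCR5380 core now invokes board register accessors with a struct NCR5380_hostdata pointer rather than the Scsi_Host, and the board driver defines NCR5380_read/NCR5380_write in terms of hostdata->io before including ../NCR5380.h. A minimal sketch of that convention for a hypothetical memory-mapped board (the 0x100 base and register stride are made up):

/*
 * Sketch of the accessor convention only; "myboard" and the 0x100 offset
 * are hypothetical. The macros are consumed by ../NCR5380.h.
 */
#define NCR5380_read(reg)		myboard_read(hostdata, reg)
#define NCR5380_write(reg, value)	myboard_write(hostdata, reg, value)

static u8 myboard_read(struct NCR5380_hostdata *hostdata, unsigned int reg)
{
	return readb(hostdata->io + 0x100 + (reg << 2));
}

static void myboard_write(struct NCR5380_hostdata *hostdata,
			  unsigned int reg, u8 value)
{
	writeb(value, hostdata->io + 0x100 + (reg << 2));
}
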
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index a396024a3cae..6be6666534d4 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -16,21 +16,18 @@
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg) \
- readb(priv(instance)->base + ((reg) << 2))
-#define NCR5380_write(reg, value) \
- writeb(value, priv(instance)->base + ((reg) << 2))
+#define NCR5380_read(reg) readb(hostdata->io + ((reg) << 2))
+#define NCR5380_write(reg, value) writeb(value, hostdata->io + ((reg) << 2))
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
+#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none
#define NCR5380_dma_recv_setup oakscsi_pread
#define NCR5380_dma_send_setup oakscsi_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_queue_command oakscsi_queue_command
#define NCR5380_info oakscsi_info
-#define NCR5380_implementation_fields \
- void __iomem *base
+#define NCR5380_implementation_fields /* none */
#include "../NCR5380.h"
@@ -40,10 +37,10 @@
#define STAT ((128 + 16) << 2)
#define DATA ((128 + 8) << 2)
-static inline int oakscsi_pwrite(struct Scsi_Host *instance,
+static inline int oakscsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
- void __iomem *base = priv(instance)->base;
+ u8 __iomem *base = hostdata->io;
printk("writing %p len %d\n",addr, len);
@@ -55,10 +52,11 @@ printk("writing %p len %d\n",addr, len);
return 0;
}
-static inline int oakscsi_pread(struct Scsi_Host *instance,
+static inline int oakscsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
- void __iomem *base = priv(instance)->base;
+ u8 __iomem *base = hostdata->io;
+
printk("reading %p len %d\n", addr, len);
while(len > 0)
{
@@ -133,15 +131,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
goto release;
}
- priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
- ecard_resource_len(ec, ECARD_RES_MEMC));
- if (!priv(host)->base) {
+ priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->io) {
ret = -ENOMEM;
goto unreg;
}
host->irq = NO_IRQ;
- host->n_io_port = 255;
ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
if (ret)
@@ -159,7 +156,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
out_exit:
NCR5380_exit(host);
out_unmap:
- iounmap(priv(host)->base);
+ iounmap(priv(host)->io);
unreg:
scsi_host_put(host);
release:
@@ -171,13 +168,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
static void oakscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
+ void __iomem *base = priv(host)->io;
ecard_set_drvdata(ec, NULL);
scsi_remove_host(host);
NCR5380_exit(host);
- iounmap(priv(host)->base);
scsi_host_put(host);
+ iounmap(base);
ecard_release_resources(ec);
}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a59ad94ea52b..105b35393ce9 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -57,6 +57,9 @@
#define NCR5380_implementation_fields /* none */
+static u8 (*atari_scsi_reg_read)(unsigned int);
+static void (*atari_scsi_reg_write)(unsigned int, u8);
+
#define NCR5380_read(reg) atari_scsi_reg_read(reg)
#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value)
@@ -64,14 +67,10 @@
#define NCR5380_abort atari_scsi_abort
#define NCR5380_info atari_scsi_info
-#define NCR5380_dma_recv_setup(instance, data, count) \
- atari_scsi_dma_setup(instance, data, count, 0)
-#define NCR5380_dma_send_setup(instance, data, count) \
- atari_scsi_dma_setup(instance, data, count, 1)
-#define NCR5380_dma_residual(instance) \
- atari_scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
+#define NCR5380_dma_xfer_len atari_scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup atari_scsi_dma_recv_setup
+#define NCR5380_dma_send_setup atari_scsi_dma_send_setup
+#define NCR5380_dma_residual atari_scsi_dma_residual
#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance)
#define NCR5380_release_dma_irq(instance) falcon_release_lock()
@@ -126,9 +125,6 @@ static inline unsigned long SCSI_DMA_GETADR(void)
static void atari_scsi_fetch_restbytes(void);
-static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
-static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
-
static unsigned long atari_dma_residual, atari_dma_startaddr;
static short atari_dma_active;
/* pointer to the dribble buffer */
@@ -457,15 +453,14 @@ static int __init atari_scsi_setup(char *str)
__setup("atascsi=", atari_scsi_setup);
#endif /* !MODULE */
-
-static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
+static unsigned long atari_scsi_dma_setup(struct NCR5380_hostdata *hostdata,
void *data, unsigned long count,
int dir)
{
unsigned long addr = virt_to_phys(data);
- dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
- "dir = %d\n", instance->host_no, data, addr, count, dir);
+ dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, dir = %d\n",
+ hostdata->host->host_no, data, addr, count, dir);
if (!IS_A_TT() && !STRAM_ADDR(addr)) {
/* If we have a non-DMAable address on a Falcon, use the dribble
@@ -522,8 +517,19 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
return count;
}
+static inline int atari_scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return atari_scsi_dma_setup(hostdata, data, count, 0);
+}
+
+static inline int atari_scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return atari_scsi_dma_setup(hostdata, data, count, 1);
+}
-static long atari_scsi_dma_residual(struct Scsi_Host *instance)
+static int atari_scsi_dma_residual(struct NCR5380_hostdata *hostdata)
{
return atari_dma_residual;
}
@@ -564,10 +570,11 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd)
* the overrun problem, so this question is academic :-)
*/
-static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
- struct scsi_cmnd *cmd, int write_flag)
+static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
{
- unsigned long possible_len, limit;
+ int wanted_len = cmd->SCp.this_residual;
+ int possible_len, limit;
if (wanted_len < DMA_MIN_SIZE)
return 0;
@@ -604,7 +611,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
* use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes.
*/
- if (write_flag) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
/* Write operation can always use the DMA, but the transfer size must
* be rounded up to the next multiple of 512 (atari_dma_setup() does
* this).
@@ -644,8 +651,8 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
possible_len = limit;
if (possible_len != wanted_len)
- dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
- "instead of %ld\n", possible_len, wanted_len);
+ dprintk(NDEBUG_DMA, "DMA transfer now %d bytes instead of %d\n",
+ possible_len, wanted_len);
return possible_len;
}
@@ -658,26 +665,38 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
* NCR5380_write call these functions via function pointers.
*/
-static unsigned char atari_scsi_tt_reg_read(unsigned char reg)
+static u8 atari_scsi_tt_reg_read(unsigned int reg)
{
return tt_scsi_regp[reg * 2];
}
-static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_tt_reg_write(unsigned int reg, u8 value)
{
tt_scsi_regp[reg * 2] = value;
}
-static unsigned char atari_scsi_falcon_reg_read(unsigned char reg)
+static u8 atari_scsi_falcon_reg_read(unsigned int reg)
{
- dma_wd.dma_mode_status= (u_short)(0x88 + reg);
- return (u_char)dma_wd.fdc_acces_seccount;
+ unsigned long flags;
+ u8 result;
+
+ reg += 0x88;
+ local_irq_save(flags);
+ dma_wd.dma_mode_status = (u_short)reg;
+ result = (u8)dma_wd.fdc_acces_seccount;
+ local_irq_restore(flags);
+ return result;
}
-static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value)
{
- dma_wd.dma_mode_status = (u_short)(0x88 + reg);
+ unsigned long flags;
+
+ reg += 0x88;
+ local_irq_save(flags);
+ dma_wd.dma_mode_status = (u_short)reg;
dma_wd.fdc_acces_seccount = (u_short)value;
+ local_irq_restore(flags);
}
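
The Falcon accessors gain local_irq_save()/local_irq_restore() because the hardware is addressed in two steps (select a register through dma_mode_status, then read or write fdc_acces_seccount); an interrupt between the two steps could reprogram the selector. A generic sketch of guarding such an indexed-access sequence, with hypothetical register names:

/* Hypothetical index/data register pair; the pattern, not the hardware, is the point. */
static u8 indexed_reg_read(void __iomem *index_reg, void __iomem *data_reg,
			   unsigned int reg)
{
	unsigned long flags;
	u8 val;

	local_irq_save(flags);		/* keep select + access atomic */
	writeb(reg, index_reg);
	val = readb(data_reg);
	local_irq_restore(flags);

	return val;
}
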
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index d9239c2d49b1..b5112d6d7e73 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3049,8 +3049,10 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
eq_vaddress = pci_alloc_consistent(phba->pcidev,
num_eq_pages * PAGE_SIZE,
&paddr);
- if (!eq_vaddress)
+ if (!eq_vaddress) {
+ ret = -ENOMEM;
goto create_eq_error;
+ }
mem->va = eq_vaddress;
ret = be_fill_queue(eq, phba->params.num_eq_entries,
@@ -3113,8 +3115,10 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
cq_vaddress = pci_alloc_consistent(phba->pcidev,
num_cq_pages * PAGE_SIZE,
&paddr);
- if (!cq_vaddress)
+ if (!cq_vaddress) {
+ ret = -ENOMEM;
goto create_cq_error;
+ }
ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 713745da44c6..0f9fab770339 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -111,20 +111,24 @@ struct bfa_meminfo_s {
struct bfa_mem_kva_s kva_info;
};
-/* BFA memory segment setup macros */
-#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \
- ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \
- if (_seg_sz) \
- list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \
- &(_meminfo)->dma_info.qe); \
-} while (0)
+/* BFA memory segment setup helpers */
+static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo,
+ struct bfa_mem_dma_s *dm_ptr,
+ size_t seg_sz)
+{
+ dm_ptr->mem_len = seg_sz;
+ if (seg_sz)
+ list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe);
+}
-#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \
- ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \
- if (_seg_sz) \
- list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
- &(_meminfo)->kva_info.qe); \
-} while (0)
+static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo,
+ struct bfa_mem_kva_s *kva_ptr,
+ size_t seg_sz)
+{
+ kva_ptr->mem_len = seg_sz;
+ if (seg_sz)
+ list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe);
+}
/* BFA dma memory segments iterator */
#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)])
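
Rewriting bfa_mem_dma_setup()/bfa_mem_kva_setup() as static inlines trades the macro's blind casts for real parameter types, so passing the wrong object now fails at compile time instead of silently corrupting memory. A sketch of the general transformation with a hypothetical "foo" structure:

/*
 * Hypothetical example of the same macro-to-inline conversion: the old
 * do { ... } while (0) macro force-cast its argument, the inline below
 * gets full type checking from the compiler.
 */
struct foo_item {
	struct list_head qe;
	size_t len;
};

static inline void foo_setup(struct list_head *q, struct foo_item *item,
			     size_t len)
{
	item->len = len;
	if (len)
		list_add_tail(&item->qe, q);
}
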
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index d1ad0208dfe7..a9a00169ad91 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3130,11 +3130,12 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
}
static int
-bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+bfad_im_bsg_vendor_request(struct bsg_job *job)
{
- uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
- struct bfad_im_port_s *im_port =
- (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
+ struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
struct request_queue *request_q = job->req->q;
void *payload_kbuf;
@@ -3175,18 +3176,19 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
/* Fill the BSG job reply data */
job->reply_len = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
- job->reply->result = rc;
+ bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ bsg_reply->result = rc;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
error:
/* free the command buffer */
kfree(payload_kbuf);
out:
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->reply_len = sizeof(uint32_t);
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
return rc;
}
@@ -3312,7 +3314,7 @@ bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
}
int
-bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
bfa_bsg_fcpt_t *bsg_fcpt)
{
struct bfa_fcxp_s *hal_fcxp;
@@ -3352,28 +3354,29 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
}
int
-bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
struct bfa_bsg_data *bsg_data;
- struct bfad_im_port_s *im_port =
- (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
bfa_bsg_fcpt_t *bsg_fcpt;
struct bfad_fcxp *drv_fcxp;
struct bfa_fcs_lport_s *fcs_port;
struct bfa_fcs_rport_s *fcs_rport;
- uint32_t command_type = job->request->msgcode;
+	struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t command_type = bsg_request->msgcode;
unsigned long flags;
struct bfad_buf_info *rsp_buf_info;
void *req_kbuf = NULL, *rsp_kbuf = NULL;
int rc = -EINVAL;
job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* Get the payload passed in from userspace */
- bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
- sizeof(struct fc_bsg_request));
+ bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
+ sizeof(struct fc_bsg_request));
if (bsg_data == NULL)
goto out;
@@ -3517,13 +3520,13 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
/* fill the job->reply data */
if (drv_fcxp->req_status == BFA_STATUS_OK) {
job->reply_len = drv_fcxp->rsp_len;
- job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
- job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
} else {
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
job->reply_len = sizeof(uint32_t);
- job->reply->reply_data.ctels_reply.status =
+ bsg_reply->reply_data.ctels_reply.status =
FC_CTELS_STATUS_REJECT;
}
@@ -3549,20 +3552,23 @@ out_free_mem:
kfree(bsg_fcpt);
kfree(drv_fcxp);
out:
- job->reply->result = rc;
+ bsg_reply->result = rc;
if (rc == BFA_STATUS_OK)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
int
-bfad_im_bsg_request(struct fc_bsg_job *job)
+bfad_im_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t rc = BFA_STATUS_OK;
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_HST_VENDOR:
/* Process BSG HST Vendor requests */
rc = bfad_im_bsg_vendor_request(job);
@@ -3575,8 +3581,8 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
rc = bfad_im_bsg_els_ct_request(job);
break;
default:
- job->reply->result = rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = rc = -EINVAL;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
}
@@ -3584,7 +3590,7 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
}
int
-bfad_im_bsg_timeout(struct fc_bsg_job *job)
+bfad_im_bsg_timeout(struct bsg_job *job)
{
/* Don't complete the BSG job request - return -EAGAIN
* to reset bsg job timeout : for ELS/CT pass thru we
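
The bfad_bsg changes follow the tree-wide move from struct fc_bsg_job to the generic struct bsg_job: request and reply payloads are reached through job->request and job->reply, the owning Scsi_Host through fc_bsg_to_shost(), and completion goes through bsg_job_done() instead of job->job_done(). A minimal handler skeleton in the new style; my_bsg_request() and my_handle_vendor_cmd() are hypothetical:

/* Skeleton only; my_handle_vendor_cmd() is assumed to exist elsewhere. */
static int my_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	int rc;

	rc = my_handle_vendor_cmd(shost, bsg_request);

	bsg_reply->result = rc;
	bsg_reply->reply_payload_rcv_len = 0;
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
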
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 836fdc221edd..c81ec2a77ef5 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -166,8 +166,8 @@ extern struct device_attribute *bfad_im_vport_attrs[];
irqreturn_t bfad_intx(int irq, void *dev_id);
-int bfad_im_bsg_request(struct fc_bsg_job *job);
-int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+int bfad_im_bsg_request(struct bsg_job *job);
+int bfad_im_bsg_timeout(struct bsg_job *job);
/*
* Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f9ddb6156f14..0990130821fa 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -970,7 +970,6 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
sizeof(struct libfc_function_template));
fc_elsct_init(lport);
fc_exch_init(lport);
- fc_rport_init(lport);
fc_disc_init(lport);
fc_disc_config(lport, lport);
return 0;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 08ec318afb99..739bfb62aff6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -80,7 +80,6 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt,
struct fc_rport_priv *rdata)
{
- struct fc_lport *lport = rdata->local_port;
struct fc_rport *rport = rdata->rport;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
@@ -160,7 +159,7 @@ ofld_err:
tgt_init_err:
if (tgt->fcoe_conn_id != -1)
bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 4655a9f9dcea..9e6f647ff1c1 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1411,7 +1411,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
if (csk->atid < 0) {
pr_err("%s, NO atid available.\n", ndev->name);
- return -EINVAL;
+ goto rel_resource_without_clip;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6e6815545a71..0e9de5d62da2 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -19,6 +19,7 @@
#include <linux/rwsem.h>
#include <linux/types.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
extern const struct file_operations cxlflash_cxl_fops;
@@ -62,11 +63,6 @@ static inline void check_sizes(void)
/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
#define CMD_BUFSIZE SIZE_4K
-/* flags in IOA status area for host use */
-#define B_DONE 0x01
-#define B_ERROR 0x02 /* set with B_DONE */
-#define B_TIMEOUT 0x04 /* set with B_DONE & B_ERROR */
-
enum cxlflash_lr_state {
LINK_RESET_INVALID,
LINK_RESET_REQUIRED,
@@ -132,12 +128,9 @@ struct cxlflash_cfg {
struct afu_cmd {
struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
struct sisl_ioasa sa; /* IOASA must follow IOARCB */
- spinlock_t slock;
- struct completion cevent;
- char *buf; /* per command buffer */
struct afu *parent;
- int slot;
- atomic_t free;
+ struct scsi_cmnd *scp;
+ struct completion cevent;
u8 cmd_tmf:1;
@@ -147,19 +140,31 @@ struct afu_cmd {
*/
} __aligned(cache_line_size());
+static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
+{
+ return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
+}
+
+static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
+{
+ struct afu_cmd *afuc = sc_to_afuc(sc);
+
+ memset(afuc, 0, sizeof(*afuc));
+ return afuc;
+}
+
struct afu {
/* Stuff requiring alignment go first. */
u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
- /*
- * Command & data for AFU commands.
- */
- struct afu_cmd cmd[CXLFLASH_NUM_CMDS];
/* Beware of alignment till here. Preferably introduce new
* fields after this point
*/
+ int (*send_cmd)(struct afu *, struct afu_cmd *);
+ void (*context_reset)(struct afu_cmd *);
+
/* AFU HW */
struct cxl_ioctl_start_work work;
struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
@@ -173,10 +178,10 @@ struct afu {
u64 *hrrq_end;
u64 *hrrq_curr;
bool toggle;
- bool read_room;
- atomic64_t room;
+ atomic_t cmds_active; /* Number of currently active AFU commands */
+ s64 room;
+ spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
u64 hb;
- u32 cmd_couts; /* Number of command checkouts */
u32 internal_lun; /* User-desired LUN mode for this AFU */
char version[16];
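
The new sc_to_afuc()/sc_to_afucz() helpers rely on SCSI midlayer per-command private data: the host template advertises cmd_size, the midlayer allocates that many extra bytes alongside each scsi_cmnd, and scsi_cmd_priv() returns a pointer to them. Because that region carries no alignment guarantee, cxlflash pads cmd_size and applies PTR_ALIGN. A small sketch of the same arrangement with a hypothetical my_cmd structure:

/* Hypothetical per-command private area, cache-line aligned like afu_cmd. */
struct my_cmd {
	u64 hw_descriptor[8];
} __aligned(64);

static inline struct my_cmd *scp_to_mycmd(struct scsi_cmnd *scp)
{
	/* scsi_cmd_priv() is not alignment-guaranteed, so align by hand. */
	return PTR_ALIGN(scsi_cmd_priv(scp), __alignof__(struct my_cmd));
}

static struct scsi_host_template my_template = {
	.name		= "my_host",
	/* Reserve room for the struct plus worst-case alignment slack. */
	.cmd_size	= sizeof(struct my_cmd) + __alignof__(struct my_cmd) - 1,
};
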
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index a0923cade6f3..6c318db90c85 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -254,8 +254,14 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
if (lli->parent->mode != MODE_NONE)
rc = -EBUSY;
else {
+ /*
+ * Clean up local LUN for this port and reset table
+ * tracking when no more references exist.
+ */
sdev->hostdata = NULL;
lli->port_sel &= ~CHAN2PORT(chan);
+ if (lli->port_sel == 0U)
+ lli->in_table = false;
}
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b301655f91cd..b17ebf6d0a7e 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -35,67 +35,6 @@ MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
/**
- * cmd_checkout() - checks out an AFU command
- * @afu: AFU to checkout from.
- *
- * Commands are checked out in a round-robin fashion. Note that since
- * the command pool is larger than the hardware queue, the majority of
- * times we will only loop once or twice before getting a command. The
- * buffer and CDB within the command are initialized (zeroed) prior to
- * returning.
- *
- * Return: The checked out command or NULL when command pool is empty.
- */
-static struct afu_cmd *cmd_checkout(struct afu *afu)
-{
- int k, dec = CXLFLASH_NUM_CMDS;
- struct afu_cmd *cmd;
-
- while (dec--) {
- k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
-
- cmd = &afu->cmd[k];
-
- if (!atomic_dec_if_positive(&cmd->free)) {
- pr_devel("%s: returning found index=%d cmd=%p\n",
- __func__, cmd->slot, cmd);
- memset(cmd->buf, 0, CMD_BUFSIZE);
- memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
- return cmd;
- }
- }
-
- return NULL;
-}
-
-/**
- * cmd_checkin() - checks in an AFU command
- * @cmd: AFU command to checkin.
- *
- * Safe to pass commands that have already been checked in. Several
- * internal tracking fields are reset as part of the checkin. Note
- * that these are intentionally reset prior to toggling the free bit
- * to avoid clobbering values in the event that the command is checked
- * out right away.
- */
-static void cmd_checkin(struct afu_cmd *cmd)
-{
- cmd->rcb.scp = NULL;
- cmd->rcb.timeout = 0;
- cmd->sa.ioasc = 0;
- cmd->cmd_tmf = false;
- cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
-
- if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
- pr_err("%s: Freeing cmd (%d) that is not in use!\n",
- __func__, cmd->slot);
- return;
- }
-
- pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
-}
-
-/**
* process_cmd_err() - command error handler
* @cmd: AFU command that experienced the error.
* @scp: SCSI command associated with the AFU command in error.
@@ -212,7 +151,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
*
* Prepares and submits command that has either completed or timed out to
* the SCSI stack. Checks AFU command back into command pool for non-internal
- * (rcb.scp populated) commands.
+ * (cmd->scp populated) commands.
*/
static void cmd_complete(struct afu_cmd *cmd)
{
@@ -222,19 +161,14 @@ static void cmd_complete(struct afu_cmd *cmd)
struct cxlflash_cfg *cfg = afu->parent;
bool cmd_is_tmf;
- spin_lock_irqsave(&cmd->slock, lock_flags);
- cmd->sa.host_use_b[0] |= B_DONE;
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
- if (cmd->rcb.scp) {
- scp = cmd->rcb.scp;
+ if (cmd->scp) {
+ scp = cmd->scp;
if (unlikely(cmd->sa.ioasc))
process_cmd_err(cmd, scp);
else
scp->result = (DID_OK << 16);
cmd_is_tmf = cmd->cmd_tmf;
- cmd_checkin(cmd); /* Don't use cmd after here */
pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
"ioasc=%d\n", __func__, scp, scp->result,
@@ -254,49 +188,19 @@ static void cmd_complete(struct afu_cmd *cmd)
}
/**
- * context_reset() - timeout handler for AFU commands
+ * context_reset_ioarrin() - reset command owner context via IOARRIN register
* @cmd: AFU command that timed out.
- *
- * Sends a reset to the AFU.
*/
-static void context_reset(struct afu_cmd *cmd)
+static void context_reset_ioarrin(struct afu_cmd *cmd)
{
int nretry = 0;
u64 rrin = 0x1;
- u64 room = 0;
struct afu *afu = cmd->parent;
- ulong lock_flags;
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
pr_debug("%s: cmd=%p\n", __func__, cmd);
- spin_lock_irqsave(&cmd->slock, lock_flags);
-
- /* Already completed? */
- if (cmd->sa.host_use_b[0] & B_DONE) {
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
- return;
- }
-
- cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
- /*
- * We really want to send this reset at all costs, so spread
- * out wait time on successive retries for available room.
- */
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_rrin;
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- pr_err("%s: no cmd_room to send reset\n", __func__);
- return;
-
-write_rrin:
- nretry = 0;
writeq_be(rrin, &afu->host_map->ioarrin);
do {
rrin = readq_be(&afu->host_map->ioarrin);
@@ -305,93 +209,81 @@ write_rrin:
/* Double delay each time */
udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
+
+ dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
+ __func__, rrin, nretry);
}
/**
- * send_cmd() - sends an AFU command
+ * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
* @afu: AFU associated with the host.
* @cmd: AFU command to send.
*
* Return:
* 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
*/
-static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
+static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- int nretry = 0;
int rc = 0;
- u64 room;
- long newval;
+ s64 room;
+ ulong lock_flags;
/*
- * This routine is used by critical users such an AFU sync and to
- * send a task management function (TMF). Thus we want to retry a
- * bit before returning an error. To avoid the performance penalty
- * of MMIO, we spread the update of 'room' over multiple commands.
+ * To avoid the performance penalty of MMIO, spread the update of
+ * 'room' over multiple commands.
*/
-retry:
- newval = atomic64_dec_if_positive(&afu->room);
- if (!newval) {
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_ioarrin;
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- dev_err(dev, "%s: no cmd_room to send 0x%X\n",
- __func__, cmd->rcb.cdb[0]);
-
- goto no_room;
- } else if (unlikely(newval < 0)) {
- /* This should be rare. i.e. Only if two threads race and
- * decrement before the MMIO read is done. In this case
- * just benefit from the other thread having updated
- * afu->room.
- */
- if (nretry++ < MC_ROOM_RETRY_CNT) {
- udelay(1 << nretry);
- goto retry;
+ spin_lock_irqsave(&afu->rrin_slock, lock_flags);
+ if (--afu->room < 0) {
+ room = readq_be(&afu->host_map->cmd_room);
+ if (room <= 0) {
+ dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
+ "0x%02X, room=0x%016llX\n",
+ __func__, cmd->rcb.cdb[0], room);
+ afu->room = 0;
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
}
-
- goto no_room;
+ afu->room = room - 1;
}
-write_ioarrin:
writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
+ spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
return rc;
-
-no_room:
- afu->read_room = true;
- kref_get(&cfg->afu->mapcount);
- schedule_work(&cfg->work_q);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
}
/**
* wait_resp() - polls for a response or timeout to a sent AFU command
* @afu: AFU associated with the host.
* @cmd: AFU command that was sent.
+ *
+ * Return:
+ * 0 on success, -1 on timeout/error
*/
-static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
+static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
+ int rc = 0;
ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout)
- context_reset(cmd);
+ if (!timeout) {
+ afu->context_reset(cmd);
+ rc = -1;
+ }
- if (unlikely(cmd->sa.ioasc != 0))
+ if (unlikely(cmd->sa.ioasc != 0)) {
pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
"scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
cmd->sa.rc.fc_rc);
+ rc = -1;
+ }
+
+ return rc;
}
/**
@@ -405,24 +297,15 @@ static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
*/
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
- struct afu_cmd *cmd;
-
u32 port_sel = scp->device->channel + 1;
- short lflag = 0;
struct Scsi_Host *host = scp->device->host;
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct afu_cmd *cmd = sc_to_afucz(scp);
struct device *dev = &cfg->dev->dev;
ulong lock_flags;
int rc = 0;
ulong to;
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- dev_err(dev, "%s: could not get a free command\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
/* When Task Management Function is active do not send another */
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
@@ -430,28 +313,23 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
!cfg->tmf_active,
cfg->tmf_slock);
cfg->tmf_active = true;
- cmd->cmd_tmf = true;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
+ cmd->scp = scp;
+ cmd->parent = afu;
+ cmd->cmd_tmf = true;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
-
- lflag = SISL_REQ_FLAGS_TMF_CMD;
-
cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
- /* Stash the scp in the reserved field, for reuse during interrupt */
- cmd->rcb.scp = scp;
-
- /* Copy the CDB from the cmd passed in */
+ SISL_REQ_FLAGS_SUP_UNDERRUN |
+ SISL_REQ_FLAGS_TMF_CMD);
memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
- /* Send the command */
- rc = send_cmd(afu, cmd);
+ rc = afu->send_cmd(afu, cmd);
if (unlikely(rc)) {
- cmd_checkin(cmd);
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
cfg->tmf_active = false;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
@@ -507,12 +385,12 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd;
+ struct afu_cmd *cmd = sc_to_afucz(scp);
+ struct scatterlist *sg = scsi_sglist(scp);
u32 port_sel = scp->device->channel + 1;
- int nseg, i, ncount;
- struct scatterlist *sg;
+ u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
ulong lock_flags;
- short lflag = 0;
+ int nseg = 0;
int rc = 0;
int kref_got = 0;
@@ -552,55 +430,38 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
break;
}
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- dev_err(dev, "%s: could not get a free command\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
kref_get(&cfg->afu->mapcount);
kref_got = 1;
+ if (likely(sg)) {
+ nseg = scsi_dma_map(scp);
+ if (unlikely(nseg < 0)) {
+ dev_err(dev, "%s: Fail DMA map!\n", __func__);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ cmd->rcb.data_len = sg_dma_len(sg);
+ cmd->rcb.data_ea = sg_dma_address(sg);
+ }
+
+ cmd->scp = scp;
+ cmd->parent = afu;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
if (scp->sc_data_direction == DMA_TO_DEVICE)
- lflag = SISL_REQ_FLAGS_HOST_WRITE;
- else
- lflag = SISL_REQ_FLAGS_HOST_READ;
-
- cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
- /* Stash the scp in the reserved field, for reuse during interrupt */
- cmd->rcb.scp = scp;
-
- nseg = scsi_dma_map(scp);
- if (unlikely(nseg < 0)) {
- dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
- __func__, nseg);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
+ req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
- ncount = scsi_sg_count(scp);
- scsi_for_each_sg(scp, sg, ncount, i) {
- cmd->rcb.data_len = sg_dma_len(sg);
- cmd->rcb.data_ea = sg_dma_address(sg);
- }
-
- /* Copy the CDB from the scsi_cmnd passed in */
+ cmd->rcb.req_flags = req_flags;
memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
- /* Send the command */
- rc = send_cmd(afu, cmd);
- if (unlikely(rc)) {
- cmd_checkin(cmd);
+ rc = afu->send_cmd(afu, cmd);
+ if (unlikely(rc))
scsi_dma_unmap(scp);
- }
-
out:
if (kref_got)
kref_put(&afu->mapcount, afu_unmap);
@@ -628,17 +489,9 @@ static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
*/
static void free_mem(struct cxlflash_cfg *cfg)
{
- int i;
- char *buf = NULL;
struct afu *afu = cfg->afu;
if (cfg->afu) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- buf = afu->cmd[i].buf;
- if (!((u64)buf & (PAGE_SIZE - 1)))
- free_page((ulong)buf);
- }
-
free_pages((ulong)afu, get_order(sizeof(struct afu)));
cfg->afu = NULL;
}
@@ -650,30 +503,16 @@ static void free_mem(struct cxlflash_cfg *cfg)
*
* Safe to call with AFU in a partially allocated/initialized state.
*
- * Cleans up all state associated with the command queue, and unmaps
+ * Waits for any active internal AFU commands to timeout and then unmaps
* the MMIO space.
- *
- * - complete() will take care of commands we initiated (they'll be checked
- * in as part of the cleanup that occurs after the completion)
- *
- * - cmd_checkin() will take care of entries that we did not initiate and that
- * have not (and will not) complete because they are sitting on a [now stale]
- * hardware queue
*/
static void stop_afu(struct cxlflash_cfg *cfg)
{
- int i;
struct afu *afu = cfg->afu;
- struct afu_cmd *cmd;
if (likely(afu)) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- cmd = &afu->cmd[i];
- complete(&cmd->cevent);
- if (!atomic_read(&cmd->free))
- cmd_checkin(cmd);
- }
-
+ while (atomic_read(&afu->cmds_active))
+ ssleep(1);
if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
@@ -886,8 +725,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
static int alloc_mem(struct cxlflash_cfg *cfg)
{
int rc = 0;
- int i;
- char *buf = NULL;
struct device *dev = &cfg->dev->dev;
/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
@@ -901,25 +738,6 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
}
cfg->afu->parent = cfg;
cfg->afu->afu_map = NULL;
-
- for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
- if (!((u64)buf & (PAGE_SIZE - 1))) {
- buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (unlikely(!buf)) {
- dev_err(dev,
- "%s: Allocate command buffers fail!\n",
- __func__);
- rc = -ENOMEM;
- free_mem(cfg);
- goto out;
- }
- }
-
- cfg->afu->cmd[i].buf = buf;
- atomic_set(&cfg->afu->cmd[i].free, 1);
- cfg->afu->cmd[i].slot = i;
- }
-
out:
return rc;
}
@@ -1549,13 +1367,6 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Program the Endian Control for the master context */
writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
-
- /* Initialize cmd fields that never change */
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
- afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
- afu->cmd[i].rcb.rrq = 0x0;
- }
}
/**
@@ -1644,19 +1455,8 @@ out:
static int start_afu(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
- struct afu_cmd *cmd;
-
- int i = 0;
int rc = 0;
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- cmd = &afu->cmd[i];
-
- init_completion(&cmd->cevent);
- spin_lock_init(&cmd->slock);
- cmd->parent = afu;
- }
-
init_pcr(cfg);
/* After an AFU reset, RRQ entries are stale, clear them */
@@ -1829,6 +1629,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
goto err2;
}
+ afu->send_cmd = send_cmd_ioarrin;
+ afu->context_reset = context_reset_ioarrin;
+
pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
afu->version, afu->interface_version);
@@ -1840,7 +1643,8 @@ static int init_afu(struct cxlflash_cfg *cfg)
}
afu_err_intr_init(cfg->afu);
- atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+ spin_lock_init(&afu->rrin_slock);
+ afu->room = readq_be(&afu->host_map->cmd_room);
/* Restore the LUN mappings */
cxlflash_restore_luntable(cfg);
@@ -1884,8 +1688,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = NULL;
+ char *buf = NULL;
int rc = 0;
- int retry_cnt = 0;
static DEFINE_MUTEX(sync_active);
if (cfg->state != STATE_NORMAL) {
@@ -1894,27 +1698,23 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
}
mutex_lock(&sync_active);
-retry:
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- retry_cnt++;
- udelay(1000 * retry_cnt);
- if (retry_cnt < MC_RETRY_CNT)
- goto retry;
- dev_err(dev, "%s: could not get a free command\n", __func__);
+ atomic_inc(&afu->cmds_active);
+ buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ dev_err(dev, "%s: no memory for command\n", __func__);
rc = -1;
goto out;
}
- pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
+ cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+ init_completion(&cmd->cevent);
+ cmd->parent = afu;
- memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
+ pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- cmd->rcb.port_sel = 0x0; /* NA */
- cmd->rcb.lun_id = 0x0; /* NA */
- cmd->rcb.data_len = 0x0;
- cmd->rcb.data_ea = 0x0;
+ cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
@@ -1924,20 +1724,17 @@ retry:
*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
- rc = send_cmd(afu, cmd);
+ rc = afu->send_cmd(afu, cmd);
if (unlikely(rc))
goto out;
- wait_resp(afu, cmd);
-
- /* Set on timeout */
- if (unlikely((cmd->sa.ioasc != 0) ||
- (cmd->sa.host_use_b[0] & B_ERROR)))
+ rc = wait_resp(afu, cmd);
+ if (unlikely(rc))
rc = -1;
out:
+ atomic_dec(&afu->cmds_active);
mutex_unlock(&sync_active);
- if (cmd)
- cmd_checkin(cmd);
+ kfree(buf);
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -2376,8 +2173,9 @@ static struct scsi_host_template driver_template = {
.change_queue_depth = cxlflash_change_queue_depth,
.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
.can_queue = CXLFLASH_MAX_CMDS,
+ .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
.this_id = -1,
- .sg_tablesize = SG_NONE, /* No scatter gather support */
+ .sg_tablesize = 1, /* No scatter gather support */
.max_sectors = CXLFLASH_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = cxlflash_host_attrs,
@@ -2412,7 +2210,6 @@ MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
* Handles the following events:
* - Link reset which cannot be performed on interrupt context due to
* blocking up to a few seconds
- * - Read AFU command room
* - Rescan the host
*/
static void cxlflash_worker_thread(struct work_struct *work)
@@ -2449,11 +2246,6 @@ static void cxlflash_worker_thread(struct work_struct *work)
cfg->lr_state = LINK_RESET_COMPLETE;
}
- if (afu->read_room) {
- atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
- afu->read_room = false;
- }
-
spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
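
send_cmd_ioarrin() above keeps a cached command-room count under a spinlock and only reads the MMIO cmd_room register when the cached count runs out, amortizing the cost of the MMIO read over many submissions. A stripped-down sketch of that credit accounting, with hypothetical structures and registers:

/* Illustrative credit accounting; struct my_hw and its registers are hypothetical. */
struct my_hw_regs {
	__be64 cmd_room;
	__be64 submit_reg;
};

struct my_hw {
	spinlock_t room_lock;
	s64 room;			/* cached credits, refreshed from HW */
	struct my_hw_regs __iomem *mmio;
};

static int my_submit(struct my_hw *hw, u64 descriptor)
{
	unsigned long flags;
	s64 room;
	int rc = 0;

	spin_lock_irqsave(&hw->room_lock, flags);
	if (--hw->room < 0) {
		room = readq_be(&hw->mmio->cmd_room);	/* refresh from HW */
		if (room <= 0) {
			hw->room = 0;
			rc = -EBUSY;			/* caller retries later */
			goto out;
		}
		hw->room = room - 1;
	}
	writeq_be(descriptor, &hw->mmio->submit_reg);
out:
	spin_unlock_irqrestore(&hw->room_lock, flags);
	return rc;
}
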
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 347fc1671975..1a2d09c148b3 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,7 +72,7 @@ struct sisl_ioarcb {
u16 timeout; /* in units specified by req_flags */
u32 rsvd1;
u8 cdb[16]; /* must be in big endian */
- struct scsi_cmnd *scp;
+ u64 reserved; /* Reserved area */
} __packed;
struct sisl_rc {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index db03c49e2350..d704752b6332 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -95,7 +95,7 @@ struct alua_port_group {
struct alua_dh_data {
struct list_head node;
- struct alua_port_group *pg;
+ struct alua_port_group __rcu *pg;
int group_id;
spinlock_t pg_lock;
struct scsi_device *sdev;
@@ -371,7 +371,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
/* Check for existing port group references */
spin_lock(&h->pg_lock);
- old_pg = h->pg;
+ old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
if (old_pg != pg) {
/* port group has changed. Update to new port group */
if (h->pg) {
@@ -390,7 +390,9 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
list_add_rcu(&h->node, &pg->dh_list);
spin_unlock_irqrestore(&pg->lock, flags);
- alua_rtpg_queue(h->pg, sdev, NULL, true);
+ alua_rtpg_queue(rcu_dereference_protected(h->pg,
+ lockdep_is_held(&h->pg_lock)),
+ sdev, NULL, true);
spin_unlock(&h->pg_lock);
if (old_pg)
@@ -942,7 +944,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
struct alua_dh_data *h = sdev->handler_data;
- struct alua_port_group __rcu *pg = NULL;
+ struct alua_port_group *pg = NULL;
unsigned int optimize = 0, argc;
const char *p = params;
int result = SCSI_DH_OK;
@@ -989,7 +991,7 @@ static int alua_activate(struct scsi_device *sdev,
struct alua_dh_data *h = sdev->handler_data;
int err = SCSI_DH_OK;
struct alua_queue_data *qdata;
- struct alua_port_group __rcu *pg;
+ struct alua_port_group *pg;
qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
if (!qdata) {
@@ -1053,7 +1055,7 @@ static void alua_check(struct scsi_device *sdev, bool force)
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct alua_dh_data *h = sdev->handler_data;
- struct alua_port_group __rcu *pg;
+ struct alua_port_group *pg;
unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
int ret = BLKPREP_OK;
@@ -1123,7 +1125,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
struct alua_port_group *pg;
spin_lock(&h->pg_lock);
- pg = h->pg;
+ pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
rcu_assign_pointer(h->pg, NULL);
h->sdev = NULL;
spin_unlock(&h->pg_lock);
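
Annotating alua_dh_data->pg as __rcu lets sparse enforce the RCU accessor rules; where the pointer is read under pg_lock rather than inside an RCU read-side section, rcu_dereference_protected() documents (and lockdep-checks) the lock that makes the plain read safe. A short sketch of the idiom, with a hypothetical holder structure:

/* Hypothetical holder of an RCU-managed pointer. */
struct cfg;

struct holder {
	spinlock_t lock;
	struct cfg __rcu *cfg;
};

static struct cfg *holder_swap_cfg(struct holder *h, struct cfg *new_cfg)
{
	struct cfg *old;

	spin_lock(&h->lock);
	/* Readers use rcu_dereference(); here the spinlock protects the access. */
	old = rcu_dereference_protected(h->cfg, lockdep_is_held(&h->lock));
	rcu_assign_pointer(h->cfg, new_cfg);
	spin_unlock(&h->lock);

	return old;	/* caller frees it after a grace period */
}
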
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 9b5a457d4bca..6af3394d051d 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -34,13 +34,13 @@
* Definitions for the generic 5380 driver.
*/
-#define NCR5380_read(reg) inb(instance->io_port + reg)
-#define NCR5380_write(reg, value) outb(value, instance->io_port + reg)
+#define NCR5380_read(reg) inb(hostdata->base + (reg))
+#define NCR5380_write(reg, value) outb(value, hostdata->base + (reg))
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
-#define NCR5380_dma_recv_setup(instance, dst, len) (0)
-#define NCR5380_dma_send_setup(instance, src, len) (0)
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none
+#define NCR5380_dma_recv_setup NCR5380_dma_setup_none
+#define NCR5380_dma_send_setup NCR5380_dma_setup_none
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_implementation_fields /* none */
@@ -71,6 +71,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct Scsi_Host *shost;
+ struct NCR5380_hostdata *hostdata;
unsigned long io;
int error = -ENODEV;
@@ -88,7 +89,9 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
sizeof(struct NCR5380_hostdata));
if (!shost)
goto out_release_region;
- shost->io_port = io;
+
+ hostdata = shost_priv(shost);
+ hostdata->base = io;
/* This card does not seem to raise an interrupt on pdev->irq.
* Steam-powered SCSI controllers run without an IRQ anyway.
@@ -125,7 +128,8 @@ out_host_put:
static void dmx3191d_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
- unsigned long io = shost->io_port;
+ struct NCR5380_hostdata *hostdata = shost_priv(shost);
+ unsigned long io = hostdata->base;
scsi_remove_host(shost);
@@ -149,18 +153,7 @@ static struct pci_driver dmx3191d_pci_driver = {
.remove = dmx3191d_remove_one,
};
-static int __init dmx3191d_init(void)
-{
- return pci_register_driver(&dmx3191d_pci_driver);
-}
-
-static void __exit dmx3191d_exit(void)
-{
- pci_unregister_driver(&dmx3191d_pci_driver);
-}
-
-module_init(dmx3191d_init);
-module_exit(dmx3191d_exit);
+module_pci_driver(dmx3191d_pci_driver);
MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
MODULE_DESCRIPTION("Domex DMX3191D SCSI driver");
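
[Note: module_pci_driver() expands to exactly the init/exit boilerplate deleted above. A sketch of a skeletal PCI driver registered that way; the vendor/device ID and callbacks are placeholders, not the DMX3191D ones:]

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* hypothetical vendor/device */
        { }
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return pcim_enable_device(pdev);        /* managed enable */
}

static void demo_remove(struct pci_dev *pdev)
{
        /* managed resources are released automatically */
}

static struct pci_driver demo_pci_driver = {
        .name           = "demo",
        .id_table       = demo_pci_ids,
        .probe          = demo_probe,
        .remove         = demo_remove,
};

/* Replaces the hand-written module_init()/module_exit() wrappers. */
module_pci_driver(demo_pci_driver);

MODULE_LICENSE("GPL");
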
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 21c8d210c456..27c0dce22e72 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -651,7 +651,6 @@ static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
}
spin_unlock_irqrestore(pHba->host->host_lock, flags);
if (i >= nr) {
- kfree (reply);
printk(KERN_WARNING"%s: Too many outstanding "
"ioctl commands\n", pHba->name);
return (u32)-1;
@@ -1754,8 +1753,10 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
sg_offset = (msg[0]>>4)&0xf;
msg[2] = 0x40000000; // IOCTL context
msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1)
+ if (msg[3] == (u32)-1) {
+ kfree(reply);
return -EBUSY;
+ }
memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
if(sg_offset) {
@@ -3350,7 +3351,7 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
if (opblk_va == NULL) {
dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
resblk_va, resblk_pa);
- printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
+ printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
pHba->name);
return -ENOMEM;
}
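
[Note: the dpt_i2o fix moves kfree(reply) out of the context-lookup helper and into the caller that allocated the buffer, so ownership stays with the allocator on the error path. A sketch of that rule, with hypothetical names:]

#include <linux/slab.h>
#include <linux/errno.h>

/* Helper only records the buffer; it never frees what it did not allocate. */
static int register_reply(void *reply)
{
        /* ... try to stash 'reply' in a lookup table ... */
        return -EBUSY;          /* table full: report failure, keep hands off */
}

static int send_request(gfp_t gfp)
{
        void *reply = kzalloc(128, gfp);
        int ret;

        if (!reply)
                return -ENOMEM;

        ret = register_reply(reply);
        if (ret) {
                kfree(reply);   /* the allocator cleans up on failure */
                return ret;
        }

        /* ... hand the request to the hardware ... */
        return 0;
}
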
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9bd41a35a78a..59150cad0353 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -63,6 +63,14 @@ unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+unsigned int fcoe_e_d_tov = 2 * 1000;
+module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000");
+
+unsigned int fcoe_r_a_tov = 2 * 2 * 1000;
+module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000");
+
static DEFINE_MUTEX(fcoe_config_mutex);
static struct workqueue_struct *fcoe_wq;
@@ -582,7 +590,8 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
* Use default VLAN for FIP VLAN discovery protocol
*/
frame = (struct fip_frame *)skb->data;
- if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) &&
+ if (ntohs(frame->eth.h_proto) == ETH_P_FIP &&
+ ntohs(frame->fip.fip_op) == FIP_OP_VLAN &&
fcoe->realdev != fcoe->netdev)
skb->dev = fcoe->realdev;
else
@@ -633,8 +642,8 @@ static int fcoe_lport_config(struct fc_lport *lport)
lport->qfull = 0;
lport->max_retry_count = 3;
lport->max_rport_retry_count = 3;
- lport->e_d_tov = 2 * 1000; /* FC-FS default */
- lport->r_a_tov = 2 * 2 * 1000;
+ lport->e_d_tov = fcoe_e_d_tov;
+ lport->r_a_tov = fcoe_r_a_tov;
lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
lport->does_npiv = 1;
@@ -2160,11 +2169,13 @@ static bool fcoe_match(struct net_device *netdev)
*/
static void fcoe_dcb_create(struct fcoe_interface *fcoe)
{
+ int ctlr_prio = TC_PRIO_BESTEFFORT;
+ int fcoe_prio = TC_PRIO_INTERACTIVE;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
#ifdef CONFIG_DCB
int dcbx;
u8 fup, up;
struct net_device *netdev = fcoe->realdev;
- struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
struct dcb_app app = {
.priority = 0,
.protocol = ETH_P_FCOE
@@ -2186,10 +2197,12 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
fup = dcb_getapp(netdev, &app);
}
- fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
- ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
+ fcoe_prio = ffs(up) ? ffs(up) - 1 : 0;
+ ctlr_prio = ffs(fup) ? ffs(fup) - 1 : fcoe_prio;
}
#endif
+ fcoe->priority = fcoe_prio;
+ ctlr->priority = ctlr_prio;
}
enum fcoe_create_link_state {
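
[Note: the new e_d_tov/r_a_tov knobs use the standard module_param_named() idiom: a file-scope default, a parameter name visible to modprobe, and a description string. A self-contained sketch with a hypothetical module name; a load-time override would look like "modprobe demo e_d_tov=1000":]

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Default matches the FC-FS value; can be overridden at load time. */
static unsigned int demo_e_d_tov = 2 * 1000;
module_param_named(e_d_tov, demo_e_d_tov, uint, 0644);
MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000");

static int __init demo_init(void)
{
        pr_info("demo: using E_D_TOV of %u ms\n", demo_e_d_tov);
        return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
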
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index dcf36537a767..cea57e27e713 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -801,6 +801,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
return -EINPROGRESS;
drop:
kfree_skb(skb);
+ LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
+ op, ntoh24(fh->fh_d_id));
return -EINVAL;
}
EXPORT_SYMBOL(fcoe_ctlr_els_send);
@@ -1316,7 +1318,7 @@ drop:
* The overall length has already been checked.
*/
static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
- struct fip_header *fh)
+ struct sk_buff *skb)
{
struct fip_desc *desc;
struct fip_mac_desc *mp;
@@ -1331,14 +1333,18 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
int num_vlink_desc;
int reset_phys_port = 0;
struct fip_vn_desc **vlink_desc_arr = NULL;
+ struct fip_header *fh = (struct fip_header *)skb->data;
+ struct ethhdr *eh = eth_hdr(skb);
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf || !lport->port_id) {
+ if (!fcf) {
/*
* We are yet to select best FCF, but we got CVL in the
* meantime. reset the ctlr and let it rediscover the FCF
*/
+ LIBFCOE_FIP_DBG(fip, "Resetting fcoe_ctlr as FCF has not been "
+ "selected yet\n");
mutex_lock(&fip->ctlr_mutex);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
@@ -1346,6 +1352,31 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
}
/*
+ * If we've selected an FCF check that the CVL is from there to avoid
+ * processing CVLs from an unexpected source. If it is from an
+ * unexpected source drop it on the floor.
+ */
+ if (!ether_addr_equal(eh->h_source, fcf->fcf_mac)) {
+ LIBFCOE_FIP_DBG(fip, "Dropping CVL due to source address "
+ "mismatch with FCF src=%pM\n", eh->h_source);
+ return;
+ }
+
+ /*
+ * If we haven't logged into the fabric but receive a CVL we should
+ * reset everything and go back to solicitation.
+ */
+ if (!lport->port_id) {
+ LIBFCOE_FIP_DBG(fip, "lport not logged in, resoliciting\n");
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
+ return;
+ }
+
+ /*
* mask of required descriptors. Validating each one clears its bit.
*/
desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
@@ -1576,7 +1607,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
fcoe_ctlr_recv_adv(fip, skb);
else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
- fcoe_ctlr_recv_clr_vlink(fip, fiph);
+ fcoe_ctlr_recv_clr_vlink(fip, skb);
kfree_skb(skb);
return 0;
drop:
@@ -2122,7 +2153,7 @@ static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport,
LIBFCOE_FIP_DBG(fip,
"rport FLOGI limited port_id %6.6x\n",
rdata->ids.port_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
break;
default:
@@ -2145,9 +2176,15 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+ if (kref_get_unless_zero(&rdata->kref)) {
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ }
+ rcu_read_unlock();
mutex_lock(&lport->disc.disc_mutex);
- list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
- lport->tt.rport_logoff(rdata);
lport->disc.disc_callback = NULL;
mutex_unlock(&lport->disc.disc_mutex);
}
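
[Note: several hunks in this file drop the disc_mutex walk in favour of an RCU list walk that pins each entry with kref_get_unless_zero() before touching it, so an entry already being torn down is simply skipped. A reduced sketch of that pattern with hypothetical item names:]

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
        struct list_head peers;
        struct kref kref;
        struct rcu_head rcu;
};

static void item_release(struct kref *kref)
{
        struct item *it = container_of(kref, struct item, kref);

        kfree_rcu(it, rcu);
}

static void act_on_all(struct list_head *head)
{
        struct item *it;

        rcu_read_lock();
        list_for_each_entry_rcu(it, head, peers) {
                /* Skip entries whose last reference is already gone. */
                if (!kref_get_unless_zero(&it->kref))
                        continue;
                /* ... operate on 'it'; it cannot be freed under us ... */
                kref_put(&it->kref, item_release);
        }
        rcu_read_unlock();
}
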
@@ -2178,7 +2215,7 @@ static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport)
{
fcoe_ctlr_disc_stop(lport);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
synchronize_rcu();
}
@@ -2393,6 +2430,8 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
switch (fip->state) {
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: send reply, state %x\n",
+ fip->state);
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
frport->enode_mac, 0);
break;
@@ -2407,15 +2446,21 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
*/
if (fip->lp->wwpn > rdata->ids.port_name &&
!(frport->flags & FIP_FL_REC_OR_P2P)) {
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+ "port_id collision\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
frport->enode_mac, 0);
break;
}
/* fall through */
case FIP_ST_VNMP_START:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+ "restart VN2VN negotiation\n");
fcoe_ctlr_vn_restart(fip);
break;
default:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: ignore state %x\n",
+ fip->state);
break;
}
}
@@ -2437,9 +2482,12 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
case FIP_ST_VNMP_CLAIM:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_reply: restart state %x\n",
+ fip->state);
fcoe_ctlr_vn_restart(fip);
break;
case FIP_ST_VNMP_UP:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_reply: send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
break;
default:
@@ -2467,26 +2515,33 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
return;
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, port_id);
+ rdata = fc_rport_create(lport, port_id);
if (!rdata) {
mutex_unlock(&lport->disc.disc_mutex);
return;
}
+ mutex_lock(&rdata->rp_mutex);
+ mutex_unlock(&lport->disc.disc_mutex);
rdata->ops = &fcoe_ctlr_vn_rport_ops;
rdata->disc_id = lport->disc.disc_id;
ids = &rdata->ids;
if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
- (ids->node_name != -1 && ids->node_name != new->ids.node_name))
- lport->tt.rport_logoff(rdata);
+ (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+ mutex_unlock(&rdata->rp_mutex);
+ LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
+ fc_rport_logoff(rdata);
+ mutex_lock(&rdata->rp_mutex);
+ }
ids->port_name = new->ids.port_name;
ids->node_name = new->ids.node_name;
- mutex_unlock(&lport->disc.disc_mutex);
+ mutex_unlock(&rdata->rp_mutex);
frport = fcoe_ctlr_rport(rdata);
- LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n",
- port_id, frport->fcoe_len ? "old" : "new");
+ LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
+ port_id, frport->fcoe_len ? "old" : "new",
+ rdata->rp_state);
*frport = *fcoe_ctlr_rport(new);
frport->time = 0;
}
@@ -2506,12 +2561,12 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
struct fcoe_rport *frport;
int ret = -1;
- rdata = lport->tt.rport_lookup(lport, port_id);
+ rdata = fc_rport_lookup(lport, port_id);
if (rdata) {
frport = fcoe_ctlr_rport(rdata);
memcpy(mac, frport->enode_mac, ETH_ALEN);
ret = 0;
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
return ret;
}
@@ -2529,6 +2584,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
struct fcoe_rport *frport = fcoe_ctlr_rport(new);
if (frport->flags & FIP_FL_REC_OR_P2P) {
+ LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
@@ -2536,25 +2592,37 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_START:
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
- if (new->ids.port_id == fip->port_id)
+ if (new->ids.port_id == fip->port_id) {
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "restart, state %d\n",
+ fip->state);
fcoe_ctlr_vn_restart(fip);
+ }
break;
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
if (new->ids.port_id == fip->port_id) {
if (new->ids.port_name > fip->lp->wwpn) {
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "restart, port_id collision\n");
fcoe_ctlr_vn_restart(fip);
break;
}
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
break;
}
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
+ new->ids.port_id);
fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
min((u32)frport->fcoe_len,
fcoe_ctlr_fcoe_size(fip)));
fcoe_ctlr_vn_add(fip, new);
break;
default:
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "ignoring claim from %x\n", new->ids.port_id);
break;
}
}
@@ -2591,19 +2659,26 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
frport = fcoe_ctlr_rport(new);
if (frport->flags & FIP_FL_REC_OR_P2P) {
+ LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
- rdata = lport->tt.rport_lookup(lport, new->ids.port_id);
+ rdata = fc_rport_lookup(lport, new->ids.port_id);
if (rdata) {
if (rdata->ids.node_name == new->ids.node_name &&
rdata->ids.port_name == new->ids.port_name) {
frport = fcoe_ctlr_rport(rdata);
- if (!frport->time && fip->state == FIP_ST_VNMP_UP)
- lport->tt.rport_login(rdata);
+ LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
+ rdata->ids.port_id);
+ if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
+ LIBFCOE_FIP_DBG(fip, "beacon expired "
+ "for rport %x\n",
+ rdata->ids.port_id);
+ fc_rport_login(rdata);
+ }
frport->time = jiffies;
}
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
if (fip->state != FIP_ST_VNMP_UP)
@@ -2638,11 +2713,15 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
unsigned long deadline;
next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
- mutex_lock(&lport->disc.disc_mutex);
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+ if (!kref_get_unless_zero(&rdata->kref))
+ continue;
frport = fcoe_ctlr_rport(rdata);
- if (!frport->time)
+ if (!frport->time) {
+ kref_put(&rdata->kref, fc_rport_destroy);
continue;
+ }
deadline = frport->time +
msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10);
if (time_after_eq(jiffies, deadline)) {
@@ -2650,11 +2729,12 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
LIBFCOE_FIP_DBG(fip,
"port %16.16llx fc_id %6.6x beacon expired\n",
rdata->ids.port_name, rdata->ids.port_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
} else if (time_before(deadline, next_time))
next_time = deadline;
+ kref_put(&rdata->kref, fc_rport_destroy);
}
- mutex_unlock(&lport->disc.disc_mutex);
+ rcu_read_unlock();
return next_time;
}
@@ -2674,11 +2754,21 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fc_rport_priv rdata;
struct fcoe_rport frport;
} buf;
- int rc;
+ int rc, vlan_id = 0;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
+ if (fip->lp->vlan)
+ vlan_id = skb_vlan_tag_get_id(skb);
+
+ if (vlan_id && vlan_id != fip->lp->vlan) {
+ LIBFCOE_FIP_DBG(fip, "vn_recv drop frame sub %x vlan %d\n",
+ sub, vlan_id);
+ rc = -EAGAIN;
+ goto drop;
+ }
+
rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
@@ -2941,7 +3031,7 @@ static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp)
rjt_data.reason = ELS_RJT_UNSUP;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -2991,12 +3081,17 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
mutex_lock(&disc->disc_mutex);
callback = disc->pending ? disc->disc_callback : NULL;
disc->pending = 0;
+ mutex_unlock(&disc->disc_mutex);
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ if (!kref_get_unless_zero(&rdata->kref))
+ continue;
frport = fcoe_ctlr_rport(rdata);
if (frport->time)
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
- mutex_unlock(&disc->disc_mutex);
+ rcu_read_unlock();
if (callback)
callback(lport, DISC_EV_SUCCESS);
}
@@ -3015,11 +3110,13 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
switch (fip->state) {
case FIP_ST_VNMP_START:
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send 1st probe request\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT);
break;
case FIP_ST_VNMP_PROBE1:
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send 2nd probe request\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
break;
@@ -3030,6 +3127,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
hton24(mac + 3, new_port_id);
fcoe_ctlr_map_dest(fip);
fip->update_mac(fip->lp, mac);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
break;
@@ -3041,6 +3139,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT);
if (time_after_eq(jiffies, next_time)) {
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
@@ -3051,6 +3150,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
case FIP_ST_VNMP_UP:
next_time = fcoe_ctlr_vn_age(fip);
if (time_after_eq(jiffies, fip->port_ka_time)) {
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
fcoe_all_vn2vn, 0);
fip->port_ka_time = jiffies +
@@ -3135,7 +3235,6 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
fc_exch_init(lport);
fc_elsct_init(lport);
fc_lport_init(lport);
- fc_rport_init(lport);
fc_disc_init(lport);
fcoe_ctlr_mode_set(lport, fip, fip->mode);
return 0;
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 0675fd128734..9cf3d56296ab 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -335,16 +335,24 @@ static ssize_t store_ctlr_enabled(struct device *dev,
const char *buf, size_t count)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ bool enabled;
int rc;
+ if (*buf == '1')
+ enabled = true;
+ else if (*buf == '0')
+ enabled = false;
+ else
+ return -EINVAL;
+
switch (ctlr->enabled) {
case FCOE_CTLR_ENABLED:
- if (*buf == '1')
+ if (enabled)
return count;
ctlr->enabled = FCOE_CTLR_DISABLED;
break;
case FCOE_CTLR_DISABLED:
- if (*buf == '0')
+ if (!enabled)
return count;
ctlr->enabled = FCOE_CTLR_ENABLED;
break;
@@ -424,6 +432,75 @@ static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR,
store_ctlr_fip_resp);
static ssize_t
+fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count)
+{
+ int err;
+ unsigned long v;
+
+ err = kstrtoul(buf, 10, &v);
+ if (err || v > UINT_MAX)
+ return -EINVAL;
+
+ *var = v;
+
+ return count;
+}
+
+static ssize_t store_ctlr_r_a_tov(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+ return -EBUSY;
+ if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+ return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count);
+ return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_r_a_tov(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ return sprintf(buf, "%d\n", ctlr->lp->r_a_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR,
+ show_ctlr_r_a_tov, store_ctlr_r_a_tov);
+
+static ssize_t store_ctlr_e_d_tov(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+ return -EBUSY;
+ if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+ return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count);
+ return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_e_d_tov(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ return sprintf(buf, "%d\n", ctlr->lp->e_d_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR,
+ show_ctlr_e_d_tov, store_ctlr_e_d_tov);
+
+static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -507,6 +584,8 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
static struct attribute *fcoe_ctlr_attrs[] = {
&device_attr_fcoe_ctlr_fip_vlan_responder.attr,
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_r_a_tov.attr,
+ &device_attr_fcoe_ctlr_e_d_tov.attr,
&device_attr_fcoe_ctlr_enabled.attr,
&device_attr_fcoe_ctlr_mode.attr,
NULL,
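
[Note: fcoe_ctlr_var_store() above is the usual sysfs store idiom: kstrtoul(), a range check, assign, return count. A sketch of a matching show/store pair on a hypothetical device attribute, not the FCOE_DEVICE_ATTR macros:]

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/types.h>

static u32 demo_timeout = 2000;

static ssize_t timeout_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", demo_timeout);
}

static ssize_t timeout_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        unsigned long v;
        int err;

        err = kstrtoul(buf, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        demo_timeout = v;
        return count;           /* tell sysfs the whole write was consumed */
}
static DEVICE_ATTR_RW(timeout);
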
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d9fd2f841585..2544a37ece0a 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -441,30 +441,38 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
unsigned long ptr;
spinlock_t *io_lock = NULL;
int io_lock_acquired = 0;
+ struct fc_rport_libfc_priv *rp;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
rport = starget_to_rport(scsi_target(sc->device));
+ if (!rport) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "returning DID_NO_CONNECT for IO as rport is NULL\n");
+ sc->result = DID_NO_CONNECT << 16;
+ done(sc);
+ return 0;
+ }
+
ret = fc_remote_port_chkready(rport);
if (ret) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "rport is not ready\n");
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret;
done(sc);
return 0;
}
- if (rport) {
- struct fc_rport_libfc_priv *rp = rport->dd_data;
-
- if (!rp || rp->rp_state != RPORT_ST_READY) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ rp = rport->dd_data;
+ if (!rp || rp->rp_state != RPORT_ST_READY) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"returning DID_NO_CONNECT for IO as rport is removed\n");
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
- sc->result = DID_NO_CONNECT<<16;
- done(sc);
- return 0;
- }
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ sc->result = DID_NO_CONNECT<<16;
+ done(sc);
+ return 0;
}
if (lp->state != LPORT_ST_READY || !(lp->link_up))
@@ -2543,7 +2551,7 @@ int fnic_reset(struct Scsi_Host *shost)
* Reset local port, this will clean up libFC exchanges,
* reset remote port sessions, and if link is up, begin flogi
*/
- ret = lp->tt.lport_reset(lp);
+ ret = fc_lport_reset(lp);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from fnic reset %s\n",
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4e15c4bf0795..5a5fa01576b7 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -613,7 +613,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
fc_trace_entries.rd_idx = 0;
}
- fc_buf->time_stamp = CURRENT_TIME;
+ ktime_get_real_ts64(&fc_buf->time_stamp);
fc_buf->host_no = host_no;
fc_buf->frame_type = frame_type;
@@ -740,7 +740,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
len = *orig_len;
- time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
+ time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
len += snprintf(fnic_dbgfs_prt->buffer + len,
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index a8aa0578fcb0..e375d0c2eaaf 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -72,7 +72,7 @@ struct fnic_trace_data {
typedef struct fnic_trace_data fnic_trace_data_t;
struct fc_trace_hdr {
- struct timespec time_stamp;
+ struct timespec64 time_stamp;
u32 host_no;
u8 frame_type;
u8 frame_len;
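
[Note: the fnic trace conversion swaps CURRENT_TIME/struct timespec for ktime_get_real_ts64()/struct timespec64, keeping the timestamp valid past 2038. A sketch of taking and printing such a timestamp, assuming only the core time helpers:]

#include <linux/time.h>
#include <linux/timekeeping.h>
#include <linux/printk.h>

static void log_wallclock(void)
{
        struct timespec64 ts;
        struct tm tm;

        ktime_get_real_ts64(&ts);               /* 64-bit wall-clock time */
        time64_to_tm(ts.tv_sec, 0, &tm);        /* UTC broken-down time */

        pr_info("%02d:%02d:%02d.%09ld UTC\n",
                tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
}
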
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 9795d6f3e197..ba69d6112fa1 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -499,10 +499,7 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
- printk(KERN_ERR
- "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
@@ -517,10 +514,7 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
- printk(KERN_ERR
- "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't del addr [%pM], %d\n", addr, err);
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
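
[Note: the vnic_dev hunks collapse the open-coded six-byte MAC format into the %pM printk extension. A one-function sketch:]

#include <linux/printk.h>
#include <linux/types.h>

static void report_bad_addr(const u8 *addr, int err)
{
        /* %pM prints a 6-byte buffer as aa:bb:cc:dd:ee:ff */
        pr_err("Can't add addr [%pM], %d\n", addr, err);
}
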
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index cbf010324c18..de5147a8c959 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -64,9 +64,9 @@ static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
module_param_array(card, int, NULL, 0);
MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
+MODULE_ALIAS("g_NCR5380_mmio");
MODULE_LICENSE("GPL");
-#ifndef SCSI_G_NCR5380_MEM
/*
* Configure I/O address of 53C400A or DTC436 by writing magic numbers
* to ports 0x779 and 0x379.
@@ -88,40 +88,35 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
cfg = 0x80 | idx | (irq << 4);
outb(cfg, 0x379);
}
-#endif
+
+static unsigned int ncr_53c400a_ports[] = {
+ 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
+};
+static unsigned int dtc_3181e_ports[] = {
+ 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
+};
+static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
+ 0x59, 0xb9, 0xc5, 0xae, 0xa6
+};
+static u8 hp_c2502_magic[] = { /* HP C2502 */
+ 0x0f, 0x22, 0xf0, 0x20, 0x80
+};
static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
struct device *pdev, int base, int irq, int board)
{
- unsigned int *ports;
+ bool is_pmio = base <= 0xffff;
+ int ret;
+ int flags = 0;
+ unsigned int *ports = NULL;
u8 *magic = NULL;
-#ifndef SCSI_G_NCR5380_MEM
int i;
int port_idx = -1;
unsigned long region_size;
-#endif
- static unsigned int ncr_53c400a_ports[] = {
- 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
- };
- static unsigned int dtc_3181e_ports[] = {
- 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
- };
- static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
- 0x59, 0xb9, 0xc5, 0xae, 0xa6
- };
- static u8 hp_c2502_magic[] = { /* HP C2502 */
- 0x0f, 0x22, 0xf0, 0x20, 0x80
- };
- int flags, ret;
struct Scsi_Host *instance;
struct NCR5380_hostdata *hostdata;
-#ifdef SCSI_G_NCR5380_MEM
- void __iomem *iomem;
- resource_size_t iomem_size;
-#endif
+ u8 __iomem *iomem;
- ports = NULL;
- flags = 0;
switch (board) {
case BOARD_NCR5380:
flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
@@ -140,8 +135,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
break;
}
-#ifndef SCSI_G_NCR5380_MEM
- if (ports && magic) {
+ if (is_pmio && ports && magic) {
/* wakeup sequence for the NCR53C400A and DTC3181E */
/* Disable the adapter and look for a free io port */
@@ -170,84 +164,89 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
if (ports[i]) {
/* At this point we have our region reserved */
magic_configure(i, 0, magic); /* no IRQ yet */
- outb(0xc0, ports[i] + 9);
- if (inb(ports[i] + 9) != 0x80) {
+ base = ports[i];
+ outb(0xc0, base + 9);
+ if (inb(base + 9) != 0x80) {
ret = -ENODEV;
goto out_release;
}
- base = ports[i];
port_idx = i;
} else
return -EINVAL;
- }
- else
- {
+ } else if (is_pmio) {
/* NCR5380 - no configuration, just grab */
region_size = 8;
if (!base || !request_region(base, region_size, "ncr5380"))
return -EBUSY;
+ } else { /* MMIO */
+ region_size = NCR53C400_region_size;
+ if (!request_mem_region(base, region_size, "ncr5380"))
+ return -EBUSY;
}
-#else
- iomem_size = NCR53C400_region_size;
- if (!request_mem_region(base, iomem_size, "ncr5380"))
- return -EBUSY;
- iomem = ioremap(base, iomem_size);
+
+ if (is_pmio)
+ iomem = ioport_map(base, region_size);
+ else
+ iomem = ioremap(base, region_size);
+
if (!iomem) {
- release_mem_region(base, iomem_size);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_release;
}
-#endif
+
instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
if (instance == NULL) {
ret = -ENOMEM;
- goto out_release;
+ goto out_unmap;
}
hostdata = shost_priv(instance);
-#ifndef SCSI_G_NCR5380_MEM
- instance->io_port = base;
- instance->n_io_port = region_size;
- hostdata->io_width = 1; /* 8-bit PDMA by default */
-
- /*
- * On NCR53C400 boards, NCR5380 registers are mapped 8 past
- * the base address.
- */
- switch (board) {
- case BOARD_NCR53C400:
- instance->io_port += 8;
- hostdata->c400_ctl_status = 0;
- hostdata->c400_blk_cnt = 1;
- hostdata->c400_host_buf = 4;
- break;
- case BOARD_DTC3181E:
- hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- hostdata->c400_ctl_status = 9;
- hostdata->c400_blk_cnt = 10;
- hostdata->c400_host_buf = 8;
- break;
- }
-#else
- instance->base = base;
- hostdata->iomem = iomem;
- hostdata->iomem_size = iomem_size;
- switch (board) {
- case BOARD_NCR53C400:
- hostdata->c400_ctl_status = 0x100;
- hostdata->c400_blk_cnt = 0x101;
- hostdata->c400_host_buf = 0x104;
- break;
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
- ret = -EINVAL;
- goto out_unregister;
+ hostdata->io = iomem;
+ hostdata->region_size = region_size;
+
+ if (is_pmio) {
+ hostdata->io_port = base;
+ hostdata->io_width = 1; /* 8-bit PDMA by default */
+ hostdata->offset = 0;
+
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->io_port += 8;
+ hostdata->c400_ctl_status = 0;
+ hostdata->c400_blk_cnt = 1;
+ hostdata->c400_host_buf = 4;
+ break;
+ case BOARD_DTC3181E:
+ hostdata->io_width = 2; /* 16-bit PDMA */
+ /* fall through */
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ hostdata->c400_ctl_status = 9;
+ hostdata->c400_blk_cnt = 10;
+ hostdata->c400_host_buf = 8;
+ break;
+ }
+ } else {
+ hostdata->base = base;
+ hostdata->offset = NCR53C400_mem_base;
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->c400_ctl_status = 0x100;
+ hostdata->c400_blk_cnt = 0x101;
+ hostdata->c400_host_buf = 0x104;
+ break;
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
+ ret = -EINVAL;
+ goto out_unregister;
+ }
}
-#endif
ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
if (ret)
@@ -273,11 +272,9 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
instance->irq = NO_IRQ;
if (instance->irq != NO_IRQ) {
-#ifndef SCSI_G_NCR5380_MEM
/* set IRQ for HP C2502 */
if (board == BOARD_HP_C2502)
magic_configure(port_idx, instance->irq, magic);
-#endif
if (request_irq(instance->irq, generic_NCR5380_intr,
0, "NCR5380", instance)) {
printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
@@ -303,38 +300,39 @@ out_free_irq:
NCR5380_exit(instance);
out_unregister:
scsi_host_put(instance);
-out_release:
-#ifndef SCSI_G_NCR5380_MEM
- release_region(base, region_size);
-#else
+out_unmap:
iounmap(iomem);
- release_mem_region(base, iomem_size);
-#endif
+out_release:
+ if (is_pmio)
+ release_region(base, region_size);
+ else
+ release_mem_region(base, region_size);
return ret;
}
static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ void __iomem *iomem = hostdata->io;
+ unsigned long io_port = hostdata->io_port;
+ unsigned long base = hostdata->base;
+ unsigned long region_size = hostdata->region_size;
+
scsi_remove_host(instance);
if (instance->irq != NO_IRQ)
free_irq(instance->irq, instance);
NCR5380_exit(instance);
-#ifndef SCSI_G_NCR5380_MEM
- release_region(instance->io_port, instance->n_io_port);
-#else
- {
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- iounmap(hostdata->iomem);
- release_mem_region(instance->base, hostdata->iomem_size);
- }
-#endif
scsi_host_put(instance);
+ iounmap(iomem);
+ if (io_port)
+ release_region(io_port, region_size);
+ else
+ release_mem_region(base, region_size);
}
/**
* generic_NCR5380_pread - pseudo DMA read
- * @instance: adapter to read from
+ * @hostdata: scsi host private data
* @dst: buffer to read into
* @len: buffer length
*
@@ -342,10 +340,9 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
* controller
*/
-static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata,
unsigned char *dst, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
int start = 0;
@@ -361,18 +358,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; /* FIXME - no timeout */
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- insw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ insw(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 64);
- else
- insb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ insb(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_fromio(dst + start,
- hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+ else
+ memcpy_fromio(dst + start,
+ hostdata->io + NCR53C400_host_buffer, 128);
+
start += 128;
blocks--;
}
@@ -381,18 +376,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; /* FIXME - no timeout */
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- insw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ insw(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 64);
- else
- insb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ insb(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_fromio(dst + start,
- hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+ else
+ memcpy_fromio(dst + start,
+ hostdata->io + NCR53C400_host_buffer, 128);
+
start += 128;
blocks--;
}
@@ -412,7 +405,7 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
/**
* generic_NCR5380_pwrite - pseudo DMA write
- * @instance: adapter to read from
+ * @hostdata: scsi host private data
* @dst: buffer to read into
* @len: buffer length
*
@@ -420,10 +413,9 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
* controller
*/
-static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *src, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
int start = 0;
@@ -439,18 +431,17 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
break;
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - timeout
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- outsw(instance->io_port + hostdata->c400_host_buf,
+
+ if (hostdata->io_port && hostdata->io_width == 2)
+ outsw(hostdata->io_port + hostdata->c400_host_buf,
src + start, 64);
- else
- outsb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ outsb(hostdata->io_port + hostdata->c400_host_buf,
src + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
- src + start, 128);
-#endif
+ else
+ memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+ src + start, 128);
+
start += 128;
blocks--;
}
@@ -458,18 +449,16 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - no timeout
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- outsw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ outsw(hostdata->io_port + hostdata->c400_host_buf,
src + start, 64);
- else
- outsb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ outsb(hostdata->io_port + hostdata->c400_host_buf,
src + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
- src + start, 128);
-#endif
+ else
+ memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+ src + start, 128);
+
start += 128;
blocks--;
}
@@ -489,10 +478,9 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
return 0;
}
-static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
+static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int transfersize = cmd->transfersize;
if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
@@ -566,7 +554,7 @@ static struct isa_driver generic_NCR5380_isa_driver = {
},
};
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
{ .id = "DTC436e", .driver_data = BOARD_DTC3181E },
{ .id = "" }
@@ -600,7 +588,7 @@ static struct pnp_driver generic_NCR5380_pnp_driver = {
.probe = generic_NCR5380_pnp_probe,
.remove = generic_NCR5380_pnp_remove,
};
-#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */
+#endif /* defined(CONFIG_PNP) */
static int pnp_registered, isa_registered;
@@ -624,7 +612,7 @@ static int __init generic_NCR5380_init(void)
card[0] = BOARD_HP_C2502;
}
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
pnp_registered = 1;
#endif
@@ -637,7 +625,7 @@ static int __init generic_NCR5380_init(void)
static void __exit generic_NCR5380_exit(void)
{
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
if (pnp_registered)
pnp_unregister_driver(&generic_NCR5380_pnp_driver);
#endif
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index b175b9234458..3ce5b65ccb00 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,49 +14,28 @@
#ifndef GENERIC_NCR5380_H
#define GENERIC_NCR5380_H
-#ifndef SCSI_G_NCR5380_MEM
#define DRV_MODULE_NAME "g_NCR5380"
#define NCR5380_read(reg) \
- inb(instance->io_port + (reg))
+ ioread8(hostdata->io + hostdata->offset + (reg))
#define NCR5380_write(reg, value) \
- outb(value, instance->io_port + (reg))
+ iowrite8(value, hostdata->io + hostdata->offset + (reg))
#define NCR5380_implementation_fields \
+ int offset; \
int c400_ctl_status; \
int c400_blk_cnt; \
int c400_host_buf; \
int io_width;
-#else
-/* therefore SCSI_G_NCR5380_MEM */
-#define DRV_MODULE_NAME "g_NCR5380_mmio"
-
#define NCR53C400_mem_base 0x3880
#define NCR53C400_host_buffer 0x3900
#define NCR53C400_region_size 0x3a00
-#define NCR5380_read(reg) \
- readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
- NCR53C400_mem_base + (reg))
-#define NCR5380_write(reg, value) \
- writeb(value, ((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
- NCR53C400_mem_base + (reg))
-
-#define NCR5380_implementation_fields \
- void __iomem *iomem; \
- resource_size_t iomem_size; \
- int c400_ctl_status; \
- int c400_blk_cnt; \
- int c400_host_buf;
-
-#endif
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- generic_NCR5380_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len
#define NCR5380_dma_recv_setup generic_NCR5380_pread
#define NCR5380_dma_send_setup generic_NCR5380_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_intr generic_NCR5380_intr
#define NCR5380_queue_command generic_NCR5380_queue_command
@@ -73,4 +52,3 @@
#define BOARD_HP_C2502 4
#endif /* GENERIC_NCR5380_H */
-
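
[Note: the g_NCR5380 rework funnels both board flavours through a single __iomem cookie: ioport_map() for port I/O, ioremap() for the memory-mapped card, then ioread8()/iowrite8() either way. A sketch of that unification with made-up base/size values; the in-tree driver additionally keeps per-board register offsets that this sketch omits:]

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

struct demo_regs {
        void __iomem *io;       /* works for ioport_map() and ioremap() alike */
        unsigned long base;
        unsigned long size;
        bool is_pmio;
};

static int demo_map(struct demo_regs *r, unsigned long base,
                    unsigned long size, bool is_pmio)
{
        r->base = base;
        r->size = size;
        r->is_pmio = is_pmio;

        r->io = is_pmio ? ioport_map(base, size) : ioremap(base, size);
        return r->io ? 0 : -ENOMEM;
}

static u8 demo_read(struct demo_regs *r, unsigned int reg)
{
        return ioread8(r->io + reg);    /* same accessor either way */
}

static void demo_unmap(struct demo_regs *r)
{
        if (r->is_pmio)
                ioport_unmap(r->io);
        else
                iounmap(r->io);
}
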
diff --git a/drivers/scsi/g_NCR5380_mmio.c b/drivers/scsi/g_NCR5380_mmio.c
deleted file mode 100644
index 8cdde71ba0c8..000000000000
--- a/drivers/scsi/g_NCR5380_mmio.c
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * There is probably a nicer way to do this but this one makes
- * pretty obvious what is happening. We rebuild the same file with
- * different options for mmio versus pio.
- */
-
-#define SCSI_G_NCR5380_MEM
-
-#include "g_NCR5380.c"
-
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 72c98522bd26..c0cd505a9ef7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -13,6 +13,7 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -110,7 +111,7 @@ struct hisi_sas_device {
struct domain_device *sas_device;
u64 attached_phy;
u64 device_id;
- u64 running_req;
+ atomic64_t running_req;
u8 dev_status;
};
@@ -149,7 +150,8 @@ struct hisi_sas_hw {
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
- int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s);
+ int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s);
void (*start_delivery)(struct hisi_hba *hisi_hba);
int (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
@@ -166,6 +168,9 @@ struct hisi_sas_hw {
void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
+ void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *linkrates);
+ enum sas_linkrate (*phy_get_max_linkrate)(void);
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
@@ -183,6 +188,7 @@ struct hisi_hba {
u32 ctrl_reset_reg;
u32 ctrl_reset_sts_reg;
u32 ctrl_clock_ena_reg;
+ u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
int n_phy;
@@ -205,7 +211,6 @@ struct hisi_hba {
struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
int queue_count;
- int queue;
struct hisi_sas_slot *slot_prep;
struct dma_pool *sge_page_pool;
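
[Note: running_req becomes an atomic64_t so the submission and completion paths can adjust it without sharing a lock. A sketch of the counter pattern on a hypothetical device struct:]

#include <linux/atomic.h>
#include <linux/types.h>

struct demo_dev {
        atomic64_t running_req;         /* updated from several contexts */
};

static void demo_init_dev(struct demo_dev *d)
{
        atomic64_set(&d->running_req, 0);
}

static void demo_submit(struct demo_dev *d)
{
        atomic64_inc(&d->running_req);  /* no lock needed */
}

static void demo_complete(struct demo_dev *d)
{
        atomic64_dec(&d->running_req);
}

static bool demo_idle(struct demo_dev *d)
{
        return atomic64_read(&d->running_req) == 0;
}
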
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 2f872f784e10..d50e9cfefd24 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -162,8 +162,8 @@ out:
hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
if (task->task_done)
task->task_done(task);
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
}
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
@@ -232,8 +232,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
if (rc)
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
- &dlvry_queue_slot);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+ &dlvry_queue, &dlvry_queue_slot);
if (rc)
goto err_out_tag;
@@ -303,7 +303,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
hisi_hba->slot_prep = slot;
- sas_dev->running_req++;
+ atomic64_inc(&sas_dev->running_req);
++(*pass);
return 0;
@@ -369,9 +369,14 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
struct sas_phy *sphy = sas_phy->phy;
sphy->negotiated_linkrate = sas_phy->linkrate;
- sphy->minimum_linkrate = phy->minimum_linkrate;
sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sphy->maximum_linkrate = phy->maximum_linkrate;
+ sphy->maximum_linkrate_hw =
+ hisi_hba->hw->phy_get_max_linkrate();
+ if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
+ sphy->minimum_linkrate = phy->minimum_linkrate;
+
+ if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
+ sphy->maximum_linkrate = phy->maximum_linkrate;
}
if (phy->phy_type & PORT_TYPE_SAS) {
@@ -537,7 +542,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
+ struct hisi_sas_port *port = &hisi_hba->port[phy->port_id];
unsigned long flags;
if (!sas_port)
@@ -645,6 +650,9 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_SET_LINK_RATE:
+ hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
+ break;
+
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
return -EOPNOTSUPP;
@@ -764,7 +772,8 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task = NULL;
}
ex_err:
- WARN_ON(retry == TASK_RETRY);
+ if (retry == TASK_RETRY)
+ dev_warn(dev, "abort tmf: executing internal task failed!\n");
sas_free_task(task);
return res;
}
@@ -960,6 +969,9 @@ static int hisi_sas_query_task(struct sas_task *task)
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
break;
+ default:
+ rc = TMF_RESP_FUNC_FAILED;
+ break;
}
}
return rc;
@@ -987,8 +999,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
if (rc)
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
- &dlvry_queue_slot);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+ &dlvry_queue, &dlvry_queue_slot);
if (rc)
goto err_out_tag;
@@ -1023,7 +1035,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
hisi_hba->slot_prep = slot;
- sas_dev->running_req++;
+ atomic64_inc(&sas_dev->running_req);
+
/* send abort command to our chip */
hisi_hba->hw->start_delivery(hisi_hba);
@@ -1396,10 +1409,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
+ struct clk *refclk;
shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
- if (!shost)
- goto err_out;
+ if (!shost) {
+ dev_err(dev, "scsi host alloc failed\n");
+ return NULL;
+ }
hisi_hba = shost_priv(shost);
hisi_hba->hw = hw;
@@ -1432,6 +1448,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
}
+ refclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(refclk))
+ dev_info(dev, "no ref clk property\n");
+ else
+ hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
+
if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
goto err_out;
@@ -1457,6 +1479,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
return shost;
err_out:
+ kfree(shost);
dev_err(dev, "shost alloc failed\n");
return NULL;
}
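
[Note: the probe hunk treats the reference clock as optional: a failed devm_clk_get() is only logged and the MHz value is recorded when the clock exists. A sketch of that optional-clock lookup, assuming only the common clock framework; newer kernels also provide devm_clk_get_optional() for this case:]

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static unsigned int demo_refclk_mhz(struct device *dev)
{
        struct clk *refclk;

        refclk = devm_clk_get(dev, NULL);       /* first (unnamed) clock */
        if (IS_ERR(refclk)) {
                dev_info(dev, "no ref clk property\n");
                return 0;                       /* treat as "not provided" */
        }

        return clk_get_rate(refclk) / 1000000;
}
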
@@ -1483,10 +1506,8 @@ int hisi_sas_probe(struct platform_device *pdev,
int rc, phy_nr, port_nr, i;
shost = hisi_sas_shost_alloc(pdev, hw);
- if (!shost) {
- rc = -ENOMEM;
- goto err_out_ha;
- }
+ if (!shost)
+ return -ENOMEM;
sha = SHOST_TO_SAS_HA(shost);
hisi_hba = shost_priv(shost);
@@ -1496,12 +1517,13 @@ int hisi_sas_probe(struct platform_device *pdev,
arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
- if (!arr_phy || !arr_port)
- return -ENOMEM;
+ if (!arr_phy || !arr_port) {
+ rc = -ENOMEM;
+ goto err_out_ha;
+ }
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
sha->lldd_ha = hisi_hba;
shost->transportt = hisi_sas_stt;
@@ -1546,6 +1568,7 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_free(hisi_hba);
kfree(shost);
return rc;
}
@@ -1555,12 +1578,14 @@ int hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = sha->core.shost;
scsi_remove_host(sha->core.shost);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
hisi_sas_free(hisi_hba);
+ kfree(shost);
return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index c0ac49d8bc8d..8a1be0ba8a22 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -843,6 +843,49 @@ static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
+static enum sas_linkrate phy_get_max_linkrate_v1_hw(void)
+{
+ return SAS_LINK_RATE_6_0_GBPS;
+}
+
+static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *r)
+{
+ u32 prog_phy_link_rate =
+ hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
+ enum sas_linkrate min, max;
+ u32 rate_mask = 0;
+
+ if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = sas_phy->phy->maximum_linkrate;
+ min = r->minimum_linkrate;
+ } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = r->maximum_linkrate;
+ min = sas_phy->phy->minimum_linkrate;
+ } else
+ return;
+
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
+ min -= SAS_LINK_RATE_1_5_GBPS;
+ max -= SAS_LINK_RATE_1_5_GBPS;
+
+ for (i = 0; i <= max; i++)
+ rate_mask |= 1 << (i * 2);
+
+ prog_phy_link_rate &= ~0xff;
+ prog_phy_link_rate |= rate_mask;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
+
+ phy_hard_reset_v1_hw(hisi_hba, phy_no);
+}
+
static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
@@ -862,29 +905,23 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
 * The call path to this function and up to writing the write
* queue pointer should be safe from interruption.
*/
-static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_dq *dq;
u32 r, w;
- int queue = hisi_hba->queue;
-
- while (1) {
- dq = &hisi_hba->dq[queue];
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- queue = (queue + 1) % hisi_hba->queue_count;
- if (queue == hisi_hba->queue) {
- dev_warn(dev, "could not find free slot\n");
- return -EAGAIN;
- }
- continue;
- }
- break;
+ int queue = dev_id % hisi_hba->queue_count;
+
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "could not find free slot\n");
+ return -EAGAIN;
}
- hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
*q = queue;
*s = w;
return 0;
@@ -1372,8 +1409,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
}
out:
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
@@ -1824,6 +1861,8 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.phy_enable = enable_phy_v1_hw,
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
+ .phy_set_linkrate = phy_set_linkrate_v1_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw,
.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9825a3f49f53..b934aec1eebb 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -55,10 +55,44 @@
#define HGC_DFX_CFG2 0xc0
#define HGC_IOMB_PROC1_STATUS 0x104
#define CFG_1US_TIMER_TRSH 0xcc
+#define HGC_LM_DFX_STATUS2 0x128
+#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0
+#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
+#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12
+#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
+#define HGC_CQE_ECC_ADDR 0x13c
+#define HGC_CQE_ECC_1B_ADDR_OFF 0
+#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
+#define HGC_CQE_ECC_MB_ADDR_OFF 8
+#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
+#define HGC_IOST_ECC_ADDR 0x140
+#define HGC_IOST_ECC_1B_ADDR_OFF 0
+#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
+#define HGC_IOST_ECC_MB_ADDR_OFF 16
+#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
+#define HGC_DQE_ECC_ADDR 0x144
+#define HGC_DQE_ECC_1B_ADDR_OFF 0
+#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
+#define HGC_DQE_ECC_MB_ADDR_OFF 16
+#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define HGC_INVLD_DQE_INFO 0x148
#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
+#define HGC_ITCT_ECC_ADDR 0x150
+#define HGC_ITCT_ECC_1B_ADDR_OFF 0
+#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_1B_ADDR_OFF)
+#define HGC_ITCT_ECC_MB_ADDR_OFF 16
+#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_MB_ADDR_OFF)
+#define HGC_AXI_FIFO_ERR_INFO 0x154
+#define AXI_ERR_INFO_OFF 0
+#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
+#define FIFO_ERR_INFO_OFF 8
+#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
#define INT_COAL_EN 0x19c
#define OQ_INT_COAL_TIME 0x1a0
#define OQ_INT_COAL_CNT 0x1a4
@@ -73,13 +107,41 @@
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2 0x1bc
#define ENT_INT_SRC3 0x1c0
+#define ENT_INT_SRC3_WP_DEPTH_OFF 8
+#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
+#define ENT_INT_SRC3_RP_DEPTH_OFF 10
+#define ENT_INT_SRC3_AXI_OFF 11
+#define ENT_INT_SRC3_FIFO_OFF 12
+#define ENT_INT_SRC3_LM_OFF 14
#define ENT_INT_SRC3_ITC_INT_OFF 15
#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC3_ABT_OFF 16
#define ENT_INT_SRC_MSK1 0x1c4
#define ENT_INT_SRC_MSK2 0x1c8
#define ENT_INT_SRC_MSK3 0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR 0x1e8
+#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0
+#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1
+#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2
+#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3
+#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4
+#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5
+#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6
+#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7
+#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8
+#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9
+#define SAS_ECC_INTR_CQE_ECC_1B_OFF 10
+#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
@@ -94,7 +156,20 @@
#define COMPL_Q_0_DEPTH 0x4e8
#define COMPL_Q_0_WR_PTR 0x4ec
#define COMPL_Q_0_RD_PTR 0x4f0
-
+#define HGC_RXM_DFX_STATUS14 0xae8
+#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
+#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM0_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
+#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM1_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
+#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM2_OFF)
+#define HGC_RXM_DFX_STATUS15 0xaec
+#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
+#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS15_MEM3_OFF)
/* phy registers need init */
#define PORT_BASE (0x2000)
@@ -119,6 +194,9 @@
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CONTROL_CTA_OFF 17
#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF)
+#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
+#define RX_BCAST_CHG_OFF 1
+#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -267,6 +345,8 @@
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+#define HISI_SAS_FATAL_INT_NR 2
+
struct hisi_sas_complete_v2_hdr {
__le32 dw0;
__le32 dw1;
@@ -659,8 +739,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
- hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
- hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
/* clear the itct */
hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
@@ -808,7 +886,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
- hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
@@ -824,7 +902,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
@@ -836,7 +914,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
- hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+ if (hisi_hba->refclk_frequency_mhz == 66)
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+			/* else do nothing: leave PHY_CTRL at its existing value */
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -980,6 +1060,49 @@ static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
+static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
+{
+ return SAS_LINK_RATE_12_0_GBPS;
+}
+
+static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *r)
+{
+ u32 prog_phy_link_rate =
+ hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
+ enum sas_linkrate min, max;
+ u32 rate_mask = 0;
+
+ if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = sas_phy->phy->maximum_linkrate;
+ min = r->minimum_linkrate;
+ } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = r->maximum_linkrate;
+ min = sas_phy->phy->minimum_linkrate;
+ } else
+ return;
+
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
+ min -= SAS_LINK_RATE_1_5_GBPS;
+ max -= SAS_LINK_RATE_1_5_GBPS;
+
+ for (i = 0; i <= max; i++)
+ rate_mask |= 1 << (i * 2);
+
+ prog_phy_link_rate &= ~0xff;
+ prog_phy_link_rate |= rate_mask;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
+
+ phy_hard_reset_v2_hw(hisi_hba, phy_no);
+}
+
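
phy_set_linkrate_v2_hw() above encodes the allowed rates into the low byte of PROG_PHY_LINK_RATE: rates are indexed from SAS_LINK_RATE_1_5_GBPS and each enabled rate occupies every second bit. A minimal standalone sketch of that mask arithmetic (illustrative only; the register layout is taken from the loop above, and rate_mask_up_to is a made-up name):

#include <stdio.h>

/*
 * Illustration of the PROG_PHY_LINK_RATE low-byte encoding used by
 * phy_set_linkrate_v2_hw(): after subtracting SAS_LINK_RATE_1_5_GBPS,
 * index 0 = 1.5G, 1 = 3.0G, 2 = 6.0G, 3 = 12.0G, and every enabled
 * rate sets one bit two positions apart.
 */
static unsigned int rate_mask_up_to(int max_index)
{
	unsigned int rate_mask = 0;
	int i;

	for (i = 0; i <= max_index; i++)
		rate_mask |= 1 << (i * 2);

	return rate_mask;
}

int main(void)
{
	printf("max 12.0G -> 0x%02x\n", rate_mask_up_to(3)); /* 0x55 */
	printf("max  6.0G -> 0x%02x\n", rate_mask_up_to(2)); /* 0x15 */
	return 0;
}

So a PHY capped at 6.0 Gbit/s ends up with 0x15 in the low byte, while the full 12.0 Gbit/s range programs 0x55.
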
static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
@@ -1010,29 +1133,24 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
 * The call path to this function, up to writing the write
 * queue pointer, should be safe from interruption.
*/
-static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_dq *dq;
u32 r, w;
- int queue = hisi_hba->queue;
-
- while (1) {
- dq = &hisi_hba->dq[queue];
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- queue = (queue + 1) % hisi_hba->queue_count;
- if (queue == hisi_hba->queue) {
- dev_warn(dev, "could not find free slot\n");
- return -EAGAIN;
- }
- continue;
- }
- break;
+ int queue = dev_id % hisi_hba->queue_count;
+
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
+ queue, r, w);
+ return -EAGAIN;
}
- hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
*q = queue;
*s = w;
return 0;
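
get_free_slot_v2_hw() now hashes the device id onto a delivery queue and applies the usual one-slot-kept-free ring test instead of scanning every queue. A standalone sketch of that fullness check (illustrative; 512 is an assumption standing in for HISI_SAS_QUEUE_SLOTS):

#include <stdbool.h>
#include <stdio.h>

/* Assumed ring size; plays the role of HISI_SAS_QUEUE_SLOTS. */
#define QUEUE_SLOTS 512

/*
 * Ring "full" test from get_free_slot_v2_hw(): one slot is always
 * left unused, so the queue is full when the hardware read pointer
 * sits one entry ahead of the driver write pointer, modulo the ring.
 */
static bool queue_full(unsigned int r, unsigned int w)
{
	return r == (w + 1) % QUEUE_SLOTS;
}

int main(void)
{
	printf("r=5, w=4   -> %s\n", queue_full(5, 4) ? "full" : "has space");
	printf("r=0, w=511 -> %s\n", queue_full(0, 511) ? "full" : "has space");
	printf("r=5, w=5   -> %s\n", queue_full(5, 5) ? "full" : "has space");
	return 0;
}
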
@@ -1653,8 +1771,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
}
out:
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
@@ -1675,6 +1793,7 @@ static u8 get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_NCQ_NON_DATA:
return SATA_PROTOCOL_FPDMA;
+ case ATA_CMD_DOWNLOAD_MICRO:
case ATA_CMD_ID_ATA:
case ATA_CMD_PMP_READ:
case ATA_CMD_READ_LOG_EXT:
@@ -1686,18 +1805,27 @@ static u8 get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_PIO_WRITE_EXT:
return SATA_PROTOCOL_PIO;
+ case ATA_CMD_DSM:
+ case ATA_CMD_DOWNLOAD_MICRO_DMA:
+ case ATA_CMD_PMP_READ_DMA:
+ case ATA_CMD_PMP_WRITE_DMA:
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_READ_LOG_DMA_EXT:
+ case ATA_CMD_READ_STREAM_DMA_EXT:
+ case ATA_CMD_TRUSTED_RCV_DMA:
+ case ATA_CMD_TRUSTED_SND_DMA:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_FUA_EXT:
case ATA_CMD_WRITE_QUEUED:
case ATA_CMD_WRITE_LOG_DMA_EXT:
+ case ATA_CMD_WRITE_STREAM_DMA_EXT:
return SATA_PROTOCOL_DMA;
- case ATA_CMD_DOWNLOAD_MICRO:
- case ATA_CMD_DEV_RESET:
case ATA_CMD_CHK_POWER:
+ case ATA_CMD_DEV_RESET:
+ case ATA_CMD_EDD:
case ATA_CMD_FLUSH:
case ATA_CMD_FLUSH_EXT:
case ATA_CMD_VERIFY:
@@ -1970,9 +2098,12 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -2005,8 +2136,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
if (irq_value1) {
if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
- dev_name(dev), irq_value1);
+ panic("%s: DMAC RX/TX ecc bad error!\
+ (0x%x)",
+ dev_name(dev), irq_value1);
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT1, irq_value1);
@@ -2037,6 +2169,318 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
+static void
+one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 reg_val;
+
+ if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+ dev_warn(dev, "hgc_dqe_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
+ HGC_DQE_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+ dev_warn(dev, "hgc_iost_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
+ HGC_IOST_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+ dev_warn(dev, "hgc_itct_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
+ HGC_ITCT_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ dev_warn(dev, "hgc_iostl_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ dev_warn(dev, "hgc_itctl_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+ dev_warn(dev, "hgc_cqe_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
+ HGC_CQE_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem0_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM0_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem1_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM1_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem2_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM2_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+ dev_warn(dev, "rxm_mem3_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+ HGC_RXM_DFX_STATUS15_MEM3_OFF);
+ }
+
+}
+
+static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
+ u32 irq_value)
+{
+ u32 reg_val;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+ panic("%s: hgc_dqe_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
+ HGC_DQE_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+ panic("%s: hgc_iost_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
+ HGC_IOST_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+ panic("%s: hgc_itct_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
+ HGC_ITCT_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ panic("%s: hgc_iostl_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ panic("%s: hgc_itctl_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+ panic("%s: hgc_cqe_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
+ HGC_CQE_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem0_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM0_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem1_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM1_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem2_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM2_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+ panic("%s: rxm_mem3_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+ HGC_RXM_DFX_STATUS15_MEM3_OFF);
+ }
+
+}
+
+static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_value, irq_msk;
+
+ irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
+ if (irq_value) {
+ one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+ multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+ }
+
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
+
+ return IRQ_HANDLED;
+}
+
+#define AXI_ERR_NR 8
+static const char axi_err_info[AXI_ERR_NR][32] = {
+ "IOST_AXI_W_ERR",
+ "IOST_AXI_R_ERR",
+ "ITCT_AXI_W_ERR",
+ "ITCT_AXI_R_ERR",
+ "SATA_AXI_W_ERR",
+ "SATA_AXI_R_ERR",
+ "DQE_AXI_R_ERR",
+ "CQE_AXI_W_ERR"
+};
+
+#define FIFO_ERR_NR 5
+static const char fifo_err_info[FIFO_ERR_NR][32] = {
+ "CQE_WINFO_FIFO",
+ "CQE_MSG_FIFIO",
+ "GETDQE_FIFO",
+ "CMDP_FIFO",
+ "AWTCTRL_FIFO"
+};
+
+static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_value, irq_msk, err_value;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
+
+ irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ if (irq_value) {
+ if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_WP_DEPTH_OFF);
+ panic("%s: write pointer and depth error (0x%x) \
+ found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 <<
+ ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
+ panic("%s: iptt no match slot error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF))
+ panic("%s: read pointer and depth error (0x%x) \
+ found!\n",
+ dev_name(dev), irq_value);
+
+ if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
+ int i;
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_AXI_OFF);
+ err_value = hisi_sas_read32(hisi_hba,
+ HGC_AXI_FIFO_ERR_INFO);
+
+ for (i = 0; i < AXI_ERR_NR; i++) {
+ if (err_value & BIT(i))
+ panic("%s: %s (0x%x) found!\n",
+ dev_name(dev),
+ axi_err_info[i], irq_value);
+ }
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
+ int i;
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_FIFO_OFF);
+ err_value = hisi_sas_read32(hisi_hba,
+ HGC_AXI_FIFO_ERR_INFO);
+
+ for (i = 0; i < FIFO_ERR_NR; i++) {
+ if (err_value & BIT(AXI_ERR_NR + i))
+ panic("%s: %s (0x%x) found!\n",
+ dev_name(dev),
+ fifo_err_info[i], irq_value);
+ }
+
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_LM_OFF);
+ panic("%s: LM add/fetch list error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_ABT_OFF);
+ panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
+
+ return IRQ_HANDLED;
+}
+
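
fatal_axi_int_v2_hw() above reads HGC_AXI_FIFO_ERR_INFO once and decodes it twice: bits 0-7 index axi_err_info[], bits 8-12 index fifo_err_info[]. A standalone sketch of that decode (illustrative only; the bit positions and strings are copied from the hunk above, decode_err_info is a made-up name):

#include <stdio.h>

#define AXI_ERR_NR  8
#define FIFO_ERR_NR 5
#define BIT(n) (1U << (n))

static const char *axi_err_info[AXI_ERR_NR] = {
	"IOST_AXI_W_ERR", "IOST_AXI_R_ERR", "ITCT_AXI_W_ERR",
	"ITCT_AXI_R_ERR", "SATA_AXI_W_ERR", "SATA_AXI_R_ERR",
	"DQE_AXI_R_ERR",  "CQE_AXI_W_ERR",
};

static const char *fifo_err_info[FIFO_ERR_NR] = {
	"CQE_WINFO_FIFO", "CQE_MSG_FIFIO", "GETDQE_FIFO",
	"CMDP_FIFO", "AWTCTRL_FIFO",
};

/* Bits 0..7 are AXI errors, bits 8..12 are FIFO errors. */
static void decode_err_info(unsigned int err_value)
{
	int i;

	for (i = 0; i < AXI_ERR_NR; i++)
		if (err_value & BIT(i))
			printf("AXI error:  %s\n", axi_err_info[i]);

	for (i = 0; i < FIFO_ERR_NR; i++)
		if (err_value & BIT(AXI_ERR_NR + i))
			printf("FIFO error: %s\n", fifo_err_info[i]);
}

int main(void)
{
	/* Bits 0, 1 and 8: two IOST AXI errors plus CQE_WINFO_FIFO. */
	decode_err_info(0x103);
	return 0;
}
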
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
{
struct hisi_sas_cq *cq = p;
@@ -2136,6 +2580,16 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
goto end;
}
+ /* check ERR bit of Status Register */
+ if (fis->status & ATA_ERR) {
+ dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
+ fis->status);
+ disable_phy_v2_hw(hisi_hba, phy_no);
+ enable_phy_v2_hw(hisi_hba, phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+
if (unlikely(phy_no == 8)) {
u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
@@ -2190,6 +2644,11 @@ static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
int_chnl_int_v2_hw,
};
+static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
+ fatal_ecc_int_v2_hw,
+ fatal_axi_int_v2_hw
+};
+
/**
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
@@ -2245,6 +2704,26 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
+ for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) {
+ int idx = i;
+
+ irq = irq_map[idx + 81];
+ if (!irq) {
+ dev_err(dev, "irq init: fail map fatal interrupt %d\n",
+ idx);
+ return -ENOENT;
+ }
+
+ rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
+ DRV_NAME " fatal", hisi_hba);
+ if (rc) {
+ dev_err(dev,
+ "irq init: could not request fatal interrupt %d, rc=%d\n",
+ irq, rc);
+ return -ENOENT;
+ }
+ }
+
for (i = 0; i < hisi_hba->queue_count; i++) {
int idx = i + 96; /* First cq interrupt is irq96 */
@@ -2303,12 +2782,26 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.phy_enable = enable_phy_v2_hw,
.phy_disable = disable_phy_v2_hw,
.phy_hard_reset = phy_hard_reset_v2_hw,
+ .phy_set_linkrate = phy_set_linkrate_v2_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
{
+ /*
+ * Check if we should defer the probe before we probe the
+ * upper layer, as it's hard to defer later on.
+ */
+ int ret = platform_get_irq(pdev, 0);
+
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "cannot obtain irq\n");
+ return ret;
+ }
+
return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
}
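
The probe change above asks for the first IRQ before calling into the core driver, so a not-yet-ready interrupt controller turns into -EPROBE_DEFER instead of a failure deep inside hisi_sas_probe(). A hedged sketch of the same early-defer pattern in isolation (illustrative; example_probe is a made-up name):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* Ask for the first IRQ before doing any real work. */
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0) {
		/*
		 * -EPROBE_DEFER just means the interrupt provider has
		 * not probed yet; stay quiet and let the core retry.
		 */
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot obtain irq\n");
		return irq;
	}

	/* ... continue with the heavyweight part of the probe ... */
	return 0;
}
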
@@ -2319,6 +2812,7 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
static const struct of_device_id sas_v2_of_match[] = {
{ .compatible = "hisilicon,hip06-sas-v2",},
+ { .compatible = "hisilicon,hip07-sas-v2",},
{},
};
MODULE_DEVICE_TABLE(of, sas_v2_of_match);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1d6ab76a514..691a09316952 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -276,6 +276,9 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+ unsigned char lunaddr[],
+ int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
@@ -700,9 +703,7 @@ static ssize_t lunid_show(struct device *dev,
}
memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- lunid[0], lunid[1], lunid[2], lunid[3],
- lunid[4], lunid[5], lunid[6], lunid[7]);
+ return snprintf(buf, 20, "0x%8phN\n", lunid);
}
static ssize_t unique_id_show(struct device *dev,
@@ -864,6 +865,16 @@ static ssize_t path_info_show(struct device *dev,
return output_len;
}
+static ssize_t host_show_ctlr_num(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%d\n", h->ctlr);
+}
+
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
@@ -887,6 +898,8 @@ static DEVICE_ATTR(resettable, S_IRUGO,
host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
host_show_lockup_detected, NULL);
+static DEVICE_ATTR(ctlr_num, S_IRUGO,
+ host_show_ctlr_num, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
@@ -907,6 +920,7 @@ static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_hp_ssd_smart_path_status,
&dev_attr_raid_offload_debug,
&dev_attr_lockup_detected,
+ &dev_attr_ctlr_num,
NULL,
};
@@ -1001,7 +1015,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
{
if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
- if (unlikely(!h->msix_vector))
+ if (unlikely(!h->msix_vectors))
return;
if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
c->Header.ReplyQueue =
@@ -2541,7 +2555,7 @@ static void complete_scsi_command(struct CommandList *cp)
if ((unlikely(hpsa_is_pending_event(cp)))) {
if (cp->reset_pending)
- return hpsa_cmd_resolve_and_free(h, cp);
+ return hpsa_cmd_free_and_done(h, cp, cmd);
if (cp->abort_pending)
return hpsa_cmd_abort_and_free(h, cp, cmd);
}
@@ -2824,14 +2838,8 @@ static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
const u8 *cdb = c->Request.CDB;
const u8 *lun = c->Header.LUN.LunAddrBytes;
- dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
- " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- txt, lun[0], lun[1], lun[2], lun[3],
- lun[4], lun[5], lun[6], lun[7],
- cdb[0], cdb[1], cdb[2], cdb[3],
- cdb[4], cdb[5], cdb[6], cdb[7],
- cdb[8], cdb[9], cdb[10], cdb[11],
- cdb[12], cdb[13], cdb[14], cdb[15]);
+ dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
+ txt, lun, cdb);
}
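
The hpsa hunks above replace hand-rolled "%02x%02x..." chains with the kernel's %*ph printk extension, which prints small buffers as hex: the N suffix drops the separator between bytes, and the byte count can either sit in the format itself ("%8phN") or be passed as the width argument. A short illustrative use (dump_lun_example is a made-up helper):

#include <linux/kernel.h>

static void dump_lun_example(const u8 lunid[8], const u8 cdb[16])
{
	/* 8 bytes, no separator: "LUN: 0x0102030405060708" */
	pr_info("LUN: 0x%8phN\n", lunid);

	/* length passed as the width argument, space-separated bytes */
	pr_info("CDB: %*ph\n", 16, cdb);
}
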
static void hpsa_scsi_interpret_error(struct ctlr_info *h,
@@ -3080,6 +3088,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
if (unlikely(rc))
atomic_set(&dev->reset_cmds_out, 0);
+ else
+ wait_for_device_to_become_ready(h, scsi3addr, 0);
mutex_unlock(&h->reset_mutex);
return rc;
@@ -3623,8 +3633,32 @@ out:
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
struct ReportExtendedLUNdata *buf, int bufsize)
{
- return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
- HPSA_REPORT_PHYS_EXTENDED);
+ int rc;
+ struct ReportLUNdata *lbuf;
+
+ rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+ HPSA_REPORT_PHYS_EXTENDED);
+ if (!rc || !hpsa_allow_any)
+ return rc;
+
+ /* REPORT PHYS EXTENDED is not supported */
+ lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
+ if (!lbuf)
+ return -ENOMEM;
+
+ rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
+ if (!rc) {
+ int i;
+ u32 nphys;
+
+ /* Copy ReportLUNdata header */
+ memcpy(buf, lbuf, 8);
+ nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
+ for (i = 0; i < nphys; i++)
+ memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
+ }
+ kfree(lbuf);
+ return rc;
}
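
The fallback above issues a plain REPORT LUNS, copies the 8-byte response header, and derives the number of physical entries from the big-endian LUN-list length divided by the 8-byte entry size. A standalone sketch of that header arithmetic (illustrative only; be32_to_host is a made-up helper mirroring be32_to_cpu on the raw bytes):

#include <stdint.h>
#include <stdio.h>

/* Convert the 4-byte big-endian length field to host order. */
static uint32_t be32_to_host(const uint8_t b[4])
{
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	/* Header of a response describing three 8-byte LUN entries. */
	uint8_t hdr[8] = { 0x00, 0x00, 0x00, 0x18, 0, 0, 0, 0 };
	uint32_t nphys = be32_to_host(hdr) / 8;

	printf("%u physical LUN entries\n", nphys); /* prints 3 */
	return 0;
}
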
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
@@ -5488,7 +5522,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
dev = cmd->device->hostdata;
if (!dev) {
- cmd->result = NOT_READY << 16; /* host byte */
+ cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
@@ -5569,6 +5603,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
if (unlikely(lockup_detected(h)))
return hpsa_scan_complete(h);
+ /*
+ * Do the scan after a reset completion
+ */
+ if (h->reset_in_progress) {
+ h->drv_req_rescan = 1;
+ return;
+ }
+
hpsa_update_scsi_devices(h);
hpsa_scan_complete(h);
@@ -5624,7 +5666,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
sh->sg_tablesize = h->maxsgentries;
sh->transportt = hpsa_sas_transport_template;
sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[h->intr_mode];
+ sh->irq = pci_irq_vector(h->pdev, 0);
sh->unique_id = sh->irq;
h->scsi_host = sh;
@@ -5999,11 +6041,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
- "Reset as abort",
- scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
- scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+ "Reset as abort", scsi3addr);
if (!dev->offload_enabled) {
dev_warn(&h->pdev->dev,
@@ -6020,32 +6060,28 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
/* send the reset */
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
+ psa);
rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
if (rc != 0) {
dev_warn(&h->pdev->dev,
- "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
+ psa);
return rc; /* failed to reset */
}
/* wait for device to recover */
if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
dev_warn(&h->pdev->dev,
- "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
+ psa);
return -1; /* failed to recover */
}
/* device recovered */
dev_info(&h->pdev->dev,
- "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
+ psa);
return rc; /* success */
}
@@ -6663,8 +6699,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- ioc = (BIG_IOCTL_Command_struct *)
- kmalloc(sizeof(*ioc), GFP_KERNEL);
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
if (!ioc) {
status = -ENOMEM;
goto cleanup1;
@@ -7658,67 +7693,41 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
- if (h->msix_vector) {
- if (h->pdev->msix_enabled)
- pci_disable_msix(h->pdev);
- h->msix_vector = 0;
- } else if (h->msi_vector) {
- if (h->pdev->msi_enabled)
- pci_disable_msi(h->pdev);
- h->msi_vector = 0;
- }
+ pci_free_irq_vectors(h->pdev);
+ h->msix_vectors = 0;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use legacy INTx mode.
*/
-static void hpsa_interrupt_mode(struct ctlr_info *h)
+static int hpsa_interrupt_mode(struct ctlr_info *h)
{
-#ifdef CONFIG_PCI_MSI
- int err, i;
- struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
-
- for (i = 0; i < MAX_REPLY_QUEUES; i++) {
- hpsa_msix_entries[i].vector = 0;
- hpsa_msix_entries[i].entry = i;
- }
+ unsigned int flags = PCI_IRQ_LEGACY;
+ int ret;
/* Some boards advertise MSI but don't really support it */
- if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
- (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
- goto default_int_mode;
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
- dev_info(&h->pdev->dev, "MSI-X capable controller\n");
- h->msix_vector = MAX_REPLY_QUEUES;
- if (h->msix_vector > num_online_cpus())
- h->msix_vector = num_online_cpus();
- err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
- 1, h->msix_vector);
- if (err < 0) {
- dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
- h->msix_vector = 0;
- goto single_msi_mode;
- } else if (err < h->msix_vector) {
- dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
- "available\n", err);
+ switch (h->board_id) {
+ case 0x40700E11:
+ case 0x40800E11:
+ case 0x40820E11:
+ case 0x40830E11:
+ break;
+ default:
+ ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (ret > 0) {
+ h->msix_vectors = ret;
+ return 0;
}
- h->msix_vector = err;
- for (i = 0; i < h->msix_vector; i++)
- h->intr[i] = hpsa_msix_entries[i].vector;
- return;
- }
-single_msi_mode:
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
- dev_info(&h->pdev->dev, "MSI capable controller\n");
- if (!pci_enable_msi(h->pdev))
- h->msi_vector = 1;
- else
- dev_warn(&h->pdev->dev, "MSI init failed\n");
+
+ flags |= PCI_IRQ_MSI;
+ break;
}
-default_int_mode:
-#endif /* CONFIG_PCI_MSI */
- /* if we get here we're going to use the default interrupt mode */
- h->intr[h->intr_mode] = h->pdev->irq;
+
+ ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
+ if (ret < 0)
+ return ret;
+ return 0;
}
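
The rewritten hpsa_interrupt_mode() leans on pci_alloc_irq_vectors(), which tries MSI-X, MSI and legacy INTx in a single call and pairs with pci_irq_vector() to look up the Linux IRQ number for each queue. A hedged sketch of that pattern (illustrative; example_setup_irqs and MAX_QUEUES are made-up names, and a real driver would pass per-queue cookies to request_irq()):

#include <linux/interrupt.h>
#include <linux/pci.h>

#define MAX_QUEUES 8

static int example_setup_irqs(struct pci_dev *pdev, irq_handler_t handler,
			      void *data)
{
	int nvec, i, rc;

	/* One call: best of MSI-X, MSI or INTx, with affinity spreading. */
	nvec = pci_alloc_irq_vectors(pdev, 1, MAX_QUEUES,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI |
				     PCI_IRQ_LEGACY | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "example", data);
		if (rc)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);
	return rc;
}
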
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -8074,7 +8083,9 @@ static int hpsa_pci_init(struct ctlr_info *h)
pci_set_master(h->pdev);
- hpsa_interrupt_mode(h);
+ err = hpsa_interrupt_mode(h);
+ if (err)
+ goto clean1;
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err)
goto clean2; /* intmode+region, pci */
@@ -8110,6 +8121,7 @@ clean3: /* vaddr, intmode+region, pci */
h->vaddr = NULL;
clean2: /* intmode+region, pci */
hpsa_disable_interrupt_mode(h);
+clean1:
/*
* call pci_disable_device before pci_release_regions per
* Documentation/PCI/pci.txt
@@ -8243,34 +8255,20 @@ clean_up:
return -ENOMEM;
}
-static void hpsa_irq_affinity_hints(struct ctlr_info *h)
-{
- int i, cpu;
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < h->msix_vector; i++) {
- irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-}
-
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
int i;
- if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+ if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
/* Single reply queue, only one irq to free */
- i = h->intr_mode;
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
- h->q[i] = 0;
+ free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
+ h->q[h->intr_mode] = 0;
return;
}
- for (i = 0; i < h->msix_vector; i++) {
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
+ for (i = 0; i < h->msix_vectors; i++) {
+ free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
h->q[i] = 0;
}
for (; i < MAX_REPLY_QUEUES; i++)
@@ -8291,11 +8289,11 @@ static int hpsa_request_irqs(struct ctlr_info *h,
for (i = 0; i < MAX_REPLY_QUEUES; i++)
h->q[i] = (u8) i;
- if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
+ if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
/* If performant mode and MSI-X, use multiple reply queues */
- for (i = 0; i < h->msix_vector; i++) {
+ for (i = 0; i < h->msix_vectors; i++) {
sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
- rc = request_irq(h->intr[i], msixhandler,
+ rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
0, h->intrname[i],
&h->q[i]);
if (rc) {
@@ -8303,9 +8301,9 @@ static int hpsa_request_irqs(struct ctlr_info *h,
dev_err(&h->pdev->dev,
"failed to get irq %d for %s\n",
- h->intr[i], h->devname);
+ pci_irq_vector(h->pdev, i), h->devname);
for (j = 0; j < i; j++) {
- free_irq(h->intr[j], &h->q[j]);
+ free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
h->q[j] = 0;
}
for (; j < MAX_REPLY_QUEUES; j++)
@@ -8313,33 +8311,27 @@ static int hpsa_request_irqs(struct ctlr_info *h,
return rc;
}
}
- hpsa_irq_affinity_hints(h);
} else {
/* Use single reply pool */
- if (h->msix_vector > 0 || h->msi_vector) {
- if (h->msix_vector)
- sprintf(h->intrname[h->intr_mode],
- "%s-msix", h->devname);
- else
- sprintf(h->intrname[h->intr_mode],
- "%s-msi", h->devname);
- rc = request_irq(h->intr[h->intr_mode],
+ if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
+ sprintf(h->intrname[0], "%s-msi%s", h->devname,
+ h->msix_vectors ? "x" : "");
+ rc = request_irq(pci_irq_vector(h->pdev, 0),
msixhandler, 0,
- h->intrname[h->intr_mode],
+ h->intrname[0],
&h->q[h->intr_mode]);
} else {
sprintf(h->intrname[h->intr_mode],
"%s-intx", h->devname);
- rc = request_irq(h->intr[h->intr_mode],
+ rc = request_irq(pci_irq_vector(h->pdev, 0),
intxhandler, IRQF_SHARED,
- h->intrname[h->intr_mode],
+ h->intrname[0],
&h->q[h->intr_mode]);
}
- irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
}
if (rc) {
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
- h->intr[h->intr_mode], h->devname);
+ pci_irq_vector(h->pdev, 0), h->devname);
hpsa_free_irqs(h);
return -ENODEV;
}
@@ -8640,6 +8632,14 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
if (h->remove_in_progress)
return;
+ /*
+ * Do the scan after the reset
+ */
+ if (h->reset_in_progress) {
+ h->drv_req_rescan = 1;
+ return;
+ }
+
if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
scsi_host_get(h->scsi_host);
hpsa_ack_ctlr_events(h);
@@ -9525,7 +9525,7 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
return rc;
}
- h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
+ h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
h->reply_queue_size = h->max_commands * sizeof(u64);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 9ea162de80dc..64e98295b707 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -176,9 +176,7 @@ struct ctlr_info {
# define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3
- unsigned int intr[MAX_REPLY_QUEUES];
- unsigned int msix_vector;
- unsigned int msi_vector;
+ unsigned int msix_vectors;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
@@ -466,7 +464,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
unsigned long register_value = FIFO_EMPTY;
/* msi auto clears the interrupt pending bit. */
- if (unlikely(!(h->msi_vector || h->msix_vector))) {
+ if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
/* flush the controller write of the reply queue by reading
* outbound doorbell status register.
*/
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7e487c78279c..78b72c28a55d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -32,6 +32,7 @@
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
+#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/vio.h>
@@ -1701,14 +1702,14 @@ static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
/**
* ibmvfc_bsg_timeout - Handle a BSG timeout
- * @job: struct fc_bsg_job that timed out
+ * @job: struct bsg_job that timed out
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
+static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
- struct ibmvfc_host *vhost = shost_priv(job->shost);
+ struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
unsigned long port_id = (unsigned long)job->dd_data;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
@@ -1814,41 +1815,43 @@ unlock_out:
/**
* ibmvfc_bsg_request - Handle a BSG request
- * @job: struct fc_bsg_job to be executed
+ * @job: struct bsg_job to be executed
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_bsg_request(struct fc_bsg_job *job)
+static int ibmvfc_bsg_request(struct bsg_job *job)
{
- struct ibmvfc_host *vhost = shost_priv(job->shost);
- struct fc_rport *rport = job->rport;
+ struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
+ struct fc_rport *rport = fc_bsg_to_rport(job);
struct ibmvfc_passthru_mad *mad;
struct ibmvfc_event *evt;
union ibmvfc_iu rsp_iu;
unsigned long flags, port_id = -1;
- unsigned int code = job->request->msgcode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ unsigned int code = bsg_request->msgcode;
int rc = 0, req_seg, rsp_seg, issue_login = 0;
u32 fc_flags, rsp_len;
ENTER;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (rport)
port_id = rport->port_id;
switch (code) {
case FC_BSG_HST_ELS_NOLOGIN:
- port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
- (job->request->rqst_data.h_els.port_id[1] << 8) |
- job->request->rqst_data.h_els.port_id[2];
+ port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
+ (bsg_request->rqst_data.h_els.port_id[1] << 8) |
+ bsg_request->rqst_data.h_els.port_id[2];
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
case FC_BSG_HST_CT:
issue_login = 1;
- port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
- (job->request->rqst_data.h_ct.port_id[1] << 8) |
- job->request->rqst_data.h_ct.port_id[2];
+ port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
+ (bsg_request->rqst_data.h_ct.port_id[1] << 8) |
+ bsg_request->rqst_data.h_ct.port_id[2];
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
@@ -1937,13 +1940,14 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job)
if (rsp_iu.passthru.common.status)
rc = -EIO;
else
- job->reply->reply_payload_rcv_len = rsp_len;
+ bsg_reply->reply_payload_rcv_len = rsp_len;
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
rc = 0;
out:
dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 642b739ad0da..c9fa3565c671 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,7 +22,7 @@
*
****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
@@ -81,7 +81,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
}
} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
if (se_cmd->data_direction == DMA_TO_DEVICE) {
- /* residual data from an overflow write */
+ /* residual data from an overflow write */
rsp->flags = SRP_RSP_FLAG_DOOVER;
rsp->data_out_res_cnt = cpu_to_be32(residual_count);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -101,7 +101,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
* and the function returns TRUE.
*
* EXECUTION ENVIRONMENT:
- * Interrupt or Process environment
+ * Interrupt or Process environment
*/
static bool connection_broken(struct scsi_info *vscsi)
{
@@ -324,7 +324,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
}
/**
- * ibmvscsis_send_init_message() - send initialize message to the client
+ * ibmvscsis_send_init_message() - send initialize message to the client
* @vscsi: Pointer to our adapter structure
* @format: Which Init Message format to send
*
@@ -382,13 +382,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
vscsi->cmd_q.base_addr);
if (crq) {
*format = (uint)(crq->format);
- rc = ERROR;
+ rc = ERROR;
crq->valid = INVALIDATE_CMD_RESP_EL;
dma_rmb();
}
} else {
*format = (uint)(crq->format);
- rc = ERROR;
+ rc = ERROR;
crq->valid = INVALIDATE_CMD_RESP_EL;
dma_rmb();
}
@@ -397,166 +397,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
}
/**
- * ibmvscsis_establish_new_q() - Establish new CRQ queue
- * @vscsi: Pointer to our adapter structure
- * @new_state: New state being established after resetting the queue
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
-{
- long rc = ADAPT_SUCCESS;
- uint format;
-
- vscsi->flags &= PRESERVE_FLAG_FIELDS;
- vscsi->rsp_q_timer.timer_pops = 0;
- vscsi->debit = 0;
- vscsi->credit = 0;
-
- rc = vio_enable_interrupts(vscsi->dma_dev);
- if (rc) {
- pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
- rc);
- return rc;
- }
-
- rc = ibmvscsis_check_init_msg(vscsi, &format);
- if (rc) {
- dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
- rc);
- return rc;
- }
-
- if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
- switch (rc) {
- case H_SUCCESS:
- case H_DROPPED:
- case H_CLOSED:
- rc = ADAPT_SUCCESS;
- break;
-
- case H_PARAMETER:
- case H_HARDWARE:
- break;
-
- default:
- vscsi->state = UNDEFINED;
- rc = H_HARDWARE;
- break;
- }
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_reset_queue() - Reset CRQ Queue
- * @vscsi: Pointer to our adapter structure
- * @new_state: New state to establish after resetting the queue
- *
- * This function calls h_free_q and then calls h_reg_q and does all
- * of the bookkeeping to get us back to where we can communicate.
- *
- * Actually, we don't always call h_free_crq. A problem was discovered
- * where one partition would close and reopen his queue, which would
- * cause his partner to get a transport event, which would cause him to
- * close and reopen his queue, which would cause the original partition
- * to get a transport event, etc., etc. To prevent this, we don't
- * actually close our queue if the client initiated the reset, (i.e.
- * either we got a transport event or we have detected that the client's
- * queue is gone)
- *
- * EXECUTION ENVIRONMENT:
- * Process environment, called with interrupt lock held
- */
-static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
-{
- int bytes;
- long rc = ADAPT_SUCCESS;
-
- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
-
- /* don't reset, the client did it for us */
- if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
- vscsi->flags &= PRESERVE_FLAG_FIELDS;
- vscsi->rsp_q_timer.timer_pops = 0;
- vscsi->debit = 0;
- vscsi->credit = 0;
- vscsi->state = new_state;
- vio_enable_interrupts(vscsi->dma_dev);
- } else {
- rc = ibmvscsis_free_command_q(vscsi);
- if (rc == ADAPT_SUCCESS) {
- vscsi->state = new_state;
-
- bytes = vscsi->cmd_q.size * PAGE_SIZE;
- rc = h_reg_crq(vscsi->dds.unit_id,
- vscsi->cmd_q.crq_token, bytes);
- if (rc == H_CLOSED || rc == H_SUCCESS) {
- rc = ibmvscsis_establish_new_q(vscsi,
- new_state);
- }
-
- if (rc != ADAPT_SUCCESS) {
- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
-
- vscsi->state = ERR_DISCONNECTED;
- vscsi->flags |= RESPONSE_Q_DOWN;
- ibmvscsis_free_command_q(vscsi);
- }
- } else {
- vscsi->state = ERR_DISCONNECTED;
- vscsi->flags |= RESPONSE_Q_DOWN;
- }
- }
-}
-
-/**
- * ibmvscsis_free_cmd_resources() - Free command resources
- * @vscsi: Pointer to our adapter structure
- * @cmd: Command which is not longer in use
- *
- * Must be called with interrupt lock held.
- */
-static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
- struct ibmvscsis_cmd *cmd)
-{
- struct iu_entry *iue = cmd->iue;
-
- switch (cmd->type) {
- case TASK_MANAGEMENT:
- case SCSI_CDB:
- /*
- * When the queue goes down this value is cleared, so it
- * cannot be cleared in this general purpose function.
- */
- if (vscsi->debit)
- vscsi->debit -= 1;
- break;
- case ADAPTER_MAD:
- vscsi->flags &= ~PROCESSING_MAD;
- break;
- case UNSET_TYPE:
- break;
- default:
- dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
- cmd->type);
- break;
- }
-
- cmd->iue = NULL;
- list_add_tail(&cmd->list, &vscsi->free_cmd);
- srp_iu_put(iue);
-
- if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
- list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
- vscsi->flags &= ~WAIT_FOR_IDLE;
- complete(&vscsi->wait_idle);
- }
-}
-
-/**
* ibmvscsis_disconnect() - Helper function to disconnect
* @work: Pointer to work_struct, gives access to our adapter structure
*
@@ -575,7 +415,6 @@ static void ibmvscsis_disconnect(struct work_struct *work)
proc_work);
u16 new_state;
bool wait_idle = false;
- long rc = ADAPT_SUCCESS;
spin_lock_bh(&vscsi->intr_lock);
new_state = vscsi->new_state;
@@ -589,7 +428,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
	 * should transition to the new state
*/
switch (vscsi->state) {
- /* Should never be called while in this state. */
+ /* Should never be called while in this state. */
case NO_QUEUE:
/*
* Can never transition from this state;
@@ -628,30 +467,24 @@ static void ibmvscsis_disconnect(struct work_struct *work)
vscsi->state = new_state;
break;
- /*
- * If this is a transition into an error state.
- * a client is attempting to establish a connection
- * and has violated the RPA protocol.
- * There can be nothing pending on the adapter although
- * there can be requests in the command queue.
- */
case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
switch (new_state) {
- case ERR_DISCONNECT:
- vscsi->flags |= RESPONSE_Q_DOWN;
+ case UNCONFIGURING:
vscsi->state = new_state;
+ vscsi->flags |= RESPONSE_Q_DOWN;
vscsi->flags &= ~(SCHEDULE_DISCONNECT |
DISCONNECT_SCHEDULED);
- ibmvscsis_free_command_q(vscsi);
- break;
- case ERR_DISCONNECT_RECONNECT:
- ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
+ dma_rmb();
+ if (vscsi->flags & CFG_SLEEPING) {
+ vscsi->flags &= ~CFG_SLEEPING;
+ complete(&vscsi->unconfig);
+ }
break;
/* should never happen */
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
case WAIT_IDLE:
- rc = ERROR;
dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
vscsi->state);
break;
@@ -660,6 +493,13 @@ static void ibmvscsis_disconnect(struct work_struct *work)
case WAIT_IDLE:
switch (new_state) {
+ case UNCONFIGURING:
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ vscsi->state = new_state;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+ DISCONNECT_SCHEDULED);
+ ibmvscsis_free_command_q(vscsi);
+ break;
case ERR_DISCONNECT:
case ERR_DISCONNECT_RECONNECT:
vscsi->state = new_state;
@@ -788,7 +628,6 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
break;
case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
case WAIT_IDLE:
case WAIT_CONNECTION:
case CONNECTED:
@@ -806,6 +645,310 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
}
/**
+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ case UNCONFIGURING:
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case WAIT_CONNECTION:
+ vscsi->state = CONNECTED;
+ break;
+
+ case WAIT_IDLE:
+ case SRP_PROCESSING:
+ case CONNECTED:
+ case WAIT_ENABLED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_msg() - Respond to an Init Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case WAIT_CONNECTION:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->state = CONNECTED;
+ break;
+
+ case H_PARAMETER:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ break;
+
+ case H_DROPPED:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ rc = ERROR;
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+
+ case H_CLOSED:
+ pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ rc = 0;
+ break;
+ }
+ break;
+
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case UNCONFIGURING:
+ break;
+
+ case WAIT_ENABLED:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ case WAIT_IDLE:
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_init_msg() - Respond to an init message
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ element containing the Init Message
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+
+ rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+ 0);
+ if (rc == H_SUCCESS) {
+ vscsi->client_data.partition_number =
+ be64_to_cpu(*(u64 *)vscsi->map_buf);
+ pr_debug("init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
+ } else {
+ pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ rc = ADAPT_SUCCESS;
+ }
+
+ if (crq->format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ } else if (crq->format == INIT_COMPLETE_MSG) {
+ rc = ibmvscsis_handle_init_compl_msg(vscsi);
+ } else {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_establish_new_q() - Establish new CRQ queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+ uint format;
+
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+
+ rc = vio_enable_interrupts(vscsi->dma_dev);
+ if (rc) {
+ pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ rc = ibmvscsis_check_init_msg(vscsi, &format);
+ if (rc) {
+ dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ if (format == UNUSED_FORMAT) {
+ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ case H_DROPPED:
+ case H_CLOSED:
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_PARAMETER:
+ case H_HARDWARE:
+ break;
+
+ default:
+ vscsi->state = UNDEFINED;
+ rc = H_HARDWARE;
+ break;
+ }
+ } else if (format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_reset_queue() - Reset CRQ Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function calls h_free_q and then calls h_reg_q and does all
+ * of the bookkeeping to get us back to where we can communicate.
+ *
+ * Actually, we don't always call h_free_crq. A problem was discovered
+ * where one partition would close and reopen his queue, which would
+ * cause his partner to get a transport event, which would cause him to
+ * close and reopen his queue, which would cause the original partition
+ * to get a transport event, etc., etc. To prevent this, we don't
+ * actually close our queue if the client initiated the reset, (i.e.
+ * either we got a transport event or we have detected that the client's
+ * queue is gone)
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
+{
+ int bytes;
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+
+ /* don't reset, the client did it for us */
+ if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+ vscsi->state = WAIT_CONNECTION;
+ vio_enable_interrupts(vscsi->dma_dev);
+ } else {
+ rc = ibmvscsis_free_command_q(vscsi);
+ if (rc == ADAPT_SUCCESS) {
+ vscsi->state = WAIT_CONNECTION;
+
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id,
+ vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ rc = ibmvscsis_establish_new_q(vscsi);
+ }
+
+ if (rc != ADAPT_SUCCESS) {
+ pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ ibmvscsis_free_command_q(vscsi);
+ }
+ } else {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ }
+ }
+}
+
+/**
+ * ibmvscsis_free_cmd_resources() - Free command resources
+ * @vscsi: Pointer to our adapter structure
+ * @cmd:	Command which is no longer in use
+ *
+ * Must be called with interrupt lock held.
+ */
+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+
+ switch (cmd->type) {
+ case TASK_MANAGEMENT:
+ case SCSI_CDB:
+ /*
+ * When the queue goes down this value is cleared, so it
+ * cannot be cleared in this general purpose function.
+ */
+ if (vscsi->debit)
+ vscsi->debit -= 1;
+ break;
+ case ADAPTER_MAD:
+ vscsi->flags &= ~PROCESSING_MAD;
+ break;
+ case UNSET_TYPE:
+ break;
+ default:
+ dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+ cmd->type);
+ break;
+ }
+
+ cmd->iue = NULL;
+ list_add_tail(&cmd->list, &vscsi->free_cmd);
+ srp_iu_put(iue);
+
+ if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+ list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+ vscsi->flags &= ~WAIT_FOR_IDLE;
+ complete(&vscsi->wait_idle);
+ }
+}
+
+/**
* ibmvscsis_trans_event() - Handle a Transport Event
* @vscsi: Pointer to our adapter structure
* @crq: Pointer to CRQ entry containing the Transport Event
@@ -863,10 +1006,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
TRANS_EVENT));
break;
- case PART_UP_WAIT_ENAB:
- vscsi->state = WAIT_ENABLED;
- break;
-
case SRP_PROCESSING:
if ((vscsi->debit > 0) ||
!list_empty(&vscsi->schedule_q) ||
@@ -895,7 +1034,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
}
}
- rc = vscsi->flags & SCHEDULE_DISCONNECT;
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
vscsi->flags, vscsi->state, rc);
@@ -1066,16 +1205,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
free_qs = true;
switch (vscsi->state) {
+ case UNCONFIGURING:
+ ibmvscsis_free_command_q(vscsi);
+ dma_rmb();
+ isync();
+ if (vscsi->flags & CFG_SLEEPING) {
+ vscsi->flags &= ~CFG_SLEEPING;
+ complete(&vscsi->unconfig);
+ }
+ break;
case ERR_DISCONNECT_RECONNECT:
- ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
+ ibmvscsis_reset_queue(vscsi);
pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
break;
case ERR_DISCONNECT:
ibmvscsis_free_command_q(vscsi);
- vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
vscsi->flags |= RESPONSE_Q_DOWN;
- vscsi->state = ERR_DISCONNECTED;
+ if (vscsi->tport.enabled)
+ vscsi->state = ERR_DISCONNECTED;
+ else
+ vscsi->state = WAIT_ENABLED;
pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
vscsi->flags, vscsi->state);
break;
@@ -1220,7 +1371,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
* @iue: Information Unit containing the Adapter Info MAD request
*
* EXECUTION ENVIRONMENT:
- * Interrupt adpater lock is held
+ * Interrupt adapter lock is held
*/
static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
struct iu_entry *iue)
@@ -1620,8 +1771,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
be64_to_cpu(msg_hi),
be64_to_cpu(cmd->rsp.tag));
- pr_debug("send_messages: tag 0x%llx, rc %ld\n",
- be64_to_cpu(cmd->rsp.tag), rc);
+ pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag), rc);
/* if all ok free up the command element resources */
if (rc == H_SUCCESS) {
@@ -1691,7 +1842,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
* @crq: Pointer to the CRQ entry containing the MAD request
*
* EXECUTION ENVIRONMENT:
- * Interrupt called with adapter lock held
+ * Interrupt, called with adapter lock held
*/
static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
@@ -1745,14 +1896,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
- if (be16_to_cpu(mad->length) < 0) {
- dev_err(&vscsi->dev, "mad: length is < 0\n");
- ibmvscsis_post_disconnect(vscsi,
- ERR_DISCONNECT_RECONNECT, 0);
- rc = SRP_VIOLATION;
- } else {
- rc = ibmvscsis_process_mad(vscsi, iue);
- }
+ rc = ibmvscsis_process_mad(vscsi, iue);
pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
rc);
@@ -1864,7 +2008,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
break;
case H_PERMISSION:
if (connection_broken(vscsi))
- flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
+ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
rc);
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
@@ -2187,156 +2331,6 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
}
/**
- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
- * @vscsi: Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
-{
- long rc = ADAPT_SUCCESS;
-
- switch (vscsi->state) {
- case NO_QUEUE:
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- case ERR_DISCONNECTED:
- case UNCONFIGURING:
- case UNDEFINED:
- rc = ERROR;
- break;
-
- case WAIT_CONNECTION:
- vscsi->state = CONNECTED;
- break;
-
- case WAIT_IDLE:
- case SRP_PROCESSING:
- case CONNECTED:
- case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
- default:
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
- vscsi->state);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- break;
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_handle_init_msg() - Respond to an Init Message
- * @vscsi: Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
-{
- long rc = ADAPT_SUCCESS;
-
- switch (vscsi->state) {
- case WAIT_ENABLED:
- vscsi->state = PART_UP_WAIT_ENAB;
- break;
-
- case WAIT_CONNECTION:
- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
- switch (rc) {
- case H_SUCCESS:
- vscsi->state = CONNECTED;
- break;
-
- case H_PARAMETER:
- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
- rc);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
- break;
-
- case H_DROPPED:
- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
- rc);
- rc = ERROR;
- ibmvscsis_post_disconnect(vscsi,
- ERR_DISCONNECT_RECONNECT, 0);
- break;
-
- case H_CLOSED:
- pr_warn("init_msg: failed to send, rc %ld\n", rc);
- rc = 0;
- break;
- }
- break;
-
- case UNDEFINED:
- rc = ERROR;
- break;
-
- case UNCONFIGURING:
- break;
-
- case PART_UP_WAIT_ENAB:
- case CONNECTED:
- case SRP_PROCESSING:
- case WAIT_IDLE:
- case NO_QUEUE:
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- case ERR_DISCONNECTED:
- default:
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
- vscsi->state);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- break;
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_init_msg() - Respond to an init message
- * @vscsi: Pointer to our adapter structure
- * @crq: Pointer to CRQ element containing the Init Message
- *
- * EXECUTION ENVIRONMENT:
- * Interrupt, interrupt lock held
- */
-static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
-{
- long rc = ADAPT_SUCCESS;
-
- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
-
- rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
- (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
- 0);
- if (rc == H_SUCCESS) {
- vscsi->client_data.partition_number =
- be64_to_cpu(*(u64 *)vscsi->map_buf);
- pr_debug("init_msg, part num %d\n",
- vscsi->client_data.partition_number);
- } else {
- pr_debug("init_msg h_vioctl rc %ld\n", rc);
- rc = ADAPT_SUCCESS;
- }
-
- if (crq->format == INIT_MSG) {
- rc = ibmvscsis_handle_init_msg(vscsi);
- } else if (crq->format == INIT_COMPLETE_MSG) {
- rc = ibmvscsis_handle_init_compl_msg(vscsi);
- } else {
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
- (uint)crq->format);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- }
-
- return rc;
-}
-
-/**
* ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
* @vscsi: Pointer to our adapter structure
* @crq: Pointer to CRQ element containing the SRP request
@@ -2391,7 +2385,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi,
break;
case VALID_TRANS_EVENT:
- rc = ibmvscsis_trans_event(vscsi, crq);
+ rc = ibmvscsis_trans_event(vscsi, crq);
break;
case VALID_INIT_MSG:
@@ -2522,7 +2516,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
srp->tag);
goto fail;
- return;
}
cmd->rsp.sol_not = srp->sol_not;
@@ -2559,6 +2552,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
data_len, attr, dir, 0);
if (rc) {
dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+ spin_lock_bh(&vscsi->intr_lock);
+ list_del(&cmd->list);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ spin_unlock_bh(&vscsi->intr_lock);
goto fail;
}
return;
@@ -2638,6 +2635,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
if (rc) {
dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
rc);
+ spin_lock_bh(&vscsi->intr_lock);
+ list_del(&cmd->list);
+ spin_unlock_bh(&vscsi->intr_lock);
cmd->se_cmd.se_tmr_req->response =
TMR_FUNCTION_REJECTED;
}
@@ -2786,36 +2786,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
}
/**
- * ibmvscsis_check_q() - Helper function to Check Init Message Valid
- * @vscsi: Pointer to our adapter structure
- *
- * Checks if a initialize message was queued by the initiatior
- * while the timing window was open. This function is called from
- * probe after the CRQ is created and interrupts are enabled.
- * It would only be used by adapters who wait for some event before
- * completing the init handshake with the client. For ibmvscsi, this
- * event is waiting for the port to be enabled.
- *
- * EXECUTION ENVIRONMENT:
- * Process level only, interrupt lock held
- */
-static long ibmvscsis_check_q(struct scsi_info *vscsi)
-{
- uint format;
- long rc;
-
- rc = ibmvscsis_check_init_msg(vscsi, &format);
- if (rc)
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- else if (format == UNUSED_FORMAT)
- vscsi->state = WAIT_ENABLED;
- else
- vscsi->state = PART_UP_WAIT_ENAB;
-
- return rc;
-}
-
-/**
* ibmvscsis_enable_change_state() - Set new state based on enabled status
* @vscsi: Pointer to our adapter structure
*
@@ -2826,77 +2796,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi)
*/
static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
{
+ int bytes;
long rc = ADAPT_SUCCESS;
-handle_state_change:
- switch (vscsi->state) {
- case WAIT_ENABLED:
- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
- switch (rc) {
- case H_SUCCESS:
- case H_DROPPED:
- case H_CLOSED:
- vscsi->state = WAIT_CONNECTION;
- rc = ADAPT_SUCCESS;
- break;
-
- case H_PARAMETER:
- break;
-
- case H_HARDWARE:
- break;
-
- default:
- vscsi->state = UNDEFINED;
- rc = H_HARDWARE;
- break;
- }
- break;
- case PART_UP_WAIT_ENAB:
- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
- switch (rc) {
- case H_SUCCESS:
- vscsi->state = CONNECTED;
- rc = ADAPT_SUCCESS;
- break;
-
- case H_DROPPED:
- case H_CLOSED:
- vscsi->state = WAIT_ENABLED;
- goto handle_state_change;
-
- case H_PARAMETER:
- break;
-
- case H_HARDWARE:
- break;
-
- default:
- rc = H_HARDWARE;
- break;
- }
- break;
-
- case WAIT_CONNECTION:
- case WAIT_IDLE:
- case SRP_PROCESSING:
- case CONNECTED:
- rc = ADAPT_SUCCESS;
- break;
- /* should not be able to get here */
- case UNCONFIGURING:
- rc = ERROR;
- vscsi->state = UNDEFINED;
- break;
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ vscsi->state = WAIT_CONNECTION;
+ rc = ibmvscsis_establish_new_q(vscsi);
+ }
- /* driver should never allow this to happen */
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- default:
- dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
- vscsi->state);
- rc = ADAPT_SUCCESS;
- break;
+ if (rc != ADAPT_SUCCESS) {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
}
return rc;
@@ -2916,7 +2828,6 @@ handle_state_change:
*/
static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
{
- long rc = 0;
int pages;
struct vio_dev *vdev = vscsi->dma_dev;
@@ -2940,22 +2851,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
return -ENOMEM;
}
- rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
- if (rc) {
- if (rc == H_CLOSED) {
- vscsi->state = WAIT_ENABLED;
- rc = 0;
- } else {
- dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- free_page((unsigned long)vscsi->cmd_q.base_addr);
- rc = -ENODEV;
- }
- } else {
- vscsi->state = WAIT_ENABLED;
- }
-
- return rc;
+ return 0;
}
/**
@@ -3270,7 +3166,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
/*
* if we are in a path where we are waiting for all pending commands
* to complete because we received a transport event and anything in
- * the command queue is for a new connection, do nothing
+ * the command queue is for a new connection, do nothing
*/
if (TARGET_STOP(vscsi)) {
vio_enable_interrupts(vscsi->dma_dev);
@@ -3314,7 +3210,7 @@ cmd_work:
* everything but transport events on the queue
*
* need to decrement the queue index so we can
- * look at the elment again
+ * look at the element again
*/
if (vscsi->cmd_q.index)
vscsi->cmd_q.index -= 1;
@@ -3378,7 +3274,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
INIT_LIST_HEAD(&vscsi->waiting_rsp);
INIT_LIST_HEAD(&vscsi->active_q);
- snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
+ snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
+ dev_name(&vdev->dev));
pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
@@ -3393,6 +3290,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
strncat(vscsi->eye, vdev->name, MAX_EYE);
vscsi->dds.unit_id = vdev->unit_address;
+ strncpy(vscsi->dds.partition_name, partition_name,
+ sizeof(vscsi->dds.partition_name));
+ vscsi->dds.partition_num = partition_number;
spin_lock_bh(&ibmvscsis_dev_lock);
list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
@@ -3469,6 +3369,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
(unsigned long)vscsi);
init_completion(&vscsi->wait_idle);
+ init_completion(&vscsi->unconfig);
snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
vscsi->work_q = create_workqueue(wq_name);
@@ -3485,31 +3386,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
goto destroy_WQ;
}
- spin_lock_bh(&vscsi->intr_lock);
- vio_enable_interrupts(vdev);
- if (rc) {
- dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
- rc = -ENODEV;
- spin_unlock_bh(&vscsi->intr_lock);
- goto free_irq;
- }
-
- if (ibmvscsis_check_q(vscsi)) {
- rc = ERROR;
- dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
- spin_unlock_bh(&vscsi->intr_lock);
- goto disable_interrupt;
- }
- spin_unlock_bh(&vscsi->intr_lock);
+ vscsi->state = WAIT_ENABLED;
dev_set_drvdata(&vdev->dev, vscsi);
return 0;
-disable_interrupt:
- vio_disable_interrupts(vdev);
-free_irq:
- free_irq(vdev->irq, vscsi);
destroy_WQ:
destroy_workqueue(vscsi->work_q);
unmap_buf:
@@ -3543,10 +3425,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
- /*
- * TBD: Need to handle if there are commands on the waiting_rsp q
- * Actually, can there still be cmds outstanding to tcm?
- */
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
+ vscsi->flags |= CFG_SLEEPING;
+ spin_unlock_bh(&vscsi->intr_lock);
+ wait_for_completion(&vscsi->unconfig);
vio_disable_interrupts(vdev);
free_irq(vdev->irq, vscsi);
@@ -3555,7 +3438,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
DMA_BIDIRECTIONAL);
kfree(vscsi->map_buf);
tasklet_kill(&vscsi->work_task);
- ibmvscsis_unregister_command_q(vscsi);
ibmvscsis_destroy_command_q(vscsi);
ibmvscsis_freetimer(vscsi);
ibmvscsis_free_cmds(vscsi);
@@ -3609,7 +3491,7 @@ static int ibmvscsis_get_system_info(void)
num = of_get_property(rootdn, "ibm,partition-no", NULL);
if (num)
- partition_number = *num;
+ partition_number = of_read_number(num, 1);
of_node_put(rootdn);
@@ -3903,18 +3785,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
}
if (tmp) {
- tport->enabled = true;
spin_lock_bh(&vscsi->intr_lock);
+ tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
pr_err("enable_change_state failed, rc %ld state %d\n",
lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
+ spin_lock_bh(&vscsi->intr_lock);
tport->enabled = false;
+ /* This simulates the server going down */
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ spin_unlock_bh(&vscsi->intr_lock);
}
- pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+ pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
return count;
}
@@ -3983,10 +3869,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = {
ATTRIBUTE_GROUPS(ibmvscsis_dev);
static struct class ibmvscsis_class = {
- .name = "ibmvscsis",
- .dev_release = ibmvscsis_dev_release,
- .class_attrs = ibmvscsis_class_attrs,
- .dev_groups = ibmvscsis_dev_groups,
+ .name = "ibmvscsis",
+ .dev_release = ibmvscsis_dev_release,
+ .class_attrs = ibmvscsis_class_attrs,
+ .dev_groups = ibmvscsis_dev_groups,
};
static struct vio_device_id ibmvscsis_device_table[] = {
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 981a0c992b6c..98b0ca79a5c5 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -204,8 +204,6 @@ struct scsi_info {
struct list_head waiting_rsp;
#define NO_QUEUE 0x00
#define WAIT_ENABLED 0X01
- /* driver has received an initialize command */
-#define PART_UP_WAIT_ENAB 0x02
#define WAIT_CONNECTION 0x04
/* have established a connection */
#define CONNECTED 0x08
@@ -259,6 +257,8 @@ struct scsi_info {
#define SCHEDULE_DISCONNECT 0x00400
/* disconnect handler is scheduled */
#define DISCONNECT_SCHEDULED 0x00800
+ /* remove function is sleeping */
+#define CFG_SLEEPING 0x01000
u32 flags;
/* adapter lock */
spinlock_t intr_lock;
@@ -287,6 +287,7 @@ struct scsi_info {
struct workqueue_struct *work_q;
struct completion wait_idle;
+ struct completion unconfig;
struct device dev;
struct vio_dev *dma_dev;
struct srp_target target;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 532474109624..835c59c777f2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -186,16 +186,16 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
};
static const struct ipr_chip_t ipr_chip[] = {
- { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds[] = {
@@ -9439,23 +9439,11 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
+ int i;
- if (ioa_cfg->intr_flag == IPR_USE_MSI ||
- ioa_cfg->intr_flag == IPR_USE_MSIX) {
- int i;
- for (i = 0; i < ioa_cfg->nvectors; i++)
- free_irq(ioa_cfg->vectors_info[i].vec,
- &ioa_cfg->hrrq[i]);
- } else
- free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
-
- if (ioa_cfg->intr_flag == IPR_USE_MSI) {
- pci_disable_msi(pdev);
- ioa_cfg->intr_flag &= ~IPR_USE_MSI;
- } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
- pci_disable_msix(pdev);
- ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
- }
+ for (i = 0; i < ioa_cfg->nvectors; i++)
+ free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
+ pci_free_irq_vectors(pdev);
}
/**
@@ -9883,45 +9871,6 @@ static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
}
}
-static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
-{
- struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
- int i, vectors;
-
- for (i = 0; i < ARRAY_SIZE(entries); ++i)
- entries[i].entry = i;
-
- vectors = pci_enable_msix_range(ioa_cfg->pdev,
- entries, 1, ipr_number_of_msix);
- if (vectors < 0) {
- ipr_wait_for_pci_err_recovery(ioa_cfg);
- return vectors;
- }
-
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = entries[i].vector;
- ioa_cfg->nvectors = vectors;
-
- return 0;
-}
-
-static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
-{
- int i, vectors;
-
- vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
- if (vectors < 0) {
- ipr_wait_for_pci_err_recovery(ioa_cfg);
- return vectors;
- }
-
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
- ioa_cfg->nvectors = vectors;
-
- return 0;
-}
-
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
@@ -9934,19 +9883,20 @@ static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
}
}
-static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
+ struct pci_dev *pdev)
{
int i, rc;
for (i = 1; i < ioa_cfg->nvectors; i++) {
- rc = request_irq(ioa_cfg->vectors_info[i].vec,
+ rc = request_irq(pci_irq_vector(pdev, i),
ipr_isr_mhrrq,
0,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
while (--i >= 0)
- free_irq(ioa_cfg->vectors_info[i].vec,
+ free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
}
@@ -9984,8 +9934,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
* ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
* @pdev: PCI device struct
*
- * Description: The return value from pci_enable_msi_range() can not always be
- * trusted. This routine sets up and initiates a test interrupt to determine
+ * Description: This routine sets up and initiates a test interrupt to determine
* if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
*
@@ -9997,6 +9946,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int rc;
volatile u32 int_reg;
unsigned long lock_flags = 0;
+ int irq = pci_irq_vector(pdev, 0);
ENTER;
@@ -10008,15 +9958,12 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
- else
- rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (rc) {
- dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+ dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
return rc;
} else if (ipr_debug)
- dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+ dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
@@ -10033,10 +9980,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
- else
- free_irq(pdev->irq, ioa_cfg);
+ free_irq(irq, ioa_cfg);
LEAVE;
@@ -10060,6 +10004,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc, interrupts;
unsigned long lock_flags, driver_lock_flags;
+ unsigned int irq_flag;
ENTER;
@@ -10175,18 +10120,18 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
}
- if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
- ipr_enable_msix(ioa_cfg) == 0)
- ioa_cfg->intr_flag = IPR_USE_MSIX;
- else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
- ipr_enable_msi(ioa_cfg) == 0)
- ioa_cfg->intr_flag = IPR_USE_MSI;
- else {
- ioa_cfg->intr_flag = IPR_USE_LSI;
- ioa_cfg->clear_isr = 1;
- ioa_cfg->nvectors = 1;
- dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ irq_flag = PCI_IRQ_LEGACY;
+ if (ioa_cfg->ipr_chip->has_msi)
+ irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
+ rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
+ if (rc < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ goto cleanup_nomem;
}
+ ioa_cfg->nvectors = rc;
+
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ ioa_cfg->clear_isr = 1;
pci_set_master(pdev);
@@ -10199,33 +10144,23 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
}
}
- if (ioa_cfg->intr_flag == IPR_USE_MSI ||
- ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ if (pdev->msi_enabled || pdev->msix_enabled) {
rc = ipr_test_msi(ioa_cfg, pdev);
- if (rc == -EOPNOTSUPP) {
+ switch (rc) {
+ case 0:
+ dev_info(&pdev->dev,
+ "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
+ pdev->msix_enabled ? "-X" : "");
+ break;
+ case -EOPNOTSUPP:
ipr_wait_for_pci_err_recovery(ioa_cfg);
- if (ioa_cfg->intr_flag == IPR_USE_MSI) {
- ioa_cfg->intr_flag &= ~IPR_USE_MSI;
- pci_disable_msi(pdev);
- } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
- ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
- pci_disable_msix(pdev);
- }
+ pci_free_irq_vectors(pdev);
- ioa_cfg->intr_flag = IPR_USE_LSI;
ioa_cfg->nvectors = 1;
- }
- else if (rc)
+ ioa_cfg->clear_isr = 1;
+ break;
+ default:
goto out_msi_disable;
- else {
- if (ioa_cfg->intr_flag == IPR_USE_MSI)
- dev_info(&pdev->dev,
- "Request for %d MSIs succeeded with starting IRQ: %d\n",
- ioa_cfg->nvectors, pdev->irq);
- else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- dev_info(&pdev->dev,
- "Request for %d MSIXs succeeded.",
- ioa_cfg->nvectors);
}
}
@@ -10273,15 +10208,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSI
- || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ if (pdev->msi_enabled || pdev->msix_enabled) {
name_msi_vectors(ioa_cfg);
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
- 0,
+ rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
ioa_cfg->vectors_info[0].desc,
&ioa_cfg->hrrq[0]);
if (!rc)
- rc = ipr_request_other_msi_irqs(ioa_cfg);
+ rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
} else {
rc = request_irq(pdev->irq, ipr_isr,
IRQF_SHARED,
@@ -10323,10 +10256,7 @@ cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
ipr_wait_for_pci_err_recovery(ioa_cfg);
- if (ioa_cfg->intr_flag == IPR_USE_MSI)
- pci_disable_msi(pdev);
- else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
cleanup_nomem:
iounmap(ipr_regs);
out_disable:
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 8995053d01b3..b7d2e98eb45b 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1413,10 +1413,7 @@ struct ipr_chip_cfg_t {
struct ipr_chip_t {
u16 vendor;
u16 device;
- u16 intr_type;
-#define IPR_USE_LSI 0x00
-#define IPR_USE_MSI 0x01
-#define IPR_USE_MSIX 0x02
+ bool has_msi;
u16 sis_type;
#define IPR_SIS32 0x00
#define IPR_SIS64 0x01
@@ -1593,11 +1590,9 @@ struct ipr_ioa_cfg {
struct ipr_cmnd **ipr_cmnd_list;
dma_addr_t *ipr_cmnd_list_dma;
- u16 intr_flag;
unsigned int nvectors;
struct {
- unsigned short vec;
char desc[22];
} vectors_info[IPR_MAX_MSIX_VECTORS];
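
/*
 * A generic sketch of the pci_alloc_irq_vectors() pattern that the ipr
 * changes above adopt; my_setup_irqs(), my_isr and "my-driver" are
 * hypothetical placeholders rather than ipr symbols.
 */
static int my_setup_irqs(struct pci_dev *pdev, irq_handler_t my_isr,
			 void *dev_id, unsigned int max_vecs)
{
	int nvec, i, rc;

	/* Prefer MSI-X, then MSI, then a single legacy interrupt. */
	nvec = pci_alloc_irq_vectors(pdev, 1, max_vecs,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps a vector index to a Linux IRQ number */
		rc = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
				 "my-driver", dev_id);
		if (rc)
			goto err;
	}
	return nvec;

err:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), dev_id);
	pci_free_irq_vectors(pdev);	/* also disables MSI/MSI-X */
	return rc;
}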
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 02cb76fd4420..3419e1bcdff6 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -2241,9 +2241,6 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
uint8_t minor;
uint8_t subminor;
uint8_t *buffer;
- char hexDigits[] =
- { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
- 'D', 'E', 'F' };
METHOD_TRACE("ips_get_bios_version", 1);
@@ -2374,13 +2371,13 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
}
}
- ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4];
+ ha->bios_version[0] = hex_asc_upper_hi(major);
ha->bios_version[1] = '.';
- ha->bios_version[2] = hexDigits[major & 0x0F];
- ha->bios_version[3] = hexDigits[subminor];
+ ha->bios_version[2] = hex_asc_upper_lo(major);
+ ha->bios_version[3] = hex_asc_upper_lo(subminor);
ha->bios_version[4] = '.';
- ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4];
- ha->bios_version[6] = hexDigits[minor & 0x0F];
+ ha->bios_version[5] = hex_asc_upper_hi(minor);
+ ha->bios_version[6] = hex_asc_upper_lo(minor);
ha->bios_version[7] = 0;
}
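
/*
 * For reference, the hex_asc_upper_*() helpers used above come from
 * <linux/kernel.h> and are equivalent to the removed hexDigits[] lookup:
 *
 *	hex_asc_upper_hi(x)  ==  "0123456789ABCDEF"[(x & 0xF0) >> 4]
 *	hex_asc_upper_lo(x)  ==  "0123456789ABCDEF"[x & 0x0F]
 *
 * e.g. major = 0x4A yields bios_version[0] = '4' and bios_version[2] = 'A'.
 */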
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 22a9bb1abae1..b3539928073c 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -295,7 +295,6 @@ enum sci_controller_states {
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
struct isci_pci_info {
- struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
struct isci_host *hosts[SCI_MAX_CONTROLLERS];
struct isci_orom *orom;
};
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 77128d680e3b..0b5b5db0d0f8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -350,16 +350,12 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
*/
num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
- for (i = 0; i < num_msix; i++)
- pci_info->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
- if (err)
+ err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX);
+ if (err < 0)
goto intx;
for (i = 0; i < num_msix; i++) {
int id = i / SCI_NUM_MSI_X_INT;
- struct msix_entry *msix = &pci_info->msix_entries[i];
irq_handler_t isr;
ihost = pci_info->hosts[id];
@@ -369,8 +365,8 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
else
isr = isci_msix_isr;
- err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
- DRV_NAME"-msix", ihost);
+ err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ isr, 0, DRV_NAME"-msix", ihost);
if (!err)
continue;
@@ -378,18 +374,19 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
while (i--) {
id = i / SCI_NUM_MSI_X_INT;
ihost = pci_info->hosts[id];
- msix = &pci_info->msix_entries[i];
- devm_free_irq(&pdev->dev, msix->vector, ihost);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ ihost);
}
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
goto intx;
}
return 0;
intx:
for_each_isci_host(i, ihost, pdev) {
- err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
- IRQF_SHARED, DRV_NAME"-intx", ihost);
+ err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
+ isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx",
+ ihost);
if (err)
break;
}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 8ac646e5eddc..a2bbe46f8ccb 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -54,6 +54,7 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
len = pci_biosrom_size(pdev);
rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
if (!rom) {
+ pci_unmap_biosrom(oprom);
dev_warn(&pdev->dev,
"Unable to allocate memory for orom\n");
return NULL;
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100638a2..e3f2a5359d71 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
@@ -454,7 +457,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
* the device since it's being invalidated anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
- "suspeneded by hardware while being "
+ "suspended by hardware while being "
"invalidated.\n", __func__, sci_rnc);
break;
default:
@@ -473,7 +476,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
* the device since it's being resumed anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
- "suspeneded by hardware while being resumed.\n",
+ "suspended by hardware while being resumed.\n",
__func__, sci_rnc);
break;
default:
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b709d2b20880..47f66e949745 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2473,7 +2473,7 @@ static void isci_request_process_response_iu(
"%s: resp_iu = %p "
"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
"resp_iu->response_data_len = %x, "
- "resp_iu->sense_data_len = %x\nrepsonse data: ",
+ "resp_iu->sense_data_len = %x\nresponse data: ",
__func__,
resp_iu,
resp_iu->status,
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 880a9068ca12..6103231104da 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -68,10 +68,14 @@ static void fc_disc_stop_rports(struct fc_disc *disc)
lport = fc_disc_lport(disc);
- mutex_lock(&disc->disc_mutex);
- list_for_each_entry_rcu(rdata, &disc->rports, peers)
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&disc->disc_mutex);
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ if (kref_get_unless_zero(&rdata->kref)) {
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ }
+ rcu_read_unlock();
}
/**
@@ -150,7 +154,7 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
break;
}
}
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
/*
* If not doing a complete rediscovery, do GPN_ID on
@@ -178,7 +182,7 @@ reject:
FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -289,15 +293,19 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
* Skip ports which were never discovered. These are the dNS port
* and ports which were created by PLOGI.
*/
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
- if (!rdata->disc_id)
+ if (!kref_get_unless_zero(&rdata->kref))
continue;
- if (rdata->disc_id == disc->disc_id)
- lport->tt.rport_login(rdata);
- else
- lport->tt.rport_logoff(rdata);
+ if (rdata->disc_id) {
+ if (rdata->disc_id == disc->disc_id)
+ fc_rport_login(rdata);
+ else
+ fc_rport_logoff(rdata);
+ }
+ kref_put(&rdata->kref, fc_rport_destroy);
}
-
+ rcu_read_unlock();
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
mutex_lock(&disc->disc_mutex);
@@ -446,7 +454,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
if (ids.port_id != lport->port_id &&
ids.port_name != lport->wwpn) {
- rdata = lport->tt.rport_create(lport, ids.port_id);
+ rdata = fc_rport_create(lport, ids.port_id);
if (rdata) {
rdata->ids.port_name = ids.port_name;
rdata->disc_id = disc->disc_id;
@@ -592,7 +600,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
lport = rdata->local_port;
disc = &lport->disc;
- mutex_lock(&disc->disc_mutex);
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
if (IS_ERR(fp))
@@ -607,37 +614,41 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
goto redisc;
pn = (struct fc_ns_gid_pn *)(cp + 1);
port_name = get_unaligned_be64(&pn->fn_wwpn);
+ mutex_lock(&rdata->rp_mutex);
if (rdata->ids.port_name == -1)
rdata->ids.port_name = port_name;
else if (rdata->ids.port_name != port_name) {
FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
"Port-id %6.6x wwpn %16.16llx\n",
rdata->ids.port_id, port_name);
- lport->tt.rport_logoff(rdata);
-
- new_rdata = lport->tt.rport_create(lport,
- rdata->ids.port_id);
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_logoff(rdata);
+ mutex_lock(&lport->disc.disc_mutex);
+ new_rdata = fc_rport_create(lport, rdata->ids.port_id);
+ mutex_unlock(&lport->disc.disc_mutex);
if (new_rdata) {
new_rdata->disc_id = disc->disc_id;
- lport->tt.rport_login(new_rdata);
+ fc_rport_login(new_rdata);
}
goto out;
}
rdata->disc_id = disc->disc_id;
- lport->tt.rport_login(rdata);
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_login(rdata);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
cp->ct_reason, cp->ct_explan);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
} else {
FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
ntohs(cp->ct_cmd));
redisc:
+ mutex_lock(&disc->disc_mutex);
fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
}
out:
- mutex_unlock(&disc->disc_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -678,7 +689,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
struct fc_rport_priv *rdata;
- rdata = lport->tt.rport_create(lport, dp->port_id);
+ rdata = fc_rport_create(lport, dp->port_id);
if (!rdata)
return -ENOMEM;
rdata->disc_id = 0;
@@ -708,7 +719,7 @@ static void fc_disc_stop(struct fc_lport *lport)
static void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
}
/**
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index c2384d501470..6384a98048af 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -67,7 +67,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
FC_FCTL_REQ, 0);
- return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
+ return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
}
EXPORT_SYMBOL(fc_elsct_send);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 16ca31ad5ec0..42bcf7f3a0f9 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -94,6 +94,7 @@ struct fc_exch_pool {
struct fc_exch_mgr {
struct fc_exch_pool __percpu *pool;
mempool_t *ep_pool;
+ struct fc_lport *lport;
enum fc_class class;
struct kref kref;
u16 min_xid;
@@ -362,8 +363,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
fc_exch_hold(ep); /* hold for timer */
if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
- msecs_to_jiffies(timer_msec)))
+ msecs_to_jiffies(timer_msec))) {
+ FC_EXCH_DBG(ep, "Exchange already queued\n");
fc_exch_release(ep);
+ }
}
/**
@@ -406,6 +409,8 @@ static int fc_exch_done_locked(struct fc_exch *ep)
return rc;
}
+static struct fc_exch fc_quarantine_exch;
+
/**
* fc_exch_ptr_get() - Return an exchange from an exchange pool
* @pool: Exchange Pool to get an exchange from
@@ -450,14 +455,17 @@ static void fc_exch_delete(struct fc_exch *ep)
/* update cache of free slot */
index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
- if (pool->left == FC_XID_UNKNOWN)
- pool->left = index;
- else if (pool->right == FC_XID_UNKNOWN)
- pool->right = index;
- else
- pool->next_index = index;
-
- fc_exch_ptr_set(pool, index, NULL);
+ if (!(ep->state & FC_EX_QUARANTINE)) {
+ if (pool->left == FC_XID_UNKNOWN)
+ pool->left = index;
+ else if (pool->right == FC_XID_UNKNOWN)
+ pool->right = index;
+ else
+ pool->next_index = index;
+ fc_exch_ptr_set(pool, index, NULL);
+ } else {
+ fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
+ }
list_del(&ep->ex_list);
spin_unlock_bh(&pool->lock);
fc_exch_release(ep); /* drop hold for exch in mp */
@@ -525,8 +533,7 @@ out:
* Note: The frame will be freed either by a direct call to fc_frame_free(fp)
* or indirectly by calling libfc_function_template.frame_send().
*/
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
- struct fc_frame *fp)
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
{
struct fc_exch *ep;
int error;
@@ -536,6 +543,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
spin_unlock_bh(&ep->ex_lock);
return error;
}
+EXPORT_SYMBOL(fc_seq_send);
/**
* fc_seq_alloc() - Allocate a sequence for a given exchange
@@ -577,7 +585,7 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
* for a given sequence/exchange pair
* @sp: The sequence/exchange to get a new exchange for
*/
-static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
@@ -587,16 +595,16 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
return sp;
}
+EXPORT_SYMBOL(fc_seq_start_next);
/*
* Set the response handler for the exchange associated with a sequence.
*
* Note: May sleep if invoked from outside a response handler.
*/
-static void fc_seq_set_resp(struct fc_seq *sp,
- void (*resp)(struct fc_seq *, struct fc_frame *,
- void *),
- void *arg)
+void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+ void *arg)
{
struct fc_exch *ep = fc_seq_exch(sp);
DEFINE_WAIT(wait);
@@ -615,12 +623,20 @@ static void fc_seq_set_resp(struct fc_seq *sp,
ep->arg = arg;
spin_unlock_bh(&ep->ex_lock);
}
+EXPORT_SYMBOL(fc_seq_set_resp);
/**
* fc_exch_abort_locked() - Abort an exchange
* @ep: The exchange to be aborted
* @timer_msec: The period of time to wait before aborting
*
+ * Abort an exchange and sequence. Generally called because of an
+ * exchange timeout or an abort from the upper layer.
+ *
+ * A timer_msec can be specified for the abort timeout. If a non-zero
+ * timer_msec value is specified, the exchange resp handler will be
+ * called with a timeout error if no response to the abort is received.
+ *
* Locking notes: Called with exch lock held
*
* Return value: 0 on success else error code
@@ -632,9 +648,13 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
struct fc_frame *fp;
int error;
+ FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
- ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
+ ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+ FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
+ ep->esb_stat, ep->state);
return -ENXIO;
+ }
/*
* Send the abort on a new sequence if possible.
@@ -680,8 +700,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
*
* Return value: 0 on success else error code
*/
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
- unsigned int timer_msec)
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
struct fc_exch *ep;
int error;
@@ -758,7 +777,7 @@ static void fc_exch_timeout(struct work_struct *work)
u32 e_stat;
int rc = 1;
- FC_EXCH_DBG(ep, "Exchange timed out\n");
+ FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
@@ -821,14 +840,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
- index = pool->left;
- pool->left = FC_XID_UNKNOWN;
- goto hit;
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
+ index = pool->left;
+ pool->left = FC_XID_UNKNOWN;
+ goto hit;
+ }
}
if (pool->right != FC_XID_UNKNOWN) {
- index = pool->right;
- pool->right = FC_XID_UNKNOWN;
- goto hit;
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
+ index = pool->right;
+ pool->right = FC_XID_UNKNOWN;
+ goto hit;
+ }
}
index = pool->next_index;
@@ -888,14 +911,19 @@ err:
* EM is selected when a NULL match function pointer is encountered
* or when a call to a match function returns true.
*/
-static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
- struct fc_frame *fp)
+static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+ struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
+ struct fc_exch *ep;
- list_for_each_entry(ema, &lport->ema_list, ema_list)
- if (!ema->match || ema->match(fp))
- return fc_exch_em_alloc(lport, ema->mp);
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if (!ema->match || ema->match(fp)) {
+ ep = fc_exch_em_alloc(lport, ema->mp);
+ if (ep)
+ return ep;
+ }
+ }
return NULL;
}
@@ -906,14 +934,17 @@ static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
*/
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
+ struct fc_lport *lport = mp->lport;
struct fc_exch_pool *pool;
struct fc_exch *ep = NULL;
u16 cpu = xid & fc_cpu_mask;
+ if (xid == FC_XID_UNKNOWN)
+ return NULL;
+
if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
- printk_ratelimited(KERN_ERR
- "libfc: lookup request for XID = %d, "
- "indicates invalid CPU %d\n", xid, cpu);
+ pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
+ lport->host->host_no, lport->port_id, xid, cpu);
return NULL;
}
@@ -921,6 +952,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
+ if (ep == &fc_quarantine_exch) {
+ FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
+ ep = NULL;
+ }
if (ep) {
WARN_ON(ep->xid != xid);
fc_exch_hold(ep);
@@ -938,7 +973,7 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
*
* Note: May sleep if invoked from outside a response handler.
*/
-static void fc_exch_done(struct fc_seq *sp)
+void fc_exch_done(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
int rc;
@@ -951,6 +986,7 @@ static void fc_exch_done(struct fc_seq *sp)
if (!rc)
fc_exch_delete(ep);
}
+EXPORT_SYMBOL(fc_exch_done);
/**
* fc_exch_resp() - Allocate a new exchange for a response frame
@@ -1197,8 +1233,8 @@ static void fc_exch_set_addr(struct fc_exch *ep,
*
* The received frame is not freed.
*/
-static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
- struct fc_seq_els_data *els_data)
+void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data)
{
switch (els_cmd) {
case ELS_LS_RJT:
@@ -1217,6 +1253,7 @@ static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
}
}
+EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
/**
* fc_seq_send_last() - Send a sequence that is the last in the exchange
@@ -1258,8 +1295,10 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
*/
if (fc_sof_needs_ack(fr_sof(rx_fp))) {
fp = fc_frame_alloc(lport, 0);
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
return;
+ }
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_ACK_1;
@@ -1312,13 +1351,18 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
struct fc_frame_header *rx_fh;
struct fc_frame_header *fh;
struct fc_ba_rjt *rp;
+ struct fc_seq *sp;
struct fc_lport *lport;
unsigned int f_ctl;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rp));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "Drop BA_RJT request, out of memory\n");
return;
+ }
fh = fc_frame_header_get(fp);
rx_fh = fc_frame_header_get(rx_fp);
@@ -1383,14 +1427,17 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
if (!ep)
goto reject;
+ FC_EXCH_DBG(ep, "exch: ABTS received\n");
fp = fc_frame_alloc(ep->lp, sizeof(*ap));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
goto free;
+ }
spin_lock_bh(&ep->ex_lock);
if (ep->esb_stat & ESB_ST_COMPLETE) {
spin_unlock_bh(&ep->ex_lock);
-
+ FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
fc_frame_free(fp);
goto reject;
}
@@ -1433,7 +1480,7 @@ reject:
* A reference will be held on the exchange/sequence for the caller, which
* must call fc_seq_release().
*/
-static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
@@ -1447,15 +1494,17 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
break;
return fr_seq(fp);
}
+EXPORT_SYMBOL(fc_seq_assign);
/**
* fc_seq_release() - Release the hold
* @sp: The sequence.
*/
-static void fc_seq_release(struct fc_seq *sp)
+void fc_seq_release(struct fc_seq *sp)
{
fc_exch_release(fc_seq_exch(sp));
}
+EXPORT_SYMBOL(fc_seq_release);
/**
* fc_exch_recv_req() - Handler for an incoming request
@@ -1491,7 +1540,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* The upper-level protocol may request one later, if needed.
*/
if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
- return lport->tt.lport_recv(lport, fp);
+ return fc_lport_recv(lport, fp);
reject = fc_seq_lookup_recip(lport, mp, fp);
if (reject == FC_RJT_NONE) {
@@ -1512,7 +1561,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* first.
*/
if (!fc_invoke_resp(ep, sp, fp))
- lport->tt.lport_recv(lport, fp);
+ fc_lport_recv(lport, fp);
fc_exch_release(ep); /* release from lookup */
} else {
FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
@@ -1562,9 +1611,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (fc_sof_is_init(sof)) {
sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id;
- } else if (sp->id != fh->fh_seq_id) {
- atomic_inc(&mp->stats.seq_not_found);
- goto rel;
}
f_ctl = ntoh24(fh->fh_f_ctl);
@@ -1761,7 +1807,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
fc_frame_free(fp);
break;
case FC_RCTL_BA_ABTS:
- fc_exch_recv_abts(ep, fp);
+ if (ep)
+ fc_exch_recv_abts(ep, fp);
+ else
+ fc_frame_free(fp);
break;
default: /* ignore junk */
fc_frame_free(fp);
@@ -1784,11 +1833,16 @@ static void fc_seq_ls_acc(struct fc_frame *rx_fp)
struct fc_lport *lport;
struct fc_els_ls_acc *acc;
struct fc_frame *fp;
+ struct fc_seq *sp;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*acc));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_ACC, out of memory\n");
return;
+ }
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
acc->la_cmd = ELS_LS_ACC;
@@ -1811,11 +1865,16 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
struct fc_lport *lport;
struct fc_els_ls_rjt *rjt;
struct fc_frame *fp;
+ struct fc_seq *sp;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rjt));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_ACC, out of memory\n");
return;
+ }
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
memset(rjt, 0, sizeof(*rjt));
rjt->er_cmd = ELS_LS_RJT;
@@ -1960,8 +2019,7 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
enum fc_els_rjt_explan explan;
u32 sid;
- u16 rxid;
- u16 oxid;
+ u16 xid, rxid, oxid;
lport = fr_dev(rfp);
rp = fc_frame_payload_get(rfp, sizeof(*rp));
@@ -1972,18 +2030,35 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
rxid = ntohs(rp->rec_rx_id);
oxid = ntohs(rp->rec_ox_id);
- ep = fc_exch_lookup(lport,
- sid == fc_host_port_id(lport->host) ? oxid : rxid);
explan = ELS_EXPL_OXID_RXID;
- if (!ep)
+ if (sid == fc_host_port_id(lport->host))
+ xid = oxid;
+ else
+ xid = rxid;
+ if (xid == FC_XID_UNKNOWN) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: invalid rxid %x oxid %x\n",
+ sid, rxid, oxid);
+ goto reject;
+ }
+ ep = fc_exch_lookup(lport, xid);
+ if (!ep) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: rxid %x oxid %x not found\n",
+ sid, rxid, oxid);
goto reject;
+ }
+ FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
+ sid, rxid, oxid);
if (ep->oid != sid || oxid != ep->oxid)
goto rel;
if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
goto rel;
fp = fc_frame_alloc(lport, sizeof(*acc));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
goto out;
+ }
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
@@ -2065,6 +2140,24 @@ cleanup:
* @arg: The argument to be passed to the response handler
* @timer_msec: The timeout period for the exchange
*
+ * The exchange response handler is set in this routine to the resp()
+ * function pointer. It can be called in two scenarios: if a timeout
+ * occurs or if a response frame is received for the exchange. The
+ * fc_frame pointer in the response handler will also indicate a
+ * timeout as an error using the IS_ERR related macros.
+ *
+ * The exchange destructor handler is also set in this routine.
+ * The destructor handler is invoked by the EM layer when the exchange
+ * is about to be freed; the caller can use it to free its own
+ * resources along with the exchange.
+ *
+ * The arg is passed back to the resp and destructor handlers.
+ *
+ * The timeout value (in msec) for an exchange is set if a non-zero
+ * timer_msec argument is specified. The timer is canceled when
+ * it fires or when the exchange is done. The exchange timeout handler
+ * is registered by the EM layer.
+ *
* The frame pointer with some of the header's fields must be
* filled before calling this routine, those fields are:
*
@@ -2075,14 +2168,13 @@ cleanup:
* - frame control
* - parameter or relative offset
*/
-static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
- struct fc_frame *fp,
- void (*resp)(struct fc_seq *,
- struct fc_frame *fp,
- void *arg),
- void (*destructor)(struct fc_seq *,
- void *),
- void *arg, u32 timer_msec)
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void (*destructor)(struct fc_seq *, void *),
+ void *arg, u32 timer_msec)
{
struct fc_exch *ep;
struct fc_seq *sp = NULL;
@@ -2101,7 +2193,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
ep->resp = resp;
ep->destructor = destructor;
ep->arg = arg;
- ep->r_a_tov = FC_DEF_R_A_TOV;
+ ep->r_a_tov = lport->r_a_tov;
ep->lp = lport;
sp = &ep->seq;
@@ -2135,6 +2227,7 @@ err:
fc_exch_delete(ep);
return NULL;
}
+EXPORT_SYMBOL(fc_exch_seq_send);
/**
* fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
@@ -2176,6 +2269,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
return;
retry:
+ FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
spin_unlock_bh(&ep->ex_lock);
@@ -2218,6 +2312,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
if (!ep)
goto reject;
spin_lock_bh(&ep->ex_lock);
+ FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
+ sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
if (ep->oxid != ntohs(rp->rrq_ox_id))
goto unlock_reject;
if (ep->rxid != ntohs(rp->rrq_rx_id) &&
@@ -2385,6 +2481,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
return NULL;
mp->class = class;
+ mp->lport = lport;
/* adjust em exch xid range for offload */
mp->min_xid = min_xid;
@@ -2558,36 +2655,9 @@ EXPORT_SYMBOL(fc_exch_recv);
*/
int fc_exch_init(struct fc_lport *lport)
{
- if (!lport->tt.seq_start_next)
- lport->tt.seq_start_next = fc_seq_start_next;
-
- if (!lport->tt.seq_set_resp)
- lport->tt.seq_set_resp = fc_seq_set_resp;
-
- if (!lport->tt.exch_seq_send)
- lport->tt.exch_seq_send = fc_exch_seq_send;
-
- if (!lport->tt.seq_send)
- lport->tt.seq_send = fc_seq_send;
-
- if (!lport->tt.seq_els_rsp_send)
- lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
-
- if (!lport->tt.exch_done)
- lport->tt.exch_done = fc_exch_done;
-
if (!lport->tt.exch_mgr_reset)
lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
- if (!lport->tt.seq_exch_abort)
- lport->tt.seq_exch_abort = fc_seq_exch_abort;
-
- if (!lport->tt.seq_assign)
- lport->tt.seq_assign = fc_seq_assign;
-
- if (!lport->tt.seq_release)
- lport->tt.seq_release = fc_seq_release;
-
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
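
/*
 * A before/after sketch of the libfc conversion above: the exchange-layer
 * entry points are now exported functions rather than
 * libfc_function_template pointers, so ULP callers change from indirect to
 * direct calls, e.g.:
 *
 *	sp = lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
 *	lport->tt.exch_done(sp);
 *
 * becomes
 *
 *	sp = fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
 *	fc_exch_done(sp);
 *
 * The signatures are unchanged; only the indirection through fc_lport is
 * removed.
 */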
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5121272f28fd..0e67621477a8 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -122,6 +122,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
#define FC_HRD_ERROR 9
#define FC_CRC_ERROR 10
#define FC_TIMED_OUT 11
+#define FC_TRANS_RESET 12
/*
* Error recovery timeout values.
@@ -195,7 +196,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
* @seq: The sequence that the FCP packet is on (required by destructor API)
* @fsp: The FCP packet to be released
*
- * This routine is called by a destructor callback in the exch_seq_send()
+ * This routine is called by a destructor callback in the fc_exch_seq_send()
* routine of the libfc Transport Template. The 'struct fc_seq' is a required
* argument even though it is not used by this routine.
*
@@ -253,8 +254,21 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
*/
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
- if (!(fsp->state & FC_SRB_COMPL))
+ if (!(fsp->state & FC_SRB_COMPL)) {
mod_timer(&fsp->timer, jiffies + delay);
+ fsp->timer_delay = delay;
+ }
+}
+
+static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp)
+{
+ fsp->state |= FC_SRB_ABORTED;
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+ if (fsp->wait_for_comp)
+ complete(&fsp->tm_done);
+ else
+ fc_fcp_complete_locked(fsp);
}
/**
@@ -264,6 +278,8 @@ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
*/
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
+ int rc;
+
if (!fsp->seq_ptr)
return -EINVAL;
@@ -271,7 +287,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
put_cpu();
fsp->state |= FC_SRB_ABORT_PENDING;
- return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+ rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
+ /*
+ * fc_seq_exch_abort() might return -ENXIO if
+ * the sequence is already completed
+ */
+ if (rc == -ENXIO) {
+ fc_fcp_abort_done(fsp);
+ rc = 0;
+ }
+ return rc;
}
/**
@@ -283,16 +308,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
* fc_io_compl() will notify the SCSI-ml that the I/O is done.
* The SCSI-ml will retry the command.
*/
-static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code)
{
if (fsp->seq_ptr) {
- fsp->lp->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->state &= ~FC_SRB_ABORT_PENDING;
fsp->io_status = 0;
- fsp->status_code = FC_ERROR;
+ fsp->status_code = status_code;
fc_fcp_complete_locked(fsp);
}
@@ -402,8 +427,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
if (!can_queue)
can_queue = 1;
lport->host->can_queue = can_queue;
- shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
- "Reducing can_queue to %d.\n", can_queue);
unlock:
spin_unlock_irqrestore(lport->host->host_lock, flags);
@@ -430,10 +453,29 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
put_cpu();
/* error case */
fc_fcp_can_queue_ramp_down(lport);
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: Could not allocate frame, "
+ "reducing can_queue to %d.\n", lport->host->can_queue);
return NULL;
}
/**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ *
+ * Returns rec tov in jiffies as rpriv->e_d_tov, bounded below by FC_DEF_E_D_TOV, plus 1 second
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+ struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
+ unsigned int e_d_tov = FC_DEF_E_D_TOV;
+
+ if (rpriv && rpriv->e_d_tov > e_d_tov)
+ e_d_tov = rpriv->e_d_tov;
+ return msecs_to_jiffies(e_d_tov) + HZ;
+}
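For context, this helper feeds the FCP packet timer wherever it is re-armed later in this file; a minimal illustrative fragment of the call pattern (rearm_fcp_timer is a hypothetical wrapper mirroring the hunks below):

	static void rearm_fcp_timer(struct fc_fcp_pkt *fsp)
	{
		/* arm the per-packet timer with the REC tolerance value */
		fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
	}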
+
+/**
* fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
* @fsp: The FCP packet the data is on
* @fp: The data frame
@@ -536,8 +578,10 @@ crc_err:
* and completes the transfer, call the completion handler.
*/
if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
- fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+ fsp->xfer_len == fsp->data_len - fsp->scsi_resid) {
+ FC_FCP_DBG(fsp, "complete out-of-order sequence\n");
fc_fcp_complete_locked(fsp);
+ }
return;
err:
fc_fcp_recovery(fsp, host_bcode);
@@ -609,7 +653,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
remaining = seq_blen;
fh_parm_offset = frame_offset = offset;
tlen = 0;
- seq = lport->tt.seq_start_next(seq);
+ seq = fc_seq_start_next(seq);
f_ctl = FC_FC_REL_OFF;
WARN_ON(!seq);
@@ -687,7 +731,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
/*
* send fragment using for a sequence.
*/
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
WARN_ON(1); /* send error should be rare */
return error;
@@ -727,15 +771,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
ba_done = 0;
}
- if (ba_done) {
- fsp->state |= FC_SRB_ABORTED;
- fsp->state &= ~FC_SRB_ABORT_PENDING;
-
- if (fsp->wait_for_comp)
- complete(&fsp->tm_done);
- else
- fc_fcp_complete_locked(fsp);
- }
+ if (ba_done)
+ fc_fcp_abort_done(fsp);
}
/**
@@ -764,8 +801,11 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fh = fc_frame_header_get(fp);
r_ctl = fh->fh_r_ctl;
- if (lport->state != LPORT_ST_READY)
+ if (lport->state != LPORT_ST_READY) {
+ FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n",
+ lport->state, r_ctl);
goto out;
+ }
if (fc_fcp_lock_pkt(fsp))
goto out;
@@ -774,8 +814,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
goto unlock;
}
- if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+ if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) {
+ FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl);
goto unlock;
+ }
if (r_ctl == FC_RCTL_DD_DATA_DESC) {
/*
@@ -910,7 +952,16 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Wait at least one jiffy to see if it is delivered.
* If this expires without data, we may do SRR.
*/
- fc_fcp_timer_set(fsp, 2);
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n",
+ fsp->rport->port_id);
+ return;
+ }
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun "
+ "len %x, data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
return;
}
fsp->status_code = FC_DATA_OVRRUN;
@@ -959,8 +1010,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
if (fsp->cdb_status == SAM_STAT_GOOD &&
fsp->xfer_len < fsp->data_len && !fsp->io_status &&
(!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
- fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+ FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n",
+ fsp->xfer_len, fsp->data_len);
fsp->status_code = FC_DATA_UNDRUN;
+ }
}
seq = fsp->seq_ptr;
@@ -970,7 +1024,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
struct fc_frame *conf_frame;
struct fc_seq *csp;
- csp = lport->tt.seq_start_next(seq);
+ csp = fc_seq_start_next(seq);
conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
if (conf_frame) {
f_ctl = FC_FC_SEQ_INIT;
@@ -979,10 +1033,10 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, 0);
- lport->tt.seq_send(lport, csp, conf_frame);
+ fc_seq_send(lport, csp, conf_frame);
}
}
- lport->tt.exch_done(seq);
+ fc_exch_done(seq);
}
/*
* Some resets driven by SCSI are not I/Os and do not have
@@ -1000,10 +1054,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
*/
static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
- struct fc_lport *lport = fsp->lp;
-
if (fsp->seq_ptr) {
- lport->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->status_code = error;
@@ -1116,19 +1168,6 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
}
/**
- * get_fsp_rec_tov() - Helper function to get REC_TOV
- * @fsp: the FCP packet
- *
- * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
- */
-static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
-{
- struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
-
- return msecs_to_jiffies(rpriv->e_d_tov) + HZ;
-}
-
-/**
* fc_fcp_cmd_send() - Send a FCP command
* @lport: The local port to send the command on
* @fsp: The FCP packet the command is on
@@ -1165,8 +1204,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
- seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
- fsp, 0);
+ seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
if (!seq) {
rc = -1;
goto unlock;
@@ -1196,7 +1234,7 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
return;
if (error == -FC_EX_CLOSED) {
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
goto unlock;
}
@@ -1222,8 +1260,16 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
int rc = FAILED;
unsigned long ticks_left;
- if (fc_fcp_send_abort(fsp))
+ FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state);
+ if (fc_fcp_send_abort(fsp)) {
+ FC_FCP_DBG(fsp, "failed to send abort\n");
return FAILED;
+ }
+
+ if (fsp->state & FC_SRB_ABORTED) {
+ FC_FCP_DBG(fsp, "target abort cmd completed\n");
+ return SUCCESS;
+ }
init_completion(&fsp->tm_done);
fsp->wait_for_comp = 1;
@@ -1301,7 +1347,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
spin_lock_bh(&fsp->scsi_pkt_lock);
if (fsp->seq_ptr) {
- lport->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->wait_for_comp = 0;
@@ -1355,7 +1401,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
if (fh->fh_type != FC_TYPE_BLS)
fc_fcp_resp(fsp, fp);
fsp->seq_ptr = NULL;
- fsp->lp->tt.exch_done(seq);
+ fc_exch_done(seq);
out_unlock:
fc_fcp_unlock_pkt(fsp);
out:
@@ -1394,6 +1440,15 @@ static void fc_fcp_timeout(unsigned long data)
if (fsp->cdb_cmd.fc_tm_flags)
goto unlock;
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
+ fsp->timer_delay);
+ setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp, fsp->timer_delay);
+ goto unlock;
+ }
+ FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n",
+ fsp->timer_delay, rpriv->flags, fsp->state);
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
@@ -1486,8 +1541,8 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
switch (rjt->er_reason) {
default:
- FC_FCP_DBG(fsp, "device %x unexpected REC reject "
- "reason %d expl %d\n",
+ FC_FCP_DBG(fsp,
+ "device %x invalid REC reject %d/%d\n",
fsp->rport->port_id, rjt->er_reason,
rjt->er_explan);
/* fall through */
@@ -1503,18 +1558,23 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
case ELS_RJT_LOGIC:
case ELS_RJT_UNAB:
+ FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
/*
- * If no data transfer, the command frame got dropped
- * so we just retry. If data was transferred, we
- * lost the response but the target has no record,
- * so we abort and retry.
+ * If response got lost or is stuck in the
+ * queue somewhere we have no idea if and when
+ * the response will be received. So quarantine
+ * the xid and retry the command.
*/
- if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
- fsp->xfer_len == 0) {
- fc_fcp_retry_cmd(fsp);
+ if (rjt->er_explan == ELS_EXPL_OXID_RXID) {
+ struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+ ep->state |= FC_EX_QUARANTINE;
+ fsp->state |= FC_SRB_ABORTED;
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
break;
}
- fc_fcp_recovery(fsp, FC_ERROR);
+ fc_fcp_recovery(fsp, FC_TRANS_RESET);
break;
}
} else if (opcode == ELS_LS_ACC) {
@@ -1608,7 +1668,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
switch (error) {
case -FC_EX_CLOSED:
- fc_fcp_retry_cmd(fsp);
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n",
+ fsp, fsp->rport->port_id);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
default:
@@ -1622,8 +1684,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
- FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
- fsp->rport->port_id, error, fsp->recov_retry,
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n",
+ fsp, fsp->rport->port_id, fsp->recov_retry,
FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
@@ -1642,6 +1704,7 @@ out:
*/
static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
{
+ FC_FCP_DBG(fsp, "start recovery code %x\n", code);
fsp->status_code = code;
fsp->cdb_status = 0;
fsp->io_status = 0;
@@ -1668,7 +1731,6 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
struct fc_seq *seq;
struct fcp_srr *srr;
struct fc_frame *fp;
- unsigned int rec_tov;
rport = fsp->rport;
rpriv = rport->dd_data;
@@ -1692,10 +1754,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
- rec_tov = get_fsp_rec_tov(fsp);
- seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
- fc_fcp_pkt_destroy,
- fsp, jiffies_to_msecs(rec_tov));
+ seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
+ fsp, get_fsp_rec_tov(fsp));
if (!seq)
goto retry;
@@ -1706,7 +1767,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
return;
retry:
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
}
/**
@@ -1730,9 +1791,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fh = fc_frame_header_get(fp);
/*
- * BUG? fc_fcp_srr_error calls exch_done which would release
+ * BUG? fc_fcp_srr_error calls fc_exch_done which would release
* the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
- * then fc_exch_timeout would be sending an abort. The exch_done
+ * then fc_exch_timeout would be sending an abort. The fc_exch_done
* call by fc_fcp_srr_error would prevent fc_exch.c from seeing
* an abort response though.
*/
@@ -1753,7 +1814,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
}
fc_fcp_unlock_pkt(fsp);
out:
- fsp->lp->tt.exch_done(seq);
+ fc_exch_done(seq);
fc_frame_free(fp);
}
@@ -1768,20 +1829,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
goto out;
switch (PTR_ERR(fp)) {
case -FC_EX_TIMEOUT:
+ FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
break;
case -FC_EX_CLOSED: /* e.g., link failure */
+ FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
/* fall through */
default:
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
}
fc_fcp_unlock_pkt(fsp);
out:
- fsp->lp->tt.exch_done(fsp->recov_seq);
+ fc_exch_done(fsp->recov_seq);
}
/**
@@ -1832,8 +1895,13 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
rpriv = rport->dd_data;
if (!fc_fcp_lport_queue_ready(lport)) {
- if (lport->qfull)
+ if (lport->qfull) {
fc_fcp_can_queue_ramp_down(lport);
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: queue full, "
+ "reducing can_queue to %d.\n",
+ lport->host->can_queue);
+ }
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -1980,15 +2048,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
- FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
- "due to FC_CMD_ABORTED\n");
- sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
+ if (host_byte(sc_cmd->result) == DID_TIME_OUT)
+ FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ set_host_byte(sc_cmd, DID_ERROR);
+ }
+ sc_cmd->result |= fsp->io_status;
break;
case FC_CMD_RESET:
FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
"due to FC_CMD_RESET\n");
sc_cmd->result = (DID_RESET << 16);
break;
+ case FC_TRANS_RESET:
+ FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml "
+ "due to FC_TRANS_RESET\n");
+ sc_cmd->result = (DID_SOFT_ERROR << 16);
+ break;
case FC_HRD_ERROR:
FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
"due to FC_HRD_ERROR\n");
@@ -2142,7 +2221,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
fc_block_scsi_eh(sc_cmd);
- lport->tt.lport_reset(lport);
+ fc_lport_reset(lport);
wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
wait_tmo))
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index c11a638f32e6..d623d084b7ec 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -226,7 +226,7 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
sp = fr_seq(in_fp);
if (sp)
- fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp);
+ fr_seq(fp) = fc_seq_start_next(sp);
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
}
EXPORT_SYMBOL(fc_fill_reply_hdr);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 50c71678a156..919736a74ffa 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -149,7 +149,7 @@ static const char *fc_lport_state_names[] = {
* @offset: The offset into the response data
*/
struct fc_bsg_info {
- struct fc_bsg_job *job;
+ struct bsg_job *job;
struct fc_lport *lport;
u16 rsp_code;
struct scatterlist *sg;
@@ -200,7 +200,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
"in the DNS or FDMI state, it's in the "
"%d state", rdata->ids.port_id,
lport->state);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
break;
case RPORT_EV_LOGO:
@@ -237,23 +237,26 @@ static const char *fc_lport_state(struct fc_lport *lport)
* @remote_fid: The FID of the ptp rport
* @remote_wwpn: The WWPN of the ptp rport
* @remote_wwnn: The WWNN of the ptp rport
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
*/
static void fc_lport_ptp_setup(struct fc_lport *lport,
u32 remote_fid, u64 remote_wwpn,
u64 remote_wwnn)
{
- mutex_lock(&lport->disc.disc_mutex);
if (lport->ptp_rdata) {
- lport->tt.rport_logoff(lport->ptp_rdata);
- kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
}
- lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->ptp_rdata = fc_rport_create(lport, remote_fid);
kref_get(&lport->ptp_rdata->kref);
lport->ptp_rdata->ids.port_name = remote_wwpn;
lport->ptp_rdata->ids.node_name = remote_wwnn;
mutex_unlock(&lport->disc.disc_mutex);
- lport->tt.rport_login(lport->ptp_rdata);
+ fc_rport_login(lport->ptp_rdata);
fc_lport_enter_ready(lport);
}
@@ -409,7 +412,7 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
fc_lport_state(lport));
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_frame_free(fp);
}
@@ -478,7 +481,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
if (!req) {
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
} else {
fmt = req->rnid_fmt;
len = sizeof(*rp);
@@ -518,7 +521,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
*/
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_lport_enter_reset(lport);
fc_frame_free(fp);
}
@@ -620,9 +623,9 @@ int fc_fabric_logoff(struct fc_lport *lport)
lport->tt.disc_stop_final(lport);
mutex_lock(&lport->lp_mutex);
if (lport->dns_rdata)
- lport->tt.rport_logoff(lport->dns_rdata);
+ fc_rport_logoff(lport->dns_rdata);
mutex_unlock(&lport->lp_mutex);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
mutex_lock(&lport->lp_mutex);
fc_lport_enter_logo(lport);
mutex_unlock(&lport->lp_mutex);
@@ -899,7 +902,7 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
/*
* Check opcode.
*/
- recv = lport->tt.rport_recv_req;
+ recv = fc_rport_recv_req;
switch (fc_frame_payload_op(fp)) {
case ELS_FLOGI:
if (!lport->point_to_multipoint)
@@ -941,15 +944,14 @@ struct fc4_prov fc_lport_els_prov = {
};
/**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv() - The generic lport request handler
* @lport: The lport that received the request
* @fp: The frame the request is in
*
* Locking Note: This function should not be called with the lport
* lock held because it may grab the lock.
*/
-static void fc_lport_recv_req(struct fc_lport *lport,
- struct fc_frame *fp)
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp = fr_seq(fp);
@@ -978,8 +980,9 @@ drop:
FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
fc_frame_free(fp);
if (sp)
- lport->tt.exch_done(sp);
+ fc_exch_done(sp);
}
+EXPORT_SYMBOL(fc_lport_recv);
/**
* fc_lport_reset() - Reset a local port
@@ -1007,12 +1010,14 @@ EXPORT_SYMBOL(fc_lport_reset);
*/
static void fc_lport_reset_locked(struct fc_lport *lport)
{
- if (lport->dns_rdata)
- lport->tt.rport_logoff(lport->dns_rdata);
+ if (lport->dns_rdata) {
+ fc_rport_logoff(lport->dns_rdata);
+ lport->dns_rdata = NULL;
+ }
if (lport->ptp_rdata) {
- lport->tt.rport_logoff(lport->ptp_rdata);
- kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
lport->ptp_rdata = NULL;
}
@@ -1426,13 +1431,13 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_DNS);
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
+ rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
return;
err:
@@ -1543,13 +1548,13 @@ static void fc_lport_enter_fdmi(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_FDMI);
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+ rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
return;
err:
@@ -1772,7 +1777,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
if ((csp_flags & FC_SP_FT_FPORT) == 0) {
if (e_d_tov > lport->e_d_tov)
lport->e_d_tov = e_d_tov;
- lport->r_a_tov = 2 * e_d_tov;
+ lport->r_a_tov = 2 * lport->e_d_tov;
fc_lport_set_port_id(lport, did, fp);
printk(KERN_INFO "host%d: libfc: "
"Port (%6.6x) entered "
@@ -1784,8 +1789,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
get_unaligned_be64(
&flp->fl_wwnn));
} else {
- lport->e_d_tov = e_d_tov;
- lport->r_a_tov = r_a_tov;
+ if (e_d_tov > lport->e_d_tov)
+ lport->e_d_tov = e_d_tov;
+ if (r_a_tov > lport->r_a_tov)
+ lport->r_a_tov = r_a_tov;
fc_host_fabric_name(lport->host) =
get_unaligned_be64(&flp->fl_wwnn);
fc_lport_set_port_id(lport, did, fp);
@@ -1858,12 +1865,6 @@ EXPORT_SYMBOL(fc_lport_config);
*/
int fc_lport_init(struct fc_lport *lport)
{
- if (!lport->tt.lport_recv)
- lport->tt.lport_recv = fc_lport_recv_req;
-
- if (!lport->tt.lport_reset)
- lport->tt.lport_reset = fc_lport_reset;
-
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
fc_host_node_name(lport->host) = lport->wwnn;
fc_host_port_name(lport->host) = lport->wwpn;
@@ -1900,18 +1901,19 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
void *info_arg)
{
struct fc_bsg_info *info = info_arg;
- struct fc_bsg_job *job = info->job;
+ struct bsg_job *job = info->job;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct fc_lport *lport = info->lport;
struct fc_frame_header *fh;
size_t len;
void *buf;
if (IS_ERR(fp)) {
- job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+ bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
-ECONNABORTED : -ETIMEDOUT;
job->reply_len = sizeof(uint32_t);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(info);
return;
}
@@ -1928,25 +1930,25 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
(unsigned short)fc_frame_payload_op(fp);
/* Save the reply status of the job */
- job->reply->reply_data.ctels_reply.status =
+ bsg_reply->reply_data.ctels_reply.status =
(cmd == info->rsp_code) ?
FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
}
- job->reply->reply_payload_rcv_len +=
+ bsg_reply->reply_payload_rcv_len +=
fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
&info->offset, NULL);
if (fr_eof(fp) == FC_EOF_T &&
(ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
- if (job->reply->reply_payload_rcv_len >
+ if (bsg_reply->reply_payload_rcv_len >
job->reply_payload.payload_len)
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
job->reply_payload.payload_len;
- job->reply->result = 0;
- job->state_flags |= FC_RQST_STATE_DONE;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(info);
}
fc_frame_free(fp);
@@ -1962,7 +1964,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-static int fc_lport_els_request(struct fc_bsg_job *job,
+static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
u32 did, u32 tov)
{
@@ -2005,8 +2007,8 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
- if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
- NULL, info, tov)) {
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
@@ -2023,7 +2025,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-static int fc_lport_ct_request(struct fc_bsg_job *job,
+static int fc_lport_ct_request(struct bsg_job *job,
struct fc_lport *lport, u32 did, u32 tov)
{
struct fc_bsg_info *info;
@@ -2066,8 +2068,8 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
- if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
- NULL, info, tov)) {
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
@@ -2079,25 +2081,27 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
* FC Passthrough requests
* @job: The BSG passthrough job
*/
-int fc_lport_bsg_request(struct fc_bsg_job *job)
+int fc_lport_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct request *rsp = job->req->next_rq;
- struct Scsi_Host *shost = job->shost;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport;
struct fc_rport_priv *rdata;
int rc = -EINVAL;
u32 did, tov;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (rsp)
rsp->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
- rport = job->rport;
+ rport = fc_bsg_to_rport(job);
if (!rport)
break;
@@ -2107,7 +2111,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
break;
case FC_BSG_RPT_CT:
- rport = job->rport;
+ rport = fc_bsg_to_rport(job);
if (!rport)
break;
@@ -2117,25 +2121,25 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
break;
case FC_BSG_HST_CT:
- did = ntoh24(job->request->rqst_data.h_ct.port_id);
+ did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
if (did == FC_FID_DIR_SERV) {
rdata = lport->dns_rdata;
if (!rdata)
break;
tov = rdata->e_d_tov;
} else {
- rdata = lport->tt.rport_lookup(lport, did);
+ rdata = fc_rport_lookup(lport, did);
if (!rdata)
break;
tov = rdata->e_d_tov;
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
rc = fc_lport_ct_request(job, lport, did, tov);
break;
case FC_BSG_HST_ELS_NOLOGIN:
- did = ntoh24(job->request->rqst_data.h_els.port_id);
+ did = ntoh24(bsg_request->rqst_data.h_els.port_id);
rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
break;
}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 97aeaddd600d..c991f3b822f8 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -44,6 +44,19 @@
* path this potential over-use of the mutex is acceptable.
*/
+/*
+ * RPORT REFERENCE COUNTING
+ *
+ * A rport reference should be taken when:
+ * - an rport is allocated
+ * - a workqueue item is scheduled
+ * - an ELS request is sent
+ * The reference should be dropped when:
+ * - the workqueue function has finished
+ * - the ELS response is handled
+ * - an rport is removed
+ */
+
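The scheduling side of these rules shows up repeatedly in the hunks below; a minimal illustrative sketch of the convention (rport_schedule_event is a hypothetical helper, not part of the patch):

	static void rport_schedule_event(struct fc_rport_priv *rdata,
					 enum fc_rport_event event)
	{
		/* grab a reference for the event work before it can run */
		kref_get(&rdata->kref);
		/* drop it again only if the work item was already queued */
		if (rdata->event == RPORT_EV_NONE &&
		    !queue_work(rport_event_queue, &rdata->event_work))
			kref_put(&rdata->kref, fc_rport_destroy);
		rdata->event = event;
	}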
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -74,8 +87,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
-static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
-static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_error(struct fc_rport_priv *, int);
+static void fc_rport_error_retry(struct fc_rport_priv *, int);
static void fc_rport_work(struct work_struct *);
static const char *fc_rport_state_names[] = {
@@ -98,8 +111,8 @@ static const char *fc_rport_state_names[] = {
* The reference count of the fc_rport_priv structure is
* increased by one.
*/
-static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
- u32 port_id)
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+ u32 port_id)
{
struct fc_rport_priv *rdata = NULL, *tmp_rdata;
@@ -113,6 +126,7 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
rcu_read_unlock();
return rdata;
}
+EXPORT_SYMBOL(fc_rport_lookup);
/**
* fc_rport_create() - Create a new remote port
@@ -123,12 +137,11 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
*
* Locking note: must be called with the disc_mutex held.
*/
-static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
- u32 port_id)
+struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
- rdata = lport->tt.rport_lookup(lport, port_id);
+ rdata = fc_rport_lookup(lport, port_id);
if (rdata)
return rdata;
@@ -158,18 +171,20 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
}
return rdata;
}
+EXPORT_SYMBOL(fc_rport_create);
/**
* fc_rport_destroy() - Free a remote port after last reference is released
* @kref: The remote port's kref
*/
-static void fc_rport_destroy(struct kref *kref)
+void fc_rport_destroy(struct kref *kref)
{
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
kfree_rcu(rdata, rcu);
}
+EXPORT_SYMBOL(fc_rport_destroy);
/**
* fc_rport_state() - Return a string identifying the remote port's state
@@ -242,6 +257,8 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata,
/**
* fc_rport_work() - Handler for remote port events in the rport_event_queue
* @work: Handle to the remote port being dequeued
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_work(struct work_struct *work)
{
@@ -272,12 +289,14 @@ static void fc_rport_work(struct work_struct *work)
kref_get(&rdata->kref);
mutex_unlock(&rdata->rp_mutex);
- if (!rport)
+ if (!rport) {
+ FC_RPORT_DBG(rdata, "No rport!\n");
rport = fc_remote_port_add(lport->host, 0, &ids);
+ }
if (!rport) {
FC_RPORT_DBG(rdata, "Failed to add the rport\n");
- lport->tt.rport_logoff(rdata);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
mutex_lock(&rdata->rp_mutex);
@@ -303,7 +322,7 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
break;
case RPORT_EV_FAILED:
@@ -329,7 +348,8 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
- cancel_delayed_work_sync(&rdata->retry_work);
+ if (cancel_delayed_work_sync(&rdata->retry_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
/*
* Reset any outstanding exchanges before freeing rport.
@@ -351,7 +371,7 @@ static void fc_rport_work(struct work_struct *work)
if (port_id == FC_FID_DIR_SERV) {
rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
} else if ((rdata->flags & FC_RP_STARTED) &&
rdata->major_retries <
lport->max_rport_retry_count) {
@@ -362,17 +382,21 @@ static void fc_rport_work(struct work_struct *work)
mutex_unlock(&rdata->rp_mutex);
} else {
FC_RPORT_DBG(rdata, "work delete\n");
+ mutex_lock(&lport->disc.disc_mutex);
list_del_rcu(&rdata->peers);
+ mutex_unlock(&lport->disc.disc_mutex);
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
} else {
/*
* Re-open for events. Reissue READY event if ready.
*/
rdata->event = RPORT_EV_NONE;
- if (rdata->rp_state == RPORT_ST_READY)
+ if (rdata->rp_state == RPORT_ST_READY) {
+ FC_RPORT_DBG(rdata, "work reopen\n");
fc_rport_enter_ready(rdata);
+ }
mutex_unlock(&rdata->rp_mutex);
}
break;
@@ -381,12 +405,21 @@ static void fc_rport_work(struct work_struct *work)
mutex_unlock(&rdata->rp_mutex);
break;
}
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_login() - Start the remote port login state machine
* @rdata: The remote port to be logged in to
*
+ * Initiates the RP state machine. It is called from the LP module.
+ * This function will issue the following commands to the N_Port
+ * identified by the FC ID provided.
+ *
+ * - PLOGI
+ * - PRLI
+ * - RTV
+ *
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
@@ -395,10 +428,16 @@ static void fc_rport_work(struct work_struct *work)
* If it appears we are already logged in, ADISC is used to verify
* the setup.
*/
-static int fc_rport_login(struct fc_rport_priv *rdata)
+int fc_rport_login(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
+ if (rdata->flags & FC_RP_STARTED) {
+ FC_RPORT_DBG(rdata, "port already started\n");
+ mutex_unlock(&rdata->rp_mutex);
+ return 0;
+ }
+
rdata->flags |= FC_RP_STARTED;
switch (rdata->rp_state) {
case RPORT_ST_READY:
@@ -408,15 +447,20 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
case RPORT_ST_DELETE:
FC_RPORT_DBG(rdata, "Restart deleted port\n");
break;
- default:
+ case RPORT_ST_INIT:
FC_RPORT_DBG(rdata, "Login to port\n");
fc_rport_enter_flogi(rdata);
break;
+ default:
+ FC_RPORT_DBG(rdata, "Login in progress, state %s\n",
+ fc_rport_state(rdata));
+ break;
}
mutex_unlock(&rdata->rp_mutex);
return 0;
}
+EXPORT_SYMBOL(fc_rport_login);
/**
* fc_rport_enter_delete() - Schedule a remote port to be deleted
@@ -431,6 +475,8 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
* Set the new event so that the old pending event will not occur.
* Since we have the mutex, even if fc_rport_work() is already started,
* it'll see the new event.
+ *
+ * Reference counting: does not modify kref
*/
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
enum fc_rport_event event)
@@ -442,8 +488,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
- if (rdata->event == RPORT_EV_NONE)
- queue_work(rport_event_queue, &rdata->event_work);
+ kref_get(&rdata->kref);
+ if (rdata->event == RPORT_EV_NONE &&
+ !queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+
rdata->event = event;
}
@@ -455,7 +504,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*/
-static int fc_rport_logoff(struct fc_rport_priv *rdata)
+int fc_rport_logoff(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
u32 port_id = rdata->ids.port_id;
@@ -489,6 +538,7 @@ out:
mutex_unlock(&rdata->rp_mutex);
return 0;
}
+EXPORT_SYMBOL(fc_rport_logoff);
/**
* fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
@@ -496,6 +546,8 @@ out:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: schedules workqueue, does not modify kref
*/
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
@@ -503,8 +555,11 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
FC_RPORT_DBG(rdata, "Port is Ready\n");
- if (rdata->event == RPORT_EV_NONE)
- queue_work(rport_event_queue, &rdata->event_work);
+ kref_get(&rdata->kref);
+ if (rdata->event == RPORT_EV_NONE &&
+ !queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+
rdata->event = RPORT_EV_READY;
}
@@ -515,6 +570,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
+ *
+ * Reference counting: Drops kref on return.
*/
static void fc_rport_timeout(struct work_struct *work)
{
@@ -522,6 +579,7 @@ static void fc_rport_timeout(struct work_struct *work)
container_of(work, struct fc_rport_priv, retry_work.work);
mutex_lock(&rdata->rp_mutex);
+ FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata));
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
@@ -547,23 +605,25 @@ static void fc_rport_timeout(struct work_struct *work)
}
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_error() - Error handler, called once retries have been exhausted
* @rdata: The remote port the error happened on
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
+ *
+ * Reference counting: does not modify kref
*/
-static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
+static void fc_rport_error(struct fc_rport_priv *rdata, int err)
{
struct fc_lport *lport = rdata->local_port;
- FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
- IS_ERR(fp) ? -PTR_ERR(fp) : 0,
- fc_rport_state(rdata), rdata->retries);
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
+ -err, fc_rport_state(rdata), rdata->retries);
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
@@ -595,36 +655,39 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
/**
* fc_rport_error_retry() - Handler for remote port state retries
* @rdata: The remote port whose state is to be retried
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
*
* If the error was an exchange timeout retry immediately,
* otherwise wait for E_D_TOV.
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
+ *
+ * Reference counting: increments kref when scheduling retry_work
*/
-static void fc_rport_error_retry(struct fc_rport_priv *rdata,
- struct fc_frame *fp)
+static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
{
- unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
+ unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
- if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ if (err == -FC_EX_CLOSED)
goto out;
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
- FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
- PTR_ERR(fp), fc_rport_state(rdata));
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n",
+ err, fc_rport_state(rdata));
rdata->retries++;
/* no additional delay on exchange timeouts */
- if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
+ if (err == -FC_EX_TIMEOUT)
delay = 0;
- schedule_delayed_work(&rdata->retry_work, delay);
+ kref_get(&rdata->kref);
+ if (!schedule_delayed_work(&rdata->retry_work, delay))
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
out:
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, err);
}
/**
@@ -684,8 +747,11 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport = rdata->local_port;
struct fc_els_flogi *flogi;
unsigned int r_a_tov;
+ u8 opcode;
+ int err = 0;
- FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+ FC_RPORT_DBG(rdata, "Received a FLOGI %s\n",
+ IS_ERR(fp) ? "error" : fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
goto put;
@@ -701,18 +767,34 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
-
- if (fc_frame_payload_op(fp) != ELS_LS_ACC)
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ struct fc_els_ls_rjt *rjt;
+
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ err = -FC_EX_ELS_RJT;
goto bad;
- if (fc_rport_login_complete(rdata, fp))
+ } else if (opcode != ELS_LS_ACC) {
+ FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode);
+ err = -FC_EX_ELS_RJT;
goto bad;
+ }
+ if (fc_rport_login_complete(rdata, fp)) {
+ FC_RPORT_DBG(rdata, "FLOGI failed, no login\n");
+ err = -FC_EX_INV_LOGIN;
+ goto bad;
+ }
flogi = fc_frame_payload_get(fp, sizeof(*flogi));
- if (!flogi)
+ if (!flogi) {
+ err = -FC_EX_ALLOC_ERR;
goto bad;
+ }
r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
if (r_a_tov > rdata->r_a_tov)
rdata->r_a_tov = r_a_tov;
@@ -726,11 +808,11 @@ out:
err:
mutex_unlock(&rdata->rp_mutex);
put:
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
bad:
FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, err);
goto out;
}
@@ -740,6 +822,8 @@ bad:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
{
@@ -756,20 +840,23 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp)
- return fc_rport_error_retry(rdata, fp);
+ return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
fc_rport_flogi_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
* fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
* @lport: The local port that received the PLOGI request
* @rx_fp: The PLOGI request frame
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -799,7 +886,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
goto reject;
}
- rdata = lport->tt.rport_lookup(lport, sid);
+ rdata = fc_rport_lookup(lport, sid);
if (!rdata) {
rjt_data.reason = ELS_RJT_FIP;
rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
@@ -824,8 +911,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
* RPORT wouldn't have been created and 'rport_lookup' would have
* failed anyway in that case.
*/
- if (lport->point_to_multipoint)
- break;
+ break;
case RPORT_ST_DELETE:
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_FIP;
@@ -867,20 +953,27 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
- if (rdata->ids.port_name < lport->wwpn)
- fc_rport_enter_plogi(rdata);
- else
- fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ /*
+ * Do not proceed with the state machine if our
+ * FLOGI has crossed with an FLOGI from the
+ * remote port; wait for the FLOGI response instead.
+ */
+ if (rdata->rp_state != RPORT_ST_FLOGI) {
+ if (rdata->ids.port_name < lport->wwpn)
+ fc_rport_enter_plogi(rdata);
+ else
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ }
out:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
fc_frame_free(rx_fp);
return;
reject_put:
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(rx_fp);
}
@@ -904,10 +997,13 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
u16 cssp_seq;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_PLOGI) {
FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -917,7 +1013,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
@@ -939,14 +1035,20 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->max_seq = csp_seq;
rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
fc_rport_enter_prli(rdata);
- } else
- fc_rport_error_retry(rdata, fp);
+ } else {
+ struct fc_els_ls_rjt *rjt;
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
+ }
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
static bool
@@ -969,6 +1071,8 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
@@ -990,17 +1094,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp) {
FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
rdata->e_d_tov = lport->e_d_tov;
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
fc_rport_plogi_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
@@ -1022,16 +1127,20 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_spp spp;
} *pp;
struct fc_els_spp temp_spp;
+ struct fc_els_ls_rjt *rjt;
struct fc4_prov *prov;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
u32 fcp_parm = 0;
u8 op;
- u8 resp_code = 0;
-
- mutex_lock(&rdata->rp_mutex);
+ enum fc_els_spp_resp resp_code;
FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_PRLI) {
FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -1041,7 +1150,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
@@ -1055,14 +1164,14 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
goto out;
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
- FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
- pp->spp.spp_flags);
+ FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
+ pp->spp.spp_flags, pp->spp.spp_type);
rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, -FC_EX_SEQ_ERR);
else
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
if (pp->prli.prli_spp_len < sizeof(pp->spp))
@@ -1074,13 +1183,25 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
if (fcp_parm & FCP_SPPF_CONF_COMPL)
rdata->flags |= FC_RP_FLAGS_CONF_REQ;
- prov = fc_passive_prov[FC_TYPE_FCP];
+ /*
+ * Call prli provider if we should act as a target
+ */
+ prov = fc_passive_prov[rdata->spp_type];
if (prov) {
memset(&temp_spp, 0, sizeof(temp_spp));
prov->prli(rdata, pp->prli.prli_spp_len,
&pp->spp, &temp_spp);
}
-
+ /*
+ * Check if the image pair could be established
+ */
+ if (rdata->spp_type != FC_TYPE_FCP ||
+ !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) {
+ /*
+ * Nope; we can't use this port as a target.
+ */
+ fcp_parm &= ~FCP_SPPF_TARG_FCN;
+ }
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
@@ -1091,15 +1212,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_rport_enter_rtv(rdata);
} else {
- FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
- fc_rport_error_retry(rdata, fp);
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1108,6 +1232,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
@@ -1128,6 +1254,15 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
return;
}
+ /*
+ * And if the local port does not support the initiator function
+ * there's no need to send a PRLI, either.
+ */
+ if (!(lport->service_params & FCP_SPPF_INIT_FCN)) {
+ fc_rport_enter_ready(rdata);
+ return;
+ }
+
FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
fc_rport_state(rdata));
@@ -1135,7 +1270,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(*pp));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
@@ -1151,15 +1286,16 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
fc_host_port_id(lport->host), FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
- if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
- NULL, rdata, 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ kref_get(&rdata->kref);
+ if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
- * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses
+ * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses
* @sp: The sequence the RTV was on
* @fp: The RTV response frame
* @rdata_arg: The remote port that sent the RTV response
@@ -1176,10 +1312,13 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_rport_priv *rdata = rdata_arg;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_RTV) {
FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -1189,7 +1328,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
@@ -1205,13 +1344,15 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
tov = ntohl(rtv->rtv_r_a_tov);
if (tov == 0)
tov = 1;
- rdata->r_a_tov = tov;
+ if (tov > rdata->r_a_tov)
+ rdata->r_a_tov = tov;
tov = ntohl(rtv->rtv_e_d_tov);
if (toq & FC_ELS_RTV_EDRES)
tov /= 1000000;
if (tov == 0)
tov = 1;
- rdata->e_d_tov = tov;
+ if (tov > rdata->e_d_tov)
+ rdata->e_d_tov = tov;
}
}
@@ -1221,7 +1362,8 @@ out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1230,6 +1372,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
@@ -1243,16 +1387,52 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
fc_rport_rtv_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+}
+
+/**
+ * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
+ * @rdata: The remote port that sent the RTV request
+ * @in_fp: The RTV request frame
+ *
+ * Locking Note: Called with the lport and rport locks held.
+ */
+static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
+ struct fc_frame *in_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct fc_els_rtv_acc *rtv;
+ struct fc_seq_els_data rjt_data;
+
+ FC_RPORT_DBG(rdata, "Received RTV request\n");
+
+ fp = fc_frame_alloc(lport, sizeof(*rtv));
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ goto drop;
+ }
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ rtv->rtv_cmd = ELS_LS_ACC;
+ rtv->rtv_r_a_tov = htonl(lport->r_a_tov);
+ rtv->rtv_e_d_tov = htonl(lport->e_d_tov);
+ rtv->rtv_toq = 0;
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+drop:
+ fc_frame_free(in_fp);
}
/**
@@ -1262,15 +1442,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
* @rdata_arg: The remote port that sent the LOGO response
*/
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
- void *lport_arg)
+ void *rdata_arg)
{
- struct fc_lport *lport = lport_arg;
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_lport *lport = rdata->local_port;
FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
"Received a LOGO %s\n", fc_els_resp_type(fp));
- if (IS_ERR(fp))
- return;
- fc_frame_free(fp);
+ if (!IS_ERR(fp))
+ fc_frame_free(fp);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1279,6 +1460,8 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
@@ -1291,8 +1474,10 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
if (!fp)
return;
- (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
- fc_rport_logo_resp, lport, 0);
+ kref_get(&rdata->kref);
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
+ fc_rport_logo_resp, rdata, 0))
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1312,10 +1497,13 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_adisc *adisc;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a ADISC response\n");
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_ADISC) {
FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
fc_rport_state(rdata));
@@ -1325,7 +1513,7 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
@@ -1350,7 +1538,8 @@ out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1359,6 +1548,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
@@ -1372,15 +1563,16 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
fc_rport_adisc_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
@@ -1404,7 +1596,7 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
if (!adisc) {
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
- lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
goto drop;
}
@@ -1480,7 +1672,7 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
goto out;
out_rjt:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
out:
fc_frame_free(rx_fp);
}
@@ -1494,15 +1686,21 @@ out:
* The ELS opcode has already been validated by the caller.
*
* Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
*/
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;
- rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
- if (!rdata)
+ rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
+ if (!rdata) {
+ FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
+ "Received ELS 0x%02x from non-logged-in port\n",
+ fc_frame_payload_op(fp));
goto reject;
+ }
mutex_lock(&rdata->rp_mutex);
@@ -1512,9 +1710,21 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
case RPORT_ST_READY:
case RPORT_ST_ADISC:
break;
+ case RPORT_ST_PLOGI:
+ if (fc_frame_payload_op(fp) == ELS_PRLI) {
+ FC_RPORT_DBG(rdata, "Reject ELS PRLI "
+ "while in state %s\n",
+ fc_rport_state(rdata));
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ goto busy;
+ }
default:
+ FC_RPORT_DBG(rdata,
+ "Reject ELS 0x%02x while in state %s\n",
+ fc_frame_payload_op(fp), fc_rport_state(rdata));
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
goto reject;
}
@@ -1529,30 +1739,41 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
fc_rport_recv_adisc_req(rdata, fp);
break;
case ELS_RRQ:
- lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
+ fc_seq_els_rsp_send(fp, ELS_RRQ, NULL);
fc_frame_free(fp);
break;
case ELS_REC:
- lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_REC, NULL);
fc_frame_free(fp);
break;
case ELS_RLS:
fc_rport_recv_rls_req(rdata, fp);
break;
+ case ELS_RTV:
+ fc_rport_recv_rtv_req(rdata, fp);
+ break;
default:
fc_frame_free(fp); /* can't happen */
break;
}
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
reject:
els_data.reason = ELS_RJT_UNAB;
els_data.explan = ELS_EXPL_PLOGI_REQD;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+ return;
+
+busy:
+ els_data.reason = ELS_RJT_BUSY;
+ els_data.explan = ELS_EXPL_NONE;
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
+ return;
}
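Every LS_RJT sent from this path pairs a reason code with an explanation code in struct fc_seq_els_data: ELS_RJT_UNAB/ELS_EXPL_PLOGI_REQD for frames from ports that are not logged in, and ELS_RJT_BUSY/ELS_EXPL_NONE for a PRLI that arrives before PLOGI has completed. A minimal helper capturing the pairing might look like this (illustrative only; example_els_reject is not part of libfc):

/* Sketch: send an LS_RJT with an explicit reason/explanation pair. */
static void example_els_reject(struct fc_frame *rx_fp,
			       enum fc_els_rjt_reason reason,
			       enum fc_els_rjt_explan explan)
{
	struct fc_seq_els_data rjt_data;

	rjt_data.reason = reason;	/* e.g. ELS_RJT_UNAB or ELS_RJT_BUSY */
	rjt_data.explan = explan;	/* e.g. ELS_EXPL_PLOGI_REQD or ELS_EXPL_NONE */
	fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
}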
/**
@@ -1561,8 +1782,10 @@ reject:
* @fp: The request frame
*
* Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
*/
-static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
@@ -1588,16 +1811,18 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
case ELS_RRQ:
case ELS_REC:
case ELS_RLS:
+ case ELS_RTV:
fc_rport_recv_els_req(lport, fp);
break;
default:
els_data.reason = ELS_RJT_UNSUP;
els_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
break;
}
}
+EXPORT_SYMBOL(fc_rport_recv_req);
/**
* fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
@@ -1605,6 +1830,8 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
* @rx_fp: The PLOGI request frame
*
* Locking Note: The rport lock is held before calling this function.
+ *
+ * Reference counting: increments kref on return
*/
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -1630,7 +1857,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
disc = &lport->disc;
mutex_lock(&disc->disc_mutex);
- rdata = lport->tt.rport_create(lport, sid);
+ rdata = fc_rport_create(lport, sid);
if (!rdata) {
mutex_unlock(&disc->disc_mutex);
rjt_data.reason = ELS_RJT_UNAB;
@@ -1718,7 +1945,7 @@ out:
return;
reject:
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -1744,7 +1971,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
unsigned int len;
unsigned int plen;
enum fc_els_spp_resp resp;
- enum fc_els_spp_resp passive;
struct fc_seq_els_data rjt_data;
struct fc4_prov *prov;
@@ -1794,15 +2020,21 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
resp = 0;
if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ enum fc_els_spp_resp active = 0, passive = 0;
+
prov = fc_active_prov[rspp->spp_type];
if (prov)
- resp = prov->prli(rdata, plen, rspp, spp);
+ active = prov->prli(rdata, plen, rspp, spp);
prov = fc_passive_prov[rspp->spp_type];
- if (prov) {
+ if (prov)
passive = prov->prli(rdata, plen, rspp, spp);
- if (!resp || passive == FC_SPP_RESP_ACK)
- resp = passive;
- }
+ if (!active || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ else
+ resp = active;
+ FC_RPORT_DBG(rdata, "PRLI rspp type %x "
+ "active %x passive %x\n",
+ rspp->spp_type, active, passive);
}
if (!resp) {
if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
@@ -1823,20 +2055,13 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- fc_rport_enter_ready(rdata);
- break;
- default:
- break;
- }
goto drop;
reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
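The reworked PRLI service-parameter handling queries both the active (initiator) and passive (target) FC-4 providers for the given type and then picks one answer: the passive response wins when the active provider had nothing to say or the passive provider fully acknowledged the image pair, otherwise the active response stands. The rule, isolated (example_resolve_prli is a placeholder, not a libfc function):

/* Sketch: resolve one FC-4 type's PRLI response from both providers. */
static enum fc_els_spp_resp example_resolve_prli(enum fc_els_spp_resp active,
						 enum fc_els_spp_resp passive)
{
	if (!active || passive == FC_SPP_RESP_ACK)
		return passive;
	return active;
}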
@@ -1907,7 +2132,7 @@ reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
@@ -1919,17 +2144,19 @@ drop:
*
* Locking Note: The rport lock is expected to be held before calling
* this function.
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
u32 sid;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
sid = fc_frame_sid(fp);
- rdata = lport->tt.rport_lookup(lport, sid);
+ rdata = fc_rport_lookup(lport, sid);
if (rdata) {
mutex_lock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
@@ -1937,7 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
} else
FC_RPORT_ID_DBG(lport, sid,
"Received LOGO from non-logged-in port\n");
@@ -1947,41 +2174,11 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
/**
* fc_rport_flush_queue() - Flush the rport_event_queue
*/
-static void fc_rport_flush_queue(void)
+void fc_rport_flush_queue(void)
{
flush_workqueue(rport_event_queue);
}
-
-/**
- * fc_rport_init() - Initialize the remote port layer for a local port
- * @lport: The local port to initialize the remote port layer for
- */
-int fc_rport_init(struct fc_lport *lport)
-{
- if (!lport->tt.rport_lookup)
- lport->tt.rport_lookup = fc_rport_lookup;
-
- if (!lport->tt.rport_create)
- lport->tt.rport_create = fc_rport_create;
-
- if (!lport->tt.rport_login)
- lport->tt.rport_login = fc_rport_login;
-
- if (!lport->tt.rport_logoff)
- lport->tt.rport_logoff = fc_rport_logoff;
-
- if (!lport->tt.rport_recv_req)
- lport->tt.rport_recv_req = fc_rport_recv_req;
-
- if (!lport->tt.rport_flush_queue)
- lport->tt.rport_flush_queue = fc_rport_flush_queue;
-
- if (!lport->tt.rport_destroy)
- lport->tt.rport_destroy = fc_rport_destroy;
-
- return 0;
-}
-EXPORT_SYMBOL(fc_rport_init);
+EXPORT_SYMBOL(fc_rport_flush_queue);
/**
* fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b484859464f6..8a20b4e86224 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -648,6 +648,10 @@ struct lpfc_hba {
#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
+#define HBA_FORCED_LINK_SPEED 0x40000 /*
+ * Firmware supports Forced Link Speed
+ * capability
+ */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -746,6 +750,8 @@ struct lpfc_hba {
uint32_t cfg_oas_priority;
uint32_t cfg_XLanePriority;
uint32_t cfg_enable_bg;
+ uint32_t cfg_prot_mask;
+ uint32_t cfg_prot_guard;
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f1019908800e..c84775562c65 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2759,18 +2759,14 @@ LPFC_ATTR_R(enable_npiv, 1, 0, 1,
LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
-int lpfc_enable_rrq = 2;
-module_param(lpfc_enable_rrq, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
-lpfc_param_show(enable_rrq);
/*
# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
# 0x0 = disabled, XRI/OXID use not tracked.
# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
*/
-lpfc_param_init(enable_rrq, 2, 0, 2);
-static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+LPFC_ATTR_R(enable_rrq, 2, 0, 2,
+ "Enable RRQ functionality");
/*
# lpfc_suppress_link_up: Bring link up at initialization
@@ -2827,14 +2823,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
-int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_iocb_cnt,
+LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-lpfc_param_show(iocb_cnt);
-lpfc_param_init(iocb_cnt, 2, 1, 5);
-static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
- lpfc_iocb_cnt_show, NULL);
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -2887,9 +2877,9 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
if (val != LPFC_DEF_DEVLOSS_TMO)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0407 Ignoring nodev_tmo module "
- "parameter because devloss_tmo is "
- "set.\n");
+ "0407 Ignoring lpfc_nodev_tmo module "
+ "parameter because lpfc_devloss_tmo "
+ "is set.\n");
return 0;
}
@@ -2948,8 +2938,8 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
if (vport->dev_loss_tmo_changed ||
(lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0401 Ignoring change to nodev_tmo "
- "because devloss_tmo is set.\n");
+ "0401 Ignoring change to lpfc_nodev_tmo "
+ "because lpfc_devloss_tmo is set.\n");
return 0;
}
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
@@ -2964,7 +2954,7 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
return 0;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0403 lpfc_nodev_tmo attribute cannot be set to"
+ "0403 lpfc_nodev_tmo attribute cannot be set to "
"%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
@@ -3015,8 +3005,8 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0404 lpfc_devloss_tmo attribute cannot be set to"
- " %d, allowed range is [%d, %d]\n",
+ "0404 lpfc_devloss_tmo attribute cannot be set to "
+ "%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
}
@@ -3204,6 +3194,8 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
# Default value is 0.
*/
+LPFC_ATTR(topology, 0, 0, 6,
+ "Select Fibre Channel topology");
/**
* lpfc_topology_set - Set the adapters topology field
@@ -3281,11 +3273,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
phba->brd_no, val);
return -EINVAL;
}
-static int lpfc_topology = 0;
-module_param(lpfc_topology, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
+
lpfc_param_show(topology)
-lpfc_param_init(topology, 0, 0, 6)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
lpfc_topology_show, lpfc_topology_store);
@@ -3679,7 +3668,12 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
int nolip = 0;
const char *val_buf = buf;
int err;
- uint32_t prev_val;
+ uint32_t prev_val, if_type;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2 &&
+ phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ return -EPERM;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
nolip = 1;
@@ -3789,6 +3783,9 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
# 1 = aer supported and enabled (default)
# Value range is [0,1]. Default value is 1.
*/
+LPFC_ATTR(aer_support, 1, 0, 1,
+ "Enable PCIe device AER support");
+lpfc_param_show(aer_support)
/**
* lpfc_aer_support_store - Set the adapter for aer support
@@ -3871,46 +3868,6 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
-lpfc_param_show(aer_support)
-
-/**
- * lpfc_aer_support_init - Set the initial adapters aer support flag
- * @phba: lpfc_hba pointer.
- * @val: enable aer or disable aer flag.
- *
- * Description:
- * If val is in a valid range [0,1], then set the adapter's initial
- * cfg_aer_support field. It will be up to the driver's probe_one
- * routine to determine whether the device's AER support can be set
- * or not.
- *
- * Notes:
- * If the value is not in range log a kernel error message, and
- * choose the default value of setting AER support and return.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_aer_support_init(struct lpfc_hba *phba, int val)
-{
- if (val == 0 || val == 1) {
- phba->cfg_aer_support = val;
- return 0;
- }
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2712 lpfc_aer_support attribute value %d out "
- "of range, allowed values are 0|1, setting it "
- "to default value of 1\n", val);
- /* By default, try to enable AER on a device */
- phba->cfg_aer_support = 1;
- return -EINVAL;
-}
-
static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
lpfc_aer_support_show, lpfc_aer_support_store);
@@ -4055,39 +4012,10 @@ lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
-module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
-lpfc_param_show(sriov_nr_virtfn)
-
-/**
- * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
- * @phba: lpfc_hba pointer.
- * @val: link speed value.
- *
- * Description:
- * If val is in a valid range [0,255], then set the adapter's initial
- * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
- * number shall be used instead. It will be up to the driver's probe_one
- * routine to determine whether the device's SR-IOV is supported or not.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
-{
- if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
- phba->cfg_sriov_nr_virtfn = val;
- return 0;
- }
+LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
+ "Enable PCIe device SR-IOV virtual fn");
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3017 Enabling %d virtual functions is not "
- "allowed.\n", val);
- return -EINVAL;
-}
+lpfc_param_show(sriov_nr_virtfn)
static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
@@ -4251,7 +4179,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3016 fcp_imax: %d out of range, using default\n", val);
+ "3016 lpfc_fcp_imax: %d out of range, using default\n",
+ val);
phba->cfg_fcp_imax = LPFC_DEF_IMAX;
return 0;
@@ -4401,8 +4330,8 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3326 fcp_cpu_map: %d out of range, using default\n",
- val);
+ "3326 lpfc_fcp_cpu_map: %d out of range, using "
+ "default\n", val);
phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
return 0;
@@ -4441,12 +4370,10 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
# to limit the I/O completion time to the parameter value.
# The value is set in milliseconds.
*/
-static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
"Use command completion time to control queue depth");
+
lpfc_vport_param_show(max_scsicmpl_time);
-lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
{
@@ -4691,12 +4618,15 @@ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
# HBA supports DIX Type 1: Host to HBA Type 1 protection
#
*/
-unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIX_TYPE0_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION;
-
-module_param(lpfc_prot_mask, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+LPFC_ATTR(prot_mask,
+ (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION),
+ 0,
+ (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION),
+ "T10-DIF host protection capabilities mask");
/*
# lpfc_prot_guard: i
@@ -4706,9 +4636,9 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
# - Default will result in registering capabilities for all guard types
#
*/
-unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+LPFC_ATTR(prot_guard,
+ SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
+ "T10-DIF host protection guard type");
/*
* Delay initial NPort discovery when Clean Address bit is cleared in
@@ -5828,6 +5758,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_oas_flags = 0;
phba->cfg_oas_priority = 0;
lpfc_enable_bg_init(phba, lpfc_enable_bg);
+ lpfc_prot_mask_init(phba, lpfc_prot_mask);
+ lpfc_prot_guard_init(phba, lpfc_prot_guard);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_poll = 0;
else
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 05dcc2abd541..7dca4d6a8883 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -97,7 +98,7 @@ struct lpfc_bsg_menlo {
#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
- struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
+ struct bsg_job *set_job; /* job waiting for this iocb to finish */
union {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
@@ -211,7 +212,7 @@ lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
- struct fc_bsg_buffer *bsg_buffers,
+ struct bsg_buffer *bsg_buffers,
unsigned int bytes_to_transfer, int to_buffers)
{
@@ -297,7 +298,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
@@ -312,6 +314,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -350,7 +353,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
@@ -367,8 +370,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
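The completion-side pattern repeated throughout this file's conversion: the job is now a struct bsg_job, its reply is reached through job->reply, and completion goes through bsg_job_done() instead of the old job_done callback. Side by side:

/* Old fc_bsg_job completion: */
	job->reply->result = rc;
	job->job_done(job);

/* New bsg_job completion (bsg_reply == job->reply): */
	bsg_reply->result = rc;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);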
@@ -378,12 +382,13 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct ulp_bde64 *bpl = NULL;
uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -398,7 +403,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
int iocb_stat;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -542,7 +547,7 @@ no_ndlp:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
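The request side of the conversion is just as mechanical: the Scsi_Host and fc_rport are reached through the fc_bsg_to_shost() and fc_bsg_to_rport() accessors rather than job->shost/job->rport, and the request and reply payloads come from job->request and job->reply. A condensed sketch of the new access pattern, as used in the handlers below:

struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct lpfc_vport *vport = shost_priv(shost);
struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;

bsg_reply->reply_payload_rcv_len = 0;	/* in case no data is transferred */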
@@ -570,7 +575,8 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
@@ -588,6 +594,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -609,17 +616,17 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
if (job) {
if (rsp->ulpStatus == IOSTAT_SUCCESS) {
rsp_size = rsp->un.elsreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
- els_reply = &job->reply->reply_data.ctels_reply;
+ els_reply = &bsg_reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
@@ -637,8 +644,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -648,12 +656,14 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_rport_els(struct fc_bsg_job *job)
+lpfc_bsg_rport_els(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t elscmd;
uint32_t cmdsize;
struct lpfc_iocbq *cmdiocbq;
@@ -664,7 +674,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* verify the els command is not greater than the
* maximum ELS transfer size.
@@ -684,7 +694,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
goto no_dd_data;
}
- elscmd = job->request->rqst_data.r_els.els_code;
+ elscmd = bsg_request->rqst_data.r_els.els_code;
cmdsize = job->request_payload.payload_len;
if (!lpfc_nlp_get(ndlp)) {
@@ -771,7 +781,7 @@ free_dd_data:
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -917,7 +927,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
struct lpfc_hbq_entry *hbqe;
struct lpfc_sli_ct_request *ct_req;
- struct fc_bsg_job *job = NULL;
+ struct bsg_job *job = NULL;
+ struct fc_bsg_reply *bsg_reply;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
@@ -1120,13 +1131,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dd_data->set_job = NULL;
lpfc_bsg_event_unref(evt);
if (job) {
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply = job->reply;
+ bsg_reply->reply_payload_rcv_len = size;
/* make error code available to userspace */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
/* complete the job back to userspace */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
}
}
@@ -1187,10 +1200,11 @@ lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
* @job: SET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
struct set_ct_event *event_req;
struct lpfc_bsg_event *evt;
int rc = 0;
@@ -1208,7 +1222,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
}
event_req = (struct set_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
FC_REG_EVENT_MASK);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1271,10 +1285,12 @@ job_error:
* @job: GET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
struct lpfc_bsg_event *evt, *evt_next;
@@ -1292,10 +1308,10 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
}
event_req = (struct get_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
event_reply = (struct get_ct_event_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
@@ -1315,7 +1331,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
* an error indicating that there isn't anymore
*/
if (evt_dat == NULL) {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
rc = -ENOENT;
goto job_error;
}
@@ -1331,12 +1347,12 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
event_reply->type = evt_dat->type;
event_reply->immed_data = evt_dat->immed_dat;
if (evt_dat->len > 0)
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
evt_dat->data, evt_dat->len);
else
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (evt_dat) {
kfree(evt_dat->data);
@@ -1347,13 +1363,14 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
lpfc_bsg_event_unref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->dd_data = NULL;
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
job_error:
job->dd_data = NULL;
- job->reply->result = rc;
+ bsg_reply->result = rc;
return rc;
}
@@ -1380,7 +1397,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
@@ -1411,6 +1429,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Copy the completed job data or set the error status */
if (job) {
+ bsg_reply = job->reply;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
@@ -1428,7 +1447,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
rc = -EACCES;
}
} else {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
}
}
@@ -1442,8 +1461,9 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -1457,7 +1477,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
* @num_entry: Number of entries in the bde.
**/
static int
-lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
@@ -1603,12 +1623,14 @@ no_dd_data:
* @job: SEND_MGMT_RESP fc_bsg_job
**/
static int
-lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
int bpl_entries;
@@ -1618,7 +1640,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
rc = -ERANGE;
@@ -1664,7 +1686,7 @@ send_mgmt_rsp_free_bmp:
kfree(bmp);
send_mgmt_rsp_exit:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -1760,8 +1782,10 @@ lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
* All of this is done in-line.
*/
static int
-lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
uint32_t link_flags;
uint32_t timeout;
@@ -1771,7 +1795,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
int rc = 0;
/* no data to return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -1791,7 +1815,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
/* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
@@ -1864,10 +1888,11 @@ loopback_mode_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2015,14 +2040,16 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
* loopback mode in order to perform a diagnostic loopback test.
*/
static int
-lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
uint32_t link_flags, timeout;
int i, rc = 0;
/* no data to return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -2054,7 +2081,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3129 Bring link to diagnostic state.\n");
loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
@@ -2151,10 +2178,11 @@ loopback_mode_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2166,17 +2194,17 @@ job_error:
* command from the user to proper driver action routines.
*/
static int
-lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
int rc;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2202,8 +2230,10 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
* command from the user to proper driver action routines.
*/
static int
-lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -2211,10 +2241,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
uint32_t timeout;
int rc, i;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2232,7 +2262,7 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
phba->link_flag &= ~LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
loopback_mode_end_cmd = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
timeout = loopback_mode_end_cmd->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
@@ -2263,10 +2293,11 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
loopback_mode_end_exit:
/* make return code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2278,8 +2309,10 @@ loopback_mode_end_exit:
* application.
*/
static int
-lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -2292,12 +2325,12 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
struct diag_status *diag_status_reply;
int mbxstatus, rc = 0;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost) {
rc = -ENODEV;
goto job_error;
}
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport) {
rc = -ENODEV;
goto job_error;
@@ -2335,7 +2368,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
goto job_error;
link_diag_test_cmd = (struct sli4_link_diag *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
@@ -2385,7 +2418,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
}
diag_status_reply = (struct diag_status *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
@@ -2413,10 +2446,11 @@ link_diag_test_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2982,9 +3016,10 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct lpfc_bsg_event *evt;
struct event_data *evdat;
@@ -3012,7 +3047,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint32_t total_mem;
/* in case no data is returned return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
@@ -3237,11 +3272,11 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = IOCB_SUCCESS;
/* skip over elx loopback header */
rx_databuf += ELX_LOOPBACK_HEADER_SZ;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
rx_databuf, size);
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply->reply_payload_rcv_len = size;
}
}
@@ -3271,11 +3306,12 @@ err_loopback_test_exit:
loopback_test_exit:
kfree(dataout);
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
/* complete the job back to userspace if no error */
if (rc == IOCB_SUCCESS)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -3284,9 +3320,10 @@ loopback_test_exit:
* @job: GET_DFC_REV fc_bsg_job
**/
static int
-lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+lpfc_bsg_get_dfc_rev(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct get_mgmt_rev_reply *event_reply;
int rc = 0;
@@ -3301,7 +3338,7 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
}
event_reply = (struct get_mgmt_rev_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
@@ -3315,9 +3352,10 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
- job->reply->result = rc;
+ bsg_reply->result = rc;
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -3336,7 +3374,8 @@ static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
+ struct bsg_job *job;
uint32_t size;
unsigned long flags;
uint8_t *pmb, *pmb_buf;
@@ -3364,8 +3403,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Copy the mailbox data to the job if it is still active */
if (job) {
+ bsg_reply = job->reply;
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
@@ -3379,8 +3419,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -3510,11 +3551,12 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
* This routine handles BSG jobs for mailbox command completions with
* multiple external buffers.
**/
-static struct fc_bsg_job *
+static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
uint8_t *pmb, *pmb_buf;
unsigned long flags;
uint32_t size;
@@ -3529,6 +3571,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -3559,13 +3602,13 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
if (job) {
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
/* result for successful */
- job->reply->result = 0;
+ bsg_reply->result = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer maibox command "
@@ -3603,7 +3646,8 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3623,9 +3667,11 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
-
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3640,7 +3686,8 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3658,8 +3705,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
lpfc_bsg_mbox_ext_session_reset(phba);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3768,10 +3818,11 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
* non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct dfc_mbox_req *mbox_req;
struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
@@ -3784,7 +3835,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
int rc, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3955,10 +4006,12 @@ job_error:
* non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct dfc_mbox_req *mbox_req;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint32_t ext_buf_cnt;
@@ -3969,7 +4022,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
int rc = SLI_CONFIG_NOT_HANDLED, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -4096,8 +4149,9 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* wait for additional external buffers */
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
@@ -4119,7 +4173,7 @@ job_error:
* with embedded subsystem 0x1 and opcodes with external HBDs.
**/
static int
-lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -4268,8 +4322,9 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
* user space through BSG.
**/
static int
-lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct lpfc_dmabuf *dmabuf;
uint8_t *pbuf;
@@ -4307,7 +4362,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
dmabuf, index);
pbuf = (uint8_t *)dmabuf->virt;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pbuf, size);
@@ -4321,8 +4376,9 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_bsg_mbox_ext_session_reset(phba);
}
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
}
@@ -4336,9 +4392,10 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
* from user space through BSG.
**/
static int
-lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
@@ -4436,8 +4493,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* wait for additional external buffers */
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
@@ -4457,7 +4515,7 @@ job_error:
* command with multiple non-embedded external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
int rc;
@@ -4502,14 +4560,15 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
* (0x9B) mailbox commands and external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct dfc_mbox_req *mbox_req;
int rc = SLI_CONFIG_NOT_HANDLED;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* mbox command with/without single external buffer */
if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
@@ -4579,9 +4638,11 @@ sli_cfg_ext_error:
* let our completion handler finish the command.
**/
static int
-lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_vport *vport)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
@@ -4600,7 +4661,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t size;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* sanity check to protect driver */
if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
@@ -4619,7 +4680,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
@@ -4841,7 +4902,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* job finished, copy the data */
memcpy(pmbx, pmb, sizeof(*pmb));
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmbx, size);
@@ -4870,15 +4931,17 @@ job_cont:
* @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
**/
static int
-lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+lpfc_bsg_mbox_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct dfc_mbox_req *mbox_req;
int rc = 0;
/* mix-and-match backward compatibility */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -4889,7 +4952,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
sizeof(struct fc_bsg_request)),
(int)sizeof(struct dfc_mbox_req));
mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
mbox_req->extMboxTag = 0;
mbox_req->extSeqNum = 0;
}
@@ -4898,15 +4961,16 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
if (rc == 0) {
/* job done */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
} else if (rc == 1)
/* job submitted, will complete later*/
rc = 0; /* return zero, no error */
else {
/* some error occurred */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
}
@@ -4936,7 +5000,8 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_bsg_menlo *menlo;
@@ -4956,6 +5021,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -4970,7 +5036,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
*/
menlo_resp = (struct menlo_response *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
menlo_resp->xri = rsp->ulpContext;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
@@ -4990,7 +5056,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
@@ -5007,8 +5073,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
/* Complete the job if active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
@@ -5024,9 +5091,11 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
* supplied in the menlo request header xri field.
**/
static int
-lpfc_menlo_cmd(struct fc_bsg_job *job)
+lpfc_menlo_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd;
@@ -5039,7 +5108,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
struct ulp_bde64 *bpl = NULL;
/* in case no data is returned return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) +
@@ -5069,7 +5138,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
}
menlo_cmd = (struct menlo_command *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -5180,19 +5249,65 @@ free_dd:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
+static int
+lpfc_forced_link_speed(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct forced_link_speed_support_reply *forced_reply;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct get_forced_link_speed_support)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0048 Received FORCED_LINK_SPEED request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply = (struct forced_link_speed_support_reply *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct forced_link_speed_support_reply)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0049 Received FORCED_LINK_SPEED reply below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ ? LPFC_FORCED_LINK_SPEED_SUPPORTED
+ : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
+job_error:
+ bsg_reply->result = rc;
+ if (rc == 0)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return rc;
+}
+
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+lpfc_bsg_hst_vendor(struct bsg_job *job)
{
- int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
int rc;
switch (command) {
@@ -5227,11 +5342,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
case LPFC_BSG_VENDOR_MENLO_DATA:
rc = lpfc_menlo_cmd(job);
break;
+ case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
+ rc = lpfc_forced_link_speed(job);
+ break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
@@ -5243,12 +5361,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
* @job: fc_bsg_job to handle
**/
int
-lpfc_bsg_request(struct fc_bsg_job *job)
+lpfc_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t msgcode;
int rc;
- msgcode = job->request->msgcode;
+ msgcode = bsg_request->msgcode;
switch (msgcode) {
case FC_BSG_HST_VENDOR:
rc = lpfc_bsg_hst_vendor(job);
@@ -5261,9 +5381,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
@@ -5278,9 +5398,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
* the waiting function which will handle passing the error back to userspace
**/
int
-lpfc_bsg_timeout(struct fc_bsg_job *job)
+lpfc_bsg_timeout(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
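
The hunks above follow a single conversion pattern: lpfc's BSG handlers now take a struct bsg_job instead of the old struct fc_bsg_job, reach the payloads through local fc_bsg_request/fc_bsg_reply pointers taken from job->request and job->reply, and complete through bsg_job_done() instead of the removed job->job_done() callback. The stand-alone C sketch below (not part of the patch) models that calling convention in user space; sample_job, sample_request, sample_reply, sample_job_done and handle_vendor_cmd are illustrative placeholders, not lpfc or block-layer symbols.

#include <stdio.h>

/* Minimal stand-ins for the request/reply blobs carried by a bsg job. */
struct sample_request { int vendor_cmd; };
struct sample_reply   { int result; unsigned int reply_payload_rcv_len; };

/* Model of struct bsg_job: opaque request/reply pointers plus driver data. */
struct sample_job {
	void *request;   /* points at struct sample_request */
	void *reply;     /* points at struct sample_reply */
	void *dd_data;   /* driver-private tracking structure */
};

/* Model of bsg_job_done(): the handler reports result and received length. */
static void sample_job_done(struct sample_job *job, int result,
			    unsigned int rcv_len)
{
	printf("job done: result=%d rcv_len=%u\n", result, rcv_len);
}

/* Shape of a converted handler: take typed pointers once, then use them. */
static int handle_vendor_cmd(struct sample_job *job)
{
	struct sample_request *bsg_request = job->request;
	struct sample_reply *bsg_reply = job->reply;
	int rc = (bsg_request->vendor_cmd == 42) ? 0 : -22; /* -EINVAL */

	bsg_reply->result = rc;
	if (rc == 0)
		sample_job_done(job, bsg_reply->result,
				bsg_reply->reply_payload_rcv_len);
	return rc;
}

int main(void)
{
	struct sample_request req = { .vendor_cmd = 42 };
	struct sample_reply rep = { 0 };
	struct sample_job job = { .request = &req, .reply = &rep };

	return handle_vendor_cmd(&job) ? 1 : 0;
}

Keeping the typed request/reply pointers in locals, as the patch does, avoids repeating the casts at every use and makes the completion call read the same in every handler.
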
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index e557bcdbcb19..f2247aa4fa17 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -35,6 +35,7 @@
#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
+#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
struct set_ct_event {
uint32_t command;
@@ -284,6 +285,15 @@ struct lpfc_sli_config_mbox {
} un;
};
+#define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED 0
+#define LPFC_FORCED_LINK_SPEED_SUPPORTED 1
+struct get_forced_link_speed_support {
+ uint32_t command;
+};
+struct forced_link_speed_support_reply {
+ uint8_t supported;
+};
+
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bd7576d452f2..15d2bfdf582d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -397,8 +397,6 @@ extern spinlock_t _dump_buf_lock;
extern int _dump_buf_done;
extern spinlock_t pgcnt_lock;
extern unsigned int pgcnt;
-extern unsigned int lpfc_prot_mask;
-extern unsigned char lpfc_prot_guard;
extern unsigned int lpfc_fcp_look_ahead;
/* Interface exported by fabric iocb scheduler */
@@ -431,8 +429,8 @@ struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
#define HBA_EVENT_LINK_DOWN 3
/* functions to support SGIOv4/bsg interface */
-int lpfc_bsg_request(struct fc_bsg_job *);
-int lpfc_bsg_timeout(struct fc_bsg_job *);
+int lpfc_bsg_request(struct bsg_job *);
+int lpfc_bsg_timeout(struct bsg_job *);
int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b7d54bfb1df9..236e4e51d161 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7610,7 +7610,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* reject till our FLOGI completes */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
(cmd != ELS_CMD_FLOGI)) {
- rjt_err = LSRJT_UNABLE_TPC;
+ rjt_err = LSRJT_LOGICAL_BSY;
rjt_exp = LSEXP_NOTHING_MORE;
goto lsrjt;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index ee8022737591..5646699b0516 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -921,6 +921,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
+#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D
#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
@@ -2289,6 +2290,9 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
+#define lpfc_mbx_rd_conf_link_speed_SHIFT 16
+#define lpfc_mbx_rd_conf_link_speed_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_link_speed_WORD word6
uint32_t rsvd_7;
uint32_t rsvd_8;
uint32_t word9;
@@ -2919,6 +2923,16 @@ struct lpfc_mbx_set_feature {
};
+#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2
+struct lpfc_mbx_set_host_data {
+#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48
+ struct mbox_header header;
+ uint32_t param_id;
+ uint32_t param_len;
+ uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+};
+
+
struct lpfc_mbx_get_sli4_parameters {
struct mbox_header header;
struct lpfc_sli4_parameters sli4_parameters;
@@ -3313,6 +3327,7 @@ struct lpfc_mqe {
struct lpfc_mbx_get_port_name get_port_name;
struct lpfc_mbx_set_feature set_feature;
struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
+ struct lpfc_mbx_set_host_data set_host_data;
struct lpfc_mbx_nop nop;
} un;
};
@@ -3981,7 +3996,8 @@ union lpfc_wqe128 {
struct gen_req64_wqe gen_req;
};
-#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G5 0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G6 0xfeaa0003
#define LPFC_FILE_TYPE_GROUP 0xf7
#define LPFC_FILE_ID_GROUP 0xa2
struct lpfc_grp_hdr {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a0428ef0e..4776fd85514f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6279,34 +6279,36 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
uint32_t old_guard;
int pagecnt = 10;
- if (lpfc_prot_mask && lpfc_prot_guard) {
+ if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1478 Registering BlockGuard with the "
"SCSI layer\n");
- old_mask = lpfc_prot_mask;
- old_guard = lpfc_prot_guard;
+ old_mask = phba->cfg_prot_mask;
+ old_guard = phba->cfg_prot_guard;
/* Only allow supported values */
- lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
+ phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIX_TYPE0_PROTECTION |
SHOST_DIX_TYPE1_PROTECTION);
- lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
+ phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
+ SHOST_DIX_GUARD_CRC);
/* DIF Type 1 protection for profiles AST1/C1 is end to end */
- if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
- lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
+ if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
+ phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
- if (lpfc_prot_mask && lpfc_prot_guard) {
- if ((old_mask != lpfc_prot_mask) ||
- (old_guard != lpfc_prot_guard))
+ if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
+ if ((old_mask != phba->cfg_prot_mask) ||
+ (old_guard != phba->cfg_prot_guard))
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1475 Registering BlockGuard with the "
"SCSI layer: mask %d guard %d\n",
- lpfc_prot_mask, lpfc_prot_guard);
+ phba->cfg_prot_mask,
+ phba->cfg_prot_guard);
- scsi_host_set_prot(shost, lpfc_prot_mask);
- scsi_host_set_guard(shost, lpfc_prot_guard);
+ scsi_host_set_prot(shost, phba->cfg_prot_mask);
+ scsi_host_set_guard(shost, phba->cfg_prot_guard);
} else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1479 Not Registering BlockGuard with the SCSI "
@@ -6929,6 +6931,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
struct lpfc_mbx_get_func_cfg *get_func_cfg;
struct lpfc_rsrc_desc_fcfcoe *desc;
char *pdesc_0;
+ uint16_t forced_link_speed;
+ uint32_t if_type;
int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -7022,6 +7026,58 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
if (rc)
goto read_cfg_out;
+ /* Update link speed if forced link speed is supported */
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ forced_link_speed =
+ bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
+ if (forced_link_speed) {
+ phba->hba_flag |= HBA_FORCED_LINK_SPEED;
+
+ switch (forced_link_speed) {
+ case LINK_SPEED_1G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_1G;
+ break;
+ case LINK_SPEED_2G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_2G;
+ break;
+ case LINK_SPEED_4G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_4G;
+ break;
+ case LINK_SPEED_8G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_8G;
+ break;
+ case LINK_SPEED_10G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_10G;
+ break;
+ case LINK_SPEED_16G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_16G;
+ break;
+ case LINK_SPEED_32G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_32G;
+ break;
+ case 0xffff:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_AUTO;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0047 Unrecognized link "
+ "speed : %d\n",
+ forced_link_speed);
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_AUTO;
+ }
+ }
+ }
+
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
length = phba->sli4_hba.max_cfg_param.max_xri -
lpfc_sli4_get_els_iocb_cnt(phba);
@@ -7256,6 +7312,7 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
+ uint32_t wqesize;
int idx;
/*
@@ -7340,15 +7397,10 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.fcp_cq[idx] = qdesc;
/* Create Fast Path FCP WQs */
- if (phba->fcp_embed_io) {
- qdesc = lpfc_sli4_queue_alloc(phba,
- LPFC_WQE128_SIZE,
- LPFC_WQE128_DEF_COUNT);
- } else {
- qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
- }
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP "
@@ -10260,6 +10312,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
int i, rc = 0;
struct lpfc_dmabuf *dmabuf, *next;
uint32_t offset = 0, temp_offset = 0;
+ uint32_t magic_number, ftype, fid, fsize;
/* It can be null in no-wait mode, sanity check */
if (!fw) {
@@ -10268,18 +10321,19 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
}
image = (struct lpfc_grp_hdr *)fw->data;
+ magic_number = be32_to_cpu(image->magic_number);
+ ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
+ fid = bf_get_be32(lpfc_grp_hdr_id, image),
+ fsize = be32_to_cpu(image->size);
+
INIT_LIST_HEAD(&dma_buffer_list);
- if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
- (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
- LPFC_FILE_TYPE_GROUP) ||
- (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
- (be32_to_cpu(image->size) != fw->size)) {
+ if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
+ magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
+ ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3022 Invalid FW image found. "
- "Magic:%x Type:%x ID:%x\n",
- be32_to_cpu(image->magic_number),
- bf_get_be32(lpfc_grp_hdr_file_type, image),
- bf_get_be32(lpfc_grp_hdr_id, image));
+ "Magic:%x Type:%x ID:%x Size %d %zd\n",
+ magic_number, ftype, fid, fsize, fw->size);
rc = -EINVAL;
goto release_out;
}
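
The lpfc_sli4_read_config() hunk above maps the firmware-reported forced link speed code onto the driver's LPFC_USER_LINK_SPEED_* values with a switch and falls back to auto for unknown codes. The same mapping can also be written as a small lookup table; the stand-alone sketch below (not part of the patch) shows that alternative under invented names and values, so fw_speed, usr_speed, speed_map and map_forced_speed are illustrative only and do not match the kernel's LINK_SPEED_* encodings.

#include <stdio.h>

/* Illustrative stand-ins for the firmware codes and user-speed values. */
enum fw_speed  { FW_1G = 1, FW_2G, FW_4G, FW_8G, FW_10G, FW_16G, FW_32G };
enum usr_speed { USR_AUTO = 0, USR_1G, USR_2G, USR_4G, USR_8G, USR_10G,
		 USR_16G, USR_32G };

struct speed_map { unsigned int fw; enum usr_speed usr; };

static const struct speed_map speed_map[] = {
	{ FW_1G,  USR_1G  }, { FW_2G,  USR_2G  }, { FW_4G,  USR_4G  },
	{ FW_8G,  USR_8G  }, { FW_10G, USR_10G }, { FW_16G, USR_16G },
	{ FW_32G, USR_32G }, { 0xffff, USR_AUTO },
};

/* Unrecognized codes fall back to auto, matching the switch's default arm. */
static enum usr_speed map_forced_speed(unsigned int fw_code)
{
	size_t i;

	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++)
		if (speed_map[i].fw == fw_code)
			return speed_map[i].usr;
	return USR_AUTO;
}

int main(void)
{
	printf("16G maps to %d, unknown maps to %d\n",
	       map_forced_speed(FW_16G), map_forced_speed(0x1234));
	return 0;
}

Whether a switch or a table reads better is largely taste; the table keeps the fallback-to-auto rule in one place if new speeds are added later.
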
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d197aa176dee..ad350d969bdc 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -413,15 +413,13 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
* struct fcp_cmnd, struct fcp_rsp and the number of bde's
* necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
break;
}
- /* Initialize virtual ptrs to dma_buf region. */
- memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
@@ -607,7 +605,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list
+ * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
* @phba: pointer to lpfc hba data structure.
* @post_sblist: pointer to the scsi buffer list.
*
@@ -736,7 +734,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls
+ * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
* @phba: pointer to lpfc hba data structure.
*
* This routine walks the list of scsi buffers that have been allocated and
@@ -821,13 +819,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
* for the struct fcp_cmnd, struct fcp_rsp and the number
* of bde's necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
break;
}
- memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/*
* 4K Page alignment is CRITICAL to BlockGuard, double check
@@ -857,7 +854,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
psb->data, psb->dma_handle);
kfree(psb);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "3368 Failed to allocated IOTAG for"
+ "3368 Failed to allocate IOTAG for"
" XRI:0x%x\n", lxri);
lpfc_sli4_free_xri(phba, lxri);
break;
@@ -1136,7 +1133,7 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
*
* This routine does the pci dma mapping for scatter-gather list of scsi cmnd
* field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
- * through sg elements and format the bdea. This routine also initializes all
+ * through sg elements and format the bde. This routine also initializes all
* IOCB fields which are dependent on scsi command request buffer.
*
* Return codes:
@@ -1269,13 +1266,16 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-/* Return if if error injection is detected by Initiator */
+/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT 0x1
-/* Return if if error injection is detected by Target */
+/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT 0x2
-/* Return if if swapping CSUM<-->CRC is required for error injection */
+/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP 0x10
-/* Return if disabling Guard/Ref/App checking is required for error injection */
+/**
+ * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
+ * error injection
+ **/
#define BG_ERR_CHECK 0x20
/**
@@ -4139,13 +4139,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
- /* The sdev is not guaranteed to be valid post scsi_done upcall. */
- cmd->scsi_done(cmd);
-
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_cmd->pCmd = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* The sdev is not guaranteed to be valid post scsi_done upcall. */
+ cmd->scsi_done(cmd);
+
/*
* If there is a thread waiting for command completion
* wake up the thread.
@@ -4822,7 +4822,7 @@ wait_for_cmpl:
ret = FAILED;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0748 abort handler timed out waiting "
- "for abortng I/O (xri:x%x) to complete: "
+ "for aborting I/O (xri:x%x) to complete: "
"ret %#x, ID %d, LUN %llu\n",
iocb->sli4_xritag, ret,
cmnd->device->id, cmnd->device->lun);
@@ -4945,26 +4945,30 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
* 0x2002 - Success.
**/
static int
-lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
- unsigned tgt_id, uint64_t lun_id,
- uint8_t task_mgmt_cmd)
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+ unsigned int tgt_id, uint64_t lun_id,
+ uint8_t task_mgmt_cmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
- struct lpfc_nodelist *pnode = rdata->pnode;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
int ret;
int status;
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED;
+ pnode = rdata->pnode;
- lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
+ lpfc_cmd->pCmd = cmnd;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);
@@ -5171,7 +5175,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_LUN_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5249,7 +5253,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_TARGET_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5328,7 +5332,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (!match)
continue;
- status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
+ status = lpfc_send_taskmgmt(vport, cmnd,
i, 0, FCP_TARGET_RESET);
if (status != SUCCESS) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f4f77c5b0c83..4faa7672fc1d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -47,6 +47,7 @@
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
+#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
@@ -2678,15 +2679,16 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- list_del_init(&cmd_iocb->list);
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ /* remove from txcmpl queue list */
+ list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ return cmd_iocb;
}
- return cmd_iocb;
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0317 iotag x%x is out off "
+ "0317 iotag x%x is out of "
"range: max iotag x%x wd0 x%x\n",
iotag, phba->sli.last_iotag,
*(((uint32_t *) &prspiocb->iocb) + 7));
@@ -2721,8 +2723,9 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
return cmd_iocb;
}
}
+
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0372 iotag x%x is out off range: max iotag (x%x)\n",
+ "0372 iotag x%x is out of range: max iotag (x%x)\n",
iotag, phba->sli.last_iotag);
return NULL;
}
@@ -6291,6 +6294,25 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
return 0;
}
+void
+lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ uint32_t len;
+
+ len = sizeof(struct lpfc_mbx_set_host_data) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+ LPFC_SLI4_MBX_EMBED);
+
+ mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
+ mbox->u.mqe.un.set_host_data.param_len = 8;
+ snprintf(mbox->u.mqe.un.set_host_data.data,
+ LPFC_HOST_OS_DRIVER_VERSION_SIZE,
+ "Linux %s v"LPFC_DRIVER_VERSION,
+ (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
+}
+
/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
@@ -6542,6 +6564,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ lpfc_set_host_data(phba, mboxq);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "2134 Failed to set host os driver version %x",
+ rc);
+ }
+
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
if (rc) {
@@ -11781,6 +11812,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ /* Put the iocb back on the txcmplq */
+ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c9bf20eb7223..50bfc43ebcb0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.0."
+#define LPFC_DRIVER_VERSION "11.2.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index a590089b9397..ccb68d12692c 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -28,17 +28,15 @@
/* Definitions for the core NCR5380 driver. */
-#define NCR5380_implementation_fields unsigned char *pdma_base; \
- int pdma_residual
+#define NCR5380_implementation_fields int pdma_residual
-#define NCR5380_read(reg) macscsi_read(instance, reg)
-#define NCR5380_write(reg, value) macscsi_write(instance, reg, value)
+#define NCR5380_read(reg) in_8(hostdata->io + ((reg) << 4))
+#define NCR5380_write(reg, value) out_8(hostdata->io + ((reg) << 4), value)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- macscsi_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len macscsi_dma_xfer_len
#define NCR5380_dma_recv_setup macscsi_pread
#define NCR5380_dma_send_setup macscsi_pwrite
-#define NCR5380_dma_residual(instance) (hostdata->pdma_residual)
+#define NCR5380_dma_residual macscsi_dma_residual
#define NCR5380_intr macscsi_intr
#define NCR5380_queue_command macscsi_queue_command
@@ -61,20 +59,6 @@ module_param(setup_hostid, int, 0);
static int setup_toshiba_delay = -1;
module_param(setup_toshiba_delay, int, 0);
-/*
- * NCR 5380 register access functions
- */
-
-static inline char macscsi_read(struct Scsi_Host *instance, int reg)
-{
- return in_8(instance->base + (reg << 4));
-}
-
-static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value)
-{
- out_8(instance->base + (reg << 4), value);
-}
-
#ifndef MODULE
static int __init mac_scsi_setup(char *str)
{
@@ -167,16 +151,15 @@ __asm__ __volatile__ \
: "0"(s), "1"(d), "2"(n) \
: "d0")
-static int macscsi_pread(struct Scsi_Host *instance,
- unsigned char *dst, int len)
+static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+ unsigned char *dst, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
+ unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
unsigned char *d = dst;
int n = len;
int transferred;
- while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
CP_IO_TO_MEM(s, d, n);
@@ -189,23 +172,23 @@ static int macscsi_pread(struct Scsi_Host *instance,
return 0;
/* Target changed phase early? */
- if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
return 0;
- dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
"%s: bus error (%d/%d)\n", __func__, transferred, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
d = dst + transferred;
n = len - transferred;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
return -1;
}
@@ -270,16 +253,15 @@ __asm__ __volatile__ \
: "0"(s), "1"(d), "2"(n) \
: "d0")
-static int macscsi_pwrite(struct Scsi_Host *instance,
- unsigned char *src, int len)
+static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+ unsigned char *src, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *s = src;
- unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
+ unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
int n = len;
int transferred;
- while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
CP_MEM_TO_IO(s, d, n);
@@ -288,7 +270,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
hostdata->pdma_residual = len - transferred;
/* Target changed phase early? */
- if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
@@ -297,7 +279,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
/* No bus error. */
if (n == 0) {
- if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG,
+ if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT,
TCR_LAST_BYTE_SENT, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
@@ -305,25 +287,23 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
return 0;
}
- dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
"%s: bus error (%d/%d)\n", __func__, transferred, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
s = src + transferred;
n = len - transferred;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
return -1;
}
-static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
+static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
cmd->SCp.this_residual < 16)
return 0;
@@ -331,6 +311,11 @@ static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
return cmd->SCp.this_residual;
}
+static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata)
+{
+ return hostdata->pdma_residual;
+}
+
#include "NCR5380.c"
#define DRV_MODULE_NAME "mac_scsi"
@@ -356,6 +341,7 @@ static struct scsi_host_template mac_scsi_template = {
static int __init mac_scsi_probe(struct platform_device *pdev)
{
struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
int error;
int host_flags = 0;
struct resource *irq, *pio_mem, *pdma_mem = NULL;
@@ -388,17 +374,18 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
if (!instance)
return -ENOMEM;
- instance->base = pio_mem->start;
if (irq)
instance->irq = irq->start;
else
instance->irq = NO_IRQ;
- if (pdma_mem && setup_use_pdma) {
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ hostdata = shost_priv(instance);
+ hostdata->base = pio_mem->start;
+ hostdata->io = (void *)pio_mem->start;
- hostdata->pdma_base = (unsigned char *)pdma_mem->start;
- } else
+ if (pdma_mem && setup_use_pdma)
+ hostdata->pdma_io = (void *)pdma_mem->start;
+ else
host_flags |= FLAG_NO_PSEUDO_DMA;
host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
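
The mac_scsi changes above move the NCR5380 register and pseudo-DMA accessors from a struct Scsi_Host argument to the per-host NCR5380_hostdata, with registers addressed as hostdata->io plus (reg << 4). The stand-alone sketch below (not part of the patch) models that addressing scheme against an ordinary in-memory array; host_priv, reg_read, reg_write and REG_STRIDE_SHIFT are invented names, and real hardware access would go through MMIO accessors such as in_8()/out_8() rather than plain loads and stores.

#include <stdio.h>
#include <stdint.h>

#define REG_STRIDE_SHIFT 4   /* mirrors the (reg << 4) spacing in the hunks */
#define NUM_REGS 8

/* Model of the per-host private data holding the mapped register window. */
struct host_priv {
	volatile uint8_t *io;   /* base of the register window */
	int pdma_residual;
};

static uint8_t reg_read(struct host_priv *hostdata, int reg)
{
	return hostdata->io[reg << REG_STRIDE_SHIFT];
}

static void reg_write(struct host_priv *hostdata, int reg, uint8_t val)
{
	hostdata->io[reg << REG_STRIDE_SHIFT] = val;
}

int main(void)
{
	static uint8_t fake_window[NUM_REGS << REG_STRIDE_SHIFT];
	struct host_priv hostdata = { .io = fake_window };

	reg_write(&hostdata, 3, 0xa5);
	printf("reg 3 reads back 0x%02x\n", reg_read(&hostdata, 3));
	return 0;
}
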
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3aaea713bf37..fdd519c1dd57 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.811.02.00-rc1"
-#define MEGASAS_RELDATE "April 12, 2016"
+#define MEGASAS_VERSION "06.812.07.00-rc1"
+#define MEGASAS_RELDATE "August 22, 2016"
/*
* Device IDs
@@ -1429,6 +1429,8 @@ enum FW_BOOT_CONTEXT {
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
#define MR_MAX_MSIX_REG_ARRAY 16
#define MR_RDPQ_MODE_OFFSET 0X00800000
+#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
@@ -2118,7 +2120,6 @@ struct megasas_instance {
u32 ctrl_context_pages;
struct megasas_ctrl_info *ctrl_info;
unsigned int msix_vectors;
- struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
u64 map_id;
u64 pd_seq_map_id;
@@ -2140,6 +2141,7 @@ struct megasas_instance {
u8 is_imr;
u8 is_rdpq;
bool dev_handle;
+ bool fw_sync_cache_support;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d8b1fbd4c8aa..6484c382f670 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,11 +1700,8 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
goto out_done;
}
- /*
- * FW takes care of flush cache on its own for Virtual Disk.
- * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
- */
- if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) &&
+ (!instance->fw_sync_cache_support)) {
scmd->result = DID_OK << 16;
goto out_done;
}
@@ -4840,7 +4837,7 @@ fail_alloc_cmds:
}
/*
- * megasas_setup_irqs_msix - register legacy interrupts.
+ * megasas_setup_irqs_ioapic - register legacy interrupts.
* @instance: Adapter soft state
*
* Do not enable interrupt, only setup ISRs.
@@ -4855,8 +4852,9 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
pdev = instance->pdev;
instance->irq_context[0].instance = instance;
instance->irq_context[0].MSIxIndex = 0;
- if (request_irq(pdev->irq, instance->instancet->service_isr,
- IRQF_SHARED, "megasas", &instance->irq_context[0])) {
+ if (request_irq(pci_irq_vector(pdev, 0),
+ instance->instancet->service_isr, IRQF_SHARED,
+ "megasas", &instance->irq_context[0])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ from %s %d\n",
__func__, __LINE__);
@@ -4877,28 +4875,23 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
static int
megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
{
- int i, j, cpu;
+ int i, j;
struct pci_dev *pdev;
pdev = instance->pdev;
/* Try MSI-x */
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
- if (request_irq(instance->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(pdev, i),
instance->instancet->service_isr, 0, "megasas",
&instance->irq_context[i])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ for vector %d.\n", i);
- for (j = 0; j < i; j++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
- free_irq(instance->msixentry[j].vector,
- &instance->irq_context[j]);
- }
+ for (j = 0; j < i; j++)
+ free_irq(pci_irq_vector(pdev, j),
+ &instance->irq_context[j]);
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
if (is_probe)
@@ -4906,14 +4899,6 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
else
return -1;
}
- if (smp_affinity_enable) {
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev,
- "Failed to set affinity hint"
- " for cpu %d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
}
return 0;
}
@@ -4930,14 +4915,12 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
- free_irq(instance->msixentry[i].vector,
+ free_irq(pci_irq_vector(instance->pdev, i),
&instance->irq_context[i]);
}
else
- free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ free_irq(pci_irq_vector(instance->pdev, 0),
+ &instance->irq_context[0]);
}
/**
@@ -5095,6 +5078,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
0x4000000) >> 0x1a;
if (msix_enable && !msix_disable) {
+ int irq_flags = PCI_IRQ_MSIX;
+
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
@@ -5131,15 +5116,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Don't bother allocating more MSI-X vectors than cpus */
instance->msix_vectors = min(instance->msix_vectors,
(unsigned int)num_online_cpus());
- for (i = 0; i < instance->msix_vectors; i++)
- instance->msixentry[i].entry = i;
- i = pci_enable_msix_range(instance->pdev, instance->msixentry,
- 1, instance->msix_vectors);
+ if (smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ i = pci_alloc_irq_vectors(instance->pdev, 1,
+ instance->msix_vectors, irq_flags);
if (i > 0)
instance->msix_vectors = i;
else
instance->msix_vectors = 0;
}
+ i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (i < 0)
+ goto fail_setup_irqs;
dev_info(&instance->pdev->dev,
"firmware supports msix\t: (%d)", fw_msix_count);
@@ -5152,11 +5140,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- if (instance->msix_vectors ?
- megasas_setup_irqs_msix(instance, 1) :
- megasas_setup_irqs_ioapic(instance))
- goto fail_setup_irqs;
-
instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
GFP_KERNEL);
if (instance->ctrl_info == NULL)
@@ -5172,6 +5155,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
+ if (instance->msix_vectors ?
+ megasas_setup_irqs_msix(instance, 1) :
+ megasas_setup_irqs_ioapic(instance))
+ goto fail_init_adapter;
instance->instancet->enable_intr(instance);
@@ -5315,7 +5302,7 @@ fail_init_adapter:
megasas_destroy_irqs(instance);
fail_setup_irqs:
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
fail_ready_state:
kfree(instance->ctrl_info);
@@ -5584,7 +5571,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
/*
* Export parameters required by SCSI mid-layer
*/
- host->irq = instance->pdev->irq;
host->unique_id = instance->unique_id;
host->can_queue = instance->max_scsi_cmds;
host->this_id = instance->init_id;
@@ -5947,7 +5933,7 @@ fail_io_attach:
else
megasas_release_mfi(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
fail_alloc_dma_buf:
if (instance->evt_detail)
@@ -6105,7 +6091,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -6125,6 +6111,7 @@ megasas_resume(struct pci_dev *pdev)
int rval;
struct Scsi_Host *host;
struct megasas_instance *instance;
+ int irq_flags = PCI_IRQ_LEGACY;
instance = pci_get_drvdata(pdev);
host = instance->host;
@@ -6160,9 +6147,15 @@ megasas_resume(struct pci_dev *pdev)
goto fail_ready_state;
/* Now re-enable MSI-X */
- if (instance->msix_vectors &&
- pci_enable_msix_exact(instance->pdev, instance->msixentry,
- instance->msix_vectors))
+ if (instance->msix_vectors) {
+ irq_flags = PCI_IRQ_MSIX;
+ if (smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ }
+ rval = pci_alloc_irq_vectors(instance->pdev, 1,
+ instance->msix_vectors ?
+ instance->msix_vectors : 1, irq_flags);
+ if (rval < 0)
goto fail_reenable_msix;
if (instance->ctrl_context) {
@@ -6245,6 +6238,34 @@ fail_reenable_msix:
#define megasas_resume NULL
#endif
+static inline int
+megasas_wait_for_adapter_operational(struct megasas_instance *instance)
+{
+ int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
+ int i;
+
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+ return 1;
+
+ for (i = 0; i < wait_time; i++) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
+ break;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
+ dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
+
+ msleep(1000);
+ }
+
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
/**
* megasas_detach_one - PCI hot"un"plug entry point
* @pdev: PCI device structure
@@ -6269,9 +6290,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
+
+ if (megasas_wait_for_adapter_operational(instance))
+ goto skip_firing_dcmds;
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+skip_firing_dcmds:
/* cancel the delayed work if this work still in queue*/
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
@@ -6302,7 +6328,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
if (instance->ctrl_context) {
megasas_release_fusion(instance);
@@ -6385,13 +6411,19 @@ static void megasas_shutdown(struct pci_dev *pdev)
struct megasas_instance *instance = pci_get_drvdata(pdev);
instance->unload = 1;
+
+ if (megasas_wait_for_adapter_operational(instance))
+ goto skip_firing_dcmds;
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+skip_firing_dcmds:
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
}
/**
@@ -6752,8 +6784,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while"
- "waiting for HBA to recover\n");
+ dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
error = -ENODEV;
goto out_up;
}
@@ -6821,8 +6852,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
spin_lock_irqsave(&instance->hba_lock, flags);
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while waiting"
- "for HBA to recover\n");
+ dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
return -ENODEV;
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
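
In the megaraid_sas hunks above, megasas_detach_one() and megasas_shutdown() only fire the cache-flush and shutdown DCMDs after megasas_wait_for_adapter_operational() has seen the controller reach the operational state within a bounded wait, and branch to skip_firing_dcmds otherwise. The stand-alone sketch below (not part of the patch) shows the same bounded-poll-with-periodic-notice pattern; wait_for_ready, read_state and the timing constants are illustrative, and a kernel version would sleep with msleep(1000) between polls instead of iterating a fake state source.

#include <stdio.h>

#define WAIT_ITERATIONS  10   /* illustrative bound, not the driver's value */
#define NOTICE_INTERVAL  5    /* print a progress note every N iterations */

enum hba_state { HBA_RESETTING, HBA_OPERATIONAL, HBA_CRITICAL };

/* Fake state source: pretend the controller becomes ready on iteration 7. */
static enum hba_state read_state(int iteration)
{
	return iteration >= 7 ? HBA_OPERATIONAL : HBA_RESETTING;
}

/* Returns 0 when operational, 1 on critical error or timeout. */
static int wait_for_ready(void)
{
	int i;

	if (read_state(0) == HBA_CRITICAL)
		return 1;

	for (i = 0; i < WAIT_ITERATIONS; i++) {
		if (read_state(i) == HBA_OPERATIONAL)
			return 0;
		if (!(i % NOTICE_INTERVAL))
			printf("waiting for controller reset to finish\n");
		/* a kernel version would msleep(1000) here */
	}
	printf("timed out while waiting for HBA to recover\n");
	return 1;
}

int main(void)
{
	if (wait_for_ready())
		return 1;          /* skip firing DCMDs */
	printf("flush cache and send shutdown DCMD\n");
	return 0;
}
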
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e413113c86ac..f237d0003df3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -782,7 +782,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
- pd = MR_ArPdGet(arRef, physArm + 1, map);
+ physArm = physArm + 1;
+ pd = MR_ArPdGet(arRef, physArm, map);
if (pd != MR_PD_INVALID)
*pDevHandle = MR_PdDevHandleGet(pd, map);
}
@@ -879,7 +880,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
- pd = MR_ArPdGet(arRef, physArm + 1, map);
+ physArm = physArm + 1;
+ pd = MR_ArPdGet(arRef, physArm, map);
if (pd != MR_PD_INVALID)
/* Get dev handle from Pd */
*pDevHandle = MR_PdDevHandleGet(pd, map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 52d8bbf7feb5..24778ba4b6e8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -748,6 +748,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_fw_init;
}
+ instance->fw_sync_cache_support = (scratch_pad_2 &
+ MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
+ dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
+ instance->fw_sync_cache_support ? "Yes" : "No");
+
IOCInitMessage =
dma_alloc_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_REQUEST),
@@ -2000,6 +2005,8 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
pRAID_Context->regLockFlags |=
(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ pRAID_Context->Type = MPI2_TYPE_CUDA;
+ pRAID_Context->nseg = 0x1;
} else if (fusion->fast_path_io) {
pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
pRAID_Context->configSeqNum = 0;
@@ -2035,12 +2042,10 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeoutValue =
cpu_to_le16((os_timeout_value > timeout_limit) ?
timeout_limit : os_timeout_value);
- if (fusion->adapter_type == INVADER_SERIES) {
- pRAID_Context->Type = MPI2_TYPE_CUDA;
- pRAID_Context->nseg = 0x1;
+ if (fusion->adapter_type == INVADER_SERIES)
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
- }
+
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -2463,12 +2468,15 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
/* Start collecting crash, if DMA bit is done */
if ((fw_state == MFI_STATE_FAULT) && dma_state)
schedule_work(&instance->crash_init);
- else if (fw_state == MFI_STATE_FAULT)
- schedule_work(&instance->work_init);
+ else if (fw_state == MFI_STATE_FAULT) {
+ if (instance->unload == 0)
+ schedule_work(&instance->work_init);
+ }
} else if (fw_state == MFI_STATE_FAULT) {
dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
"for scsi%d\n", instance->host->host_no);
- schedule_work(&instance->work_init);
+ if (instance->unload == 0)
+ schedule_work(&instance->work_init);
}
}
@@ -2823,6 +2831,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
"will reset adapter scsi%d.\n",
instance->host->host_no);
+ *convert = 1;
retval = 1;
}
out:
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 95356a82ee99..fa61baf7c74d 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -478,6 +478,13 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2)
#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3)
+#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA)
+#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB)
+#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC)
+#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD)
+#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE)
+#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF)
+
/*Manufacturing Page 0 */
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index a1a5ceb42ce6..f00ef88a378a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -849,7 +849,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
ack_request->EventContext = mpi_reply->EventContext;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
out:
@@ -1078,7 +1078,7 @@ _base_interrupt(int irq, void *bus_id)
* new reply host index value in ReplyPostIndex Field and msix_index
* value in MSIxIndex field.
*/
- if (ioc->msix96_vector)
+ if (ioc->combined_reply_queue)
writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
MPI2_RPHI_MSIX_INDEX_SHIFT),
ioc->replyPostRegisterIndex[msix_index/8]);
@@ -1959,7 +1959,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
struct msix_entry *entries, *a;
int r;
- int i;
+ int i, local_max_msix_vectors;
u8 try_msix = 0;
if (msix_disable == -1 || msix_disable == 0)
@@ -1979,13 +1979,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_count, max_msix_vectors);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
- max_msix_vectors = 8;
+ local_max_msix_vectors = 8;
+ else
+ local_max_msix_vectors = max_msix_vectors;
- if (max_msix_vectors > 0) {
- ioc->reply_queue_count = min_t(int, max_msix_vectors,
+ if (local_max_msix_vectors > 0) {
+ ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
ioc->reply_queue_count);
ioc->msix_vector_count = ioc->reply_queue_count;
- } else if (max_msix_vectors == 0)
+ } else if (local_max_msix_vectors == 0)
goto try_ioapic;
if (ioc->msix_vector_count < ioc->cpu_count)
@@ -2050,7 +2052,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->msix96_vector) {
+ if (ioc->combined_reply_queue) {
kfree(ioc->replyPostRegisterIndex);
ioc->replyPostRegisterIndex = NULL;
}
@@ -2160,7 +2162,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
/* Use the Combined reply queue feature only for SAS3 C0 & higher
* revision HBAs and also only when reply queue count is greater than 8
*/
- if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
+ if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
/* Determine the Supplemental Reply Post Host Index Registers
* Addresse. Supplemental Reply Post Host Index Registers
* starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
@@ -2168,7 +2170,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
*/
ioc->replyPostRegisterIndex = kcalloc(
- MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
+ ioc->combined_reply_index_count,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->replyPostRegisterIndex) {
dfailprintk(ioc, printk(MPT3SAS_FMT
@@ -2178,14 +2180,14 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}
- for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
+ for (i = 0; i < ioc->combined_reply_index_count; i++) {
ioc->replyPostRegisterIndex[i] = (resource_size_t *)
((u8 *)&ioc->chip->Doorbell +
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
} else
- ioc->msix96_vector = 0;
+ ioc->combined_reply_queue = 0;
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
@@ -2462,15 +2464,15 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
#endif
/**
- * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * _base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+static void
+_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
@@ -2486,15 +2488,15 @@ mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
}
/**
- * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * _base_put_smid_fast_path - send fast path request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -2511,14 +2513,14 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
+ * _base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0.
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -2535,14 +2537,14 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+static void
+_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
@@ -2557,6 +2559,95 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+* _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
+* Atomic Request Descriptor
+* @ioc: per adapter object
+* @smid: system request message index
+* @handle: device handle, unused in this function, for function type match
+*
+* Return nothing.
+*/
+static void
+_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_fast_path_atomic - send fast path request to firmware
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ * Return nothing
+ */
+static void
+_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_hi_priority_atomic - send Task Management request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.MSIxIndex = msix_task;
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_default_atomic - Default, primarily used for config pages,
+ * sent using an Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
* _base_display_OEMs_branding - Display branding string
* @ioc: per adapter object
*
@@ -4070,7 +4161,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
ioc->ioc_link_reset_in_progress = 1;
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -4170,7 +4261,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
ioc->base_cmds.smid = smid;
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -4355,6 +4446,8 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
if ((facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
ioc->rdpq_array_capable = 1;
+ if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
+ ioc->atomic_desc_capable = 1;
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -4582,7 +4675,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
init_completion(&ioc->port_enable_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4645,7 +4738,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
}
@@ -4764,7 +4857,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
mpi_request->EventMasks[i] =
cpu_to_le32(ioc->event_masks[i]);
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -5138,7 +5231,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
/* initialize reply post host index */
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- if (ioc->msix96_vector)
+ if (ioc->combined_reply_queue)
writel((reply_q->msix_index & 7)<<
MPI2_RPHI_MSIX_INDEX_SHIFT,
ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
@@ -5280,9 +5373,23 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg = &_base_build_sg_ieee;
ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+
break;
}
+ if (ioc->atomic_desc_capable) {
+ ioc->put_smid_default = &_base_put_smid_default_atomic;
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
+ ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
+ ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
+ } else {
+ ioc->put_smid_default = &_base_put_smid_default;
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
+ ioc->put_smid_fast_path = &_base_put_smid_fast_path;
+ ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+ }
+
+
/*
* These function pointers for other requests that don't
* the require IEEE scatter gather elements.
@@ -5332,6 +5439,21 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
goto out_free_resources;
}
+ /* allocate memory for pending OS device add list */
+ ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pend_os_device_add_sz++;
+ ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+ GFP_KERNEL);
+ if (!ioc->pend_os_device_add)
+ goto out_free_resources;
+
+ ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
+ ioc->device_remove_in_progress =
+ kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
+ if (!ioc->device_remove_in_progress)
+ goto out_free_resources;
+
ioc->fwfault_debug = mpt3sas_fwfault_debug;
/* base internal command bits */
@@ -5414,6 +5536,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->blocking_handles);
+ kfree(ioc->device_remove_in_progress);
+ kfree(ioc->pend_os_device_add);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
kfree(ioc->scsih_cmds.reply);
@@ -5455,6 +5579,8 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->blocking_handles);
+ kfree(ioc->device_remove_in_progress);
+ kfree(ioc->pend_os_device_add);
kfree(ioc->pfacts);
kfree(ioc->ctl_cmds.reply);
kfree(ioc->ctl_cmds.sense);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 3e71bc1b4a80..8de0eda8cd00 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "13.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 13
-#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_DRIVER_VERSION "14.101.00.00"
+#define MPT3SAS_MAJOR_VERSION 14
+#define MPT3SAS_MINOR_VERSION 101
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -300,8 +300,9 @@
* There are twelve Supplemental Reply Post Host Index Registers
* and each register is at offset 0x10 bytes from the previous one.
*/
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
/* OEM Identifiers */
#define MFG10_OEM_ID_INVALID (0x00000000)
@@ -375,7 +376,6 @@ struct MPT3SAS_TARGET {
* per device private data
*/
#define MPT_DEVICE_FLAGS_INIT 0x01
-#define MPT_DEVICE_TLR_ON 0x02
#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003)
#define MFG_PAGE10_HIDE_ALL_DISKS (0x00)
@@ -736,7 +736,10 @@ typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
void *paddr);
-
+/* To support atomic and non-atomic descriptors */
+typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 funcdep);
+typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
/* IOC Facts and Port Facts converted from little endian to cpu */
union mpi3_version_union {
@@ -1079,6 +1082,9 @@ struct MPT3SAS_ADAPTER {
void *pd_handles;
u16 pd_handles_sz;
+ void *pend_os_device_add;
+ u16 pend_os_device_add_sz;
+
/* config page */
u16 config_page_sz;
void *config_page;
@@ -1156,7 +1162,8 @@ struct MPT3SAS_ADAPTER {
u8 reply_queue_count;
struct list_head reply_queue_list;
- u8 msix96_vector;
+ u8 combined_reply_queue;
+ u8 combined_reply_index_count;
/* reply post register index */
resource_size_t **replyPostRegisterIndex;
@@ -1187,6 +1194,15 @@ struct MPT3SAS_ADAPTER {
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+ void *device_remove_in_progress;
+ u16 device_remove_in_progress_sz;
+ u8 is_gen35_ioc;
+ u8 atomic_desc_capable;
+ PUT_SMID_IO_FP_HIP put_smid_scsi_io;
+ PUT_SMID_IO_FP_HIP put_smid_fast_path;
+ PUT_SMID_IO_FP_HIP put_smid_hi_priority;
+ PUT_SMID_DEFAULT put_smid_default;
+
};
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1232,13 +1248,6 @@ u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle);
-void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
- u16 smid, u16 msix_task);
-void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_initialize_callback_handler(void);
u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
void mpt3sas_base_release_callback_handler(u8 cb_idx);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index cebfd734fd76..dd6270125614 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -384,7 +384,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
_config_display_some_debug(ioc, smid, "config_request", NULL);
init_completion(&ioc->config_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 26cdc127ac89..050bd788ad02 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -654,6 +654,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 wait_state_count;
+ u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
issue_reset = 0;
@@ -738,10 +739,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
data_in_sz = karg.data_in_size;
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
- mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
- if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
- le16_to_cpu(mpi_request->FunctionDependent1) >
- ioc->facts.MaxDevHandle) {
+ mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) {
+
+ device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
+ if (!device_handle || (device_handle >
+ ioc->facts.MaxDevHandle)) {
ret = -EINVAL;
mpt3sas_base_free_smid(ioc, smid);
goto out;
@@ -797,14 +801,20 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
scsiio_request->SenseBufferLowAddress =
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
-
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ ioc->put_smid_scsi_io(ioc, smid, device_handle);
else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -827,11 +837,19 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
}
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -862,16 +880,30 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ {
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
case MPI2_FUNCTION_FW_DOWNLOAD:
case MPI2_FUNCTION_FW_UPLOAD:
{
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_TOOLBOX:
@@ -886,7 +918,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
}
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
@@ -905,7 +937,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
@@ -1064,7 +1096,10 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
break;
case MPI25_VERSION:
case MPI26_VERSION:
- karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
+ if (ioc->is_gen35_ioc)
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
+ else
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
break;
}
@@ -1491,7 +1526,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
cpu_to_le32(ioc->product_specific[buffer_type][i]);
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -1838,7 +1873,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -2105,7 +2140,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 89408356d252..f3e17a8c1b07 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -143,6 +143,7 @@ struct mpt3_ioctl_pci_info {
#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05)
#define MPT3_IOCTL_INTERFACE_SAS3 (0x06)
+#define MPT3_IOCTL_INTERFACE_SAS35 (0x07)
#define MPT2_IOCTL_VERSION_LENGTH (32)
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1c4744e78173..5c8f75247d73 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -423,7 +423,7 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
return 0;
}
- /* we hit this becuase the given parent handle doesn't exist */
+ /* we hit this because the given parent handle doesn't exist */
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
return -ENXIO;
@@ -788,6 +788,11 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
list_add_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (ioc->hide_drives) {
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
+ return;
+ }
+
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
@@ -803,7 +808,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
sas_device->sas_address_parent);
_scsih_sas_device_remove(ioc, sas_device);
}
- }
+ } else
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
/**
@@ -1517,7 +1523,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
/*
* raid transport support -
* Enabled for SLES11 and newer, in older kernels the driver will panic when
- * unloading the driver followed by a load - I beleive that the subroutine
+ * unloading the driver followed by a load - I believe that the subroutine
* raid_class_release() is not cleaning up properly.
*/
@@ -2279,7 +2285,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
msix_task = scsi_lookup->msix_io;
else
msix_task = 0;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
+ ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -2837,7 +2843,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
}
/**
@@ -2867,20 +2873,20 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
sdev_printk(KERN_WARNING, sdev,
"device_unblock failed with return(%d) for handle(0x%04x) "
"performing a block followed by an unblock\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
r = scsi_internal_device_block(sdev);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 0;
r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
" failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
}
}
@@ -2942,7 +2948,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
* @ioc: per adapter object
* @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
@@ -2971,7 +2977,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -3138,6 +3144,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (test_bit(handle, ioc->pd_handles))
return;
+ clear_bit(handle, ioc->pend_os_device_add);
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
if (sas_device && sas_device->starget &&
@@ -3192,7 +3200,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ set_bit(handle, ioc->device_remove_in_progress);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
@@ -3291,7 +3300,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = mpi_request_tm->DevHandle;
- mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+ ioc->put_smid_default(ioc, smid_sas_ctrl);
return _scsih_check_for_pending_tm(ioc, smid);
}
@@ -3326,6 +3335,11 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
le16_to_cpu(mpi_reply->IOCStatus),
le32_to_cpu(mpi_reply->IOCLogInfo)));
+ if (le16_to_cpu(mpi_reply->IOCStatus) ==
+ MPI2_IOCSTATUS_SUCCESS) {
+ clear_bit(le16_to_cpu(mpi_reply->DevHandle),
+ ioc->device_remove_in_progress);
+ }
} else {
pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -3381,7 +3395,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
}
/**
@@ -3473,7 +3487,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
ack_request->EventContext = event_context;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -3530,7 +3544,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = handle;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -3930,7 +3944,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
* _scsih_setup_eedp - setup MPI request for EEDP transfer
* @ioc: per adapter object
* @scmd: pointer to scsi command object
- * @mpi_request: pointer to the SCSI_IO reqest message frame
+ * @mpi_request: pointer to the SCSI_IO request message frame
*
* Supporting protection 1 and 3.
*
@@ -3983,6 +3997,9 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
mpi_request_3v->EEDPBlockSize =
cpu_to_le16(scmd->device->sector_size);
+
+ if (ioc->is_gen35_ioc)
+ eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}
@@ -4084,7 +4101,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
- /* device busy with task managment */
+ /* device busy with task management */
} else if (sas_target_priv_data->tm_busy ||
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -4154,12 +4171,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
MPI25_SCSIIO_IOFLAGS_FAST_PATH);
- mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ ioc->put_smid_fast_path(ioc, smid, handle);
} else
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ ioc->put_smid_scsi_io(ioc, smid,
le16_to_cpu(mpi_request->DevHandle));
} else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
out:
@@ -4658,7 +4675,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ ioc->put_smid_scsi_io(ioc, smid,
sas_device_priv_data->sas_target->handle);
return 0;
}
@@ -5383,10 +5400,10 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->handle, handle);
sas_target_priv_data->handle = handle;
sas_device->handle = handle;
- if (sas_device_pg0.Flags &
+ if (le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ sas_device_pg0.EnclosureLevel;
memcpy(sas_device->connector_name,
sas_device_pg0.ConnectorName, 4);
sas_device->connector_name[4] = '\0';
@@ -5465,6 +5482,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
if (!(_scsih_is_end_device(device_info)))
return -1;
+ set_bit(handle, ioc->pend_os_device_add);
sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
/* check if device is present */
@@ -5483,6 +5501,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device = mpt3sas_get_sdev_by_addr(ioc,
sas_address);
if (sas_device) {
+ clear_bit(handle, ioc->pend_os_device_add);
sas_device_put(sas_device);
return -1;
}
@@ -5513,9 +5532,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
- if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ if (le16_to_cpu(sas_device_pg0.Flags)
+ & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ sas_device_pg0.EnclosureLevel;
memcpy(sas_device->connector_name,
sas_device_pg0.ConnectorName, 4);
sas_device->connector_name[4] = '\0';
@@ -5806,6 +5826,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
_scsih_check_device(ioc, sas_address, handle,
phy_number, link_rate);
+ if (!test_bit(handle, ioc->pend_os_device_add))
+ break;
+
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -6267,7 +6290,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -6320,7 +6343,7 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
sdev->no_uld_attach = no_uld_attach ? 1 : 0;
sdev_printk(KERN_INFO, sdev, "%s raid component\n",
- sdev->no_uld_attach ? "hidding" : "exposing");
+ sdev->no_uld_attach ? "hiding" : "exposing");
WARN_ON(scsi_device_reprobe(sdev));
}
@@ -7050,7 +7073,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
if (sas_device_pg0->Flags &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0->EnclosureLevel);
+ sas_device_pg0->EnclosureLevel;
memcpy(&sas_device->connector_name[0],
&sas_device_pg0->ConnectorName[0], 4);
} else {
@@ -7112,6 +7135,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
sas_device_pg0.SASAddress =
le64_to_cpu(sas_device_pg0.SASAddress);
sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
+ sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
}
@@ -7723,6 +7747,9 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
complete(&ioc->tm_cmds.done);
}
+ memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+ memset(ioc->device_remove_in_progress, 0,
+ ioc->device_remove_in_progress_sz);
_scsih_fw_event_cleanup_queue(ioc);
_scsih_flush_running_cmds(ioc);
break;
@@ -8113,7 +8140,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->hide_ir_msg)
pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -8654,6 +8681,12 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_SAS3324_2:
case MPI26_MFGPAGE_DEVID_SAS3324_3:
case MPI26_MFGPAGE_DEVID_SAS3324_4:
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
return MPI26_VERSION;
}
return 0;
@@ -8722,10 +8755,29 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->hba_mpi_version_belonged = hba_mpi_version;
ioc->id = mpt3_ids++;
sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
+ switch (pdev->device) {
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
+ ioc->is_gen35_ioc = 1;
+ break;
+ default:
+ ioc->is_gen35_ioc = 0;
+ }
if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
- (ioc->hba_mpi_version_belonged == MPI26_VERSION))
- ioc->msix96_vector = 1;
+ (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
+ ioc->combined_reply_queue = 1;
+ if (ioc->is_gen35_ioc)
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
+ else
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
+ }
break;
default:
return -ENODEV;
@@ -9128,6 +9180,19 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
PCI_ANY_ID, PCI_ANY_ID },
+ /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
@@ -9168,7 +9233,7 @@ scsih_init(void)
/* queuecommand callback hander */
scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
- /* task managment callback handler */
+ /* task management callback handler */
tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
/* base internal commands callback handler */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index b74faf1a69b2..7f1d5785bc30 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -392,7 +392,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
"report_manufacture - send to sas_addr(0x%016llx)\n",
ioc->name, (unsigned long long)sas_address));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1198,7 +1198,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
ioc->name, (unsigned long long)phy->identify.sas_address,
phy->number));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1514,7 +1514,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
ioc->name, (unsigned long long)phy->identify.sas_address,
phy->number, phy_operation));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2032,7 +2032,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
"%s - sending smp request\n", ioc->name, __func__));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 4c57d9abce7b..7de5d8d75480 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -668,7 +668,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
u32 tmp;
tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
- if (tmp && 1 << (slot_idx % 32)) {
+ if (tmp & 1 << (slot_idx % 32)) {
mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
1 << (slot_idx % 32));
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 845affa112f7..337982cf3d63 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3787,11 +3787,11 @@ static long pmcraid_ioctl_passthrough(
direction);
if (rc) {
pmcraid_err("couldn't build passthrough ioadls\n");
- goto out_free_buffer;
+ goto out_free_cmd;
}
} else if (request_size < 0) {
rc = -EINVAL;
- goto out_free_buffer;
+ goto out_free_cmd;
}
/* If data is being written into the device, copy the data from user
@@ -3908,6 +3908,8 @@ out_handle_response:
out_free_sglist:
pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
+
+out_free_cmd:
pmcraid_return_cmd(cmd);
out_free_buffer:
@@ -6018,8 +6020,10 @@ static int __init pmcraid_init(void)
error = pmcraid_netlink_init();
- if (error)
+ if (error) {
+ class_destroy(pmcraid_class);
goto out_unreg_chrdev;
+ }
error = pci_register_driver(&pmcraid_driver);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 643014f82f7d..1bf8061ff803 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,4 +1,4 @@
-/*
+ /*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
*
@@ -9,6 +9,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
+#include <linux/bsg-lib.h>
/* BSG support for ELS/CT pass through */
void
@@ -16,10 +17,12 @@ qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
- bsg_job->reply->result = res;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = res;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
sp->free(vha, sp);
}
@@ -28,13 +31,15 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = sp->fcport->vha;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+
struct qla_hw_data *ha = vha->hw;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
if (sp->type == SRB_FXIOCB_BCMD) {
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
@@ -116,9 +121,11 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
}
static int
-qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int ret = 0;
@@ -131,7 +138,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
}
/* Get the sub command */
- oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Only set config is allowed if config memory is not allocated */
if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
@@ -145,10 +152,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ha->fcp_prio_cfg->attributes &=
~FCP_PRIO_ATTR_ENABLE;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
} else {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
break;
@@ -160,10 +167,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ha->fcp_prio_cfg->attributes |=
FCP_PRIO_ATTR_ENABLE;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
} else {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
}
@@ -173,12 +180,12 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
len = bsg_job->reply_payload.payload_len;
if (!len || len > FCP_PRIO_CFG_SIZE) {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
- bsg_job->reply->result = DID_OK;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->result = DID_OK;
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
@@ -189,7 +196,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
case QLFC_FCP_PRIO_SET_CONFIG:
len = bsg_job->request_payload.payload_len;
if (!len || len > FCP_PRIO_CFG_SIZE) {
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -EINVAL;
goto exit_fcp_prio_cfg;
}
@@ -200,7 +207,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7050,
"Unable to allocate memory for fcp prio "
"config data (%x).\n", FCP_PRIO_CFG_SIZE);
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -ENOMEM;
goto exit_fcp_prio_cfg;
}
@@ -215,7 +222,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (!qla24xx_fcp_prio_cfg_valid(vha,
(struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -EINVAL;
/* If buffer was invalidatic int
* fcp_prio_cfg is of no use
@@ -229,7 +236,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
ha->flags.fcp_prio_enabled = 1;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
break;
default:
ret = -EINVAL;
@@ -237,13 +244,15 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
}
exit_fcp_prio_cfg:
if (!ret)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return ret;
}
static int
-qla2x00_process_els(struct fc_bsg_job *bsg_job)
+qla2x00_process_els(struct bsg_job *bsg_job)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
struct fc_rport *rport;
fc_port_t *fcport = NULL;
struct Scsi_Host *host;
@@ -255,15 +264,15 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
int rval = (DRIVER_ERROR << 16);
uint16_t nextlid = 0;
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ rport = fc_bsg_to_rport(bsg_job);
fcport = *(fc_port_t **) rport->dd_data;
host = rport_to_shost(rport);
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_RPT_ELS";
} else {
- host = bsg_job->shost;
+ host = fc_bsg_to_shost(bsg_job);
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_HST_ELS_NOLOGIN";
@@ -296,7 +305,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
/* ELS request for rport */
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
/* make sure the rport is logged in,
* if not perform fabric login
*/
@@ -322,11 +331,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->d_id.b.al_pa =
- bsg_job->request->rqst_data.h_els.port_id[0];
+ bsg_request->rqst_data.h_els.port_id[0];
fcport->d_id.b.area =
- bsg_job->request->rqst_data.h_els.port_id[1];
+ bsg_request->rqst_data.h_els.port_id[1];
fcport->d_id.b.domain =
- bsg_job->request->rqst_data.h_els.port_id[2];
+ bsg_request->rqst_data.h_els.port_id[2];
fcport->loop_id =
(fcport->d_id.b.al_pa == 0xFD) ?
NPH_FABRIC_CONTROLLER : NPH_F_PORT;
@@ -366,11 +375,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
sp->type =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ (bsg_request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
sp->name =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- "bsg_els_rpt" : "bsg_els_hst");
+ (bsg_request->msgcode == FC_BSG_RPT_ELS ?
+ "bsg_els_rpt" : "bsg_els_hst");
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
@@ -378,7 +387,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x700a,
"bsg rqst type: %s els type: %x - loop-id=%x "
"portid=%-2x%02x%02x.\n", type,
- bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
+ bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
@@ -399,7 +408,7 @@ done_unmap_sg:
goto done_free_fcport;
done_free_fcport:
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS)
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS)
kfree(fcport);
done:
return rval;
@@ -420,10 +429,11 @@ qla24xx_calc_ct_iocbs(uint16_t dsds)
}
static int
-qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+qla2x00_process_ct(struct bsg_job *bsg_job)
{
srb_t *sp;
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
@@ -469,7 +479,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
}
loop_id =
- (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
>> 24;
switch (loop_id) {
case 0xFC:
@@ -500,9 +510,9 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
- fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
- fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
fcport->loop_id = loop_id;
/* Alloc SRB structure */
@@ -524,7 +534,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x7016,
"bsg rqst type: %s else type: %x - "
"loop-id=%x portid=%02x%02x%02x.\n", type,
- (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
@@ -697,9 +707,11 @@ done_set_internal:
}
static int
-qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
+qla2x00_process_loopback(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval;
@@ -780,9 +792,9 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.rcv_dma = rsp_data_dma;
elreq.transfer_size = req_data_len;
- elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
elreq.iteration_count =
- bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
+ bsg_request->rqst_data.h_vendor.vendor_cmd[2];
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
@@ -896,12 +908,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"Vendor request %s failed.\n", type);
rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = (DID_ERROR << 16);
+ bsg_reply->reply_payload_rcv_len = 0;
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
"Vendor request %s completed.\n", type);
- bsg_job->reply->result = (DID_OK << 16);
+ bsg_reply->result = (DID_OK << 16);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, rsp_data,
rsp_data_len);
@@ -930,14 +942,17 @@ done_unmap_req_sg:
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla84xx_reset(struct fc_bsg_job *bsg_job)
+qla84xx_reset(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -948,7 +963,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
return -EINVAL;
}
- flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
@@ -960,17 +975,20 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
} else {
ql_dbg(ql_dbg_user, vha, 0x7031,
"Vendor request 84xx reset completed.\n");
- bsg_job->reply->result = DID_OK;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return rval;
}
static int
-qla84xx_updatefw(struct fc_bsg_job *bsg_job)
+qla84xx_updatefw(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct verify_chip_entry_84xx *mn = NULL;
@@ -1027,7 +1045,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
goto done_free_fw_buf;
}
- flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
memset(mn, 0, sizeof(struct access_chip_84xx));
@@ -1059,7 +1077,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
"Vendor request 84xx updatefw completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
}
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
@@ -1072,14 +1090,17 @@ done_unmap_sg:
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct access_chip_84xx *mn = NULL;
@@ -1107,7 +1128,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
memset(mn, 0, sizeof(struct access_chip_84xx));
mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
mn->entry_count = 1;
- ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
switch (ql84_mgmt->mgmt.cmd) {
case QLA84_MGMT_READ_MEM:
case QLA84_MGMT_GET_INFO:
@@ -1239,11 +1260,11 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
"Vendor request 84xx mgmt completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
@@ -1267,14 +1288,17 @@ exit_mgmt:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla24xx_iidma(struct fc_bsg_job *bsg_job)
+qla24xx_iidma(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_port_param *port_param = NULL;
@@ -1288,7 +1312,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
return -EINVAL;
}
- port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
ql_log(ql_log_warn, vha, 0x7048,
"Invalid destination type.\n");
@@ -1343,24 +1367,26 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(struct qla_port_param);
- rsp_ptr = ((uint8_t *)bsg_job->reply) +
+ rsp_ptr = ((uint8_t *)bsg_reply) +
sizeof(struct fc_bsg_reply);
memcpy(rsp_ptr, port_param,
sizeof(struct qla_port_param));
}
- bsg_job->reply->result = DID_OK;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return rval;
}
static int
-qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
+qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
uint8_t is_update)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
uint32_t start = 0;
int valid = 0;
struct qla_hw_data *ha = vha->hw;
@@ -1368,7 +1394,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
if (unlikely(pci_channel_offline(ha->pdev)))
return -EINVAL;
- start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
if (start > ha->optrom_size) {
ql_log(ql_log_warn, vha, 0x7055,
"start %d > optrom_size %d.\n", start, ha->optrom_size);
@@ -1427,9 +1453,10 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
}
static int
-qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_read_optrom(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1451,20 +1478,22 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
- bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
- bsg_job->reply->result = DID_OK;
+ bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
+ bsg_reply->result = DID_OK;
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
mutex_unlock(&ha->optrom_mutex);
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_update_optrom(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1486,19 +1515,21 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
mutex_unlock(&ha->optrom_mutex);
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
+qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1509,7 +1540,7 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1525,30 +1556,32 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
image->field_address.device, image->field_address.offset,
sizeof(image->field_info), image->field_address.option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
image++;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1557,7 +1590,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1571,7 +1604,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
sr->status_reg = *sfp;
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
@@ -1579,24 +1612,26 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = sizeof(*sr);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1605,7 +1640,7 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1619,28 +1654,30 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
sizeof(sr->status_reg), sr->field_address.option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_write_i2c(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1649,7 +1686,7 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1662,28 +1699,30 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
i2c->device, i2c->offset, i2c->length, i2c->option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_read_i2c(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1692,7 +1731,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1704,7 +1743,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
i2c->device, i2c->offset, i2c->length, i2c->option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
@@ -1713,24 +1752,26 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
uint32_t rval = EXT_STATUS_OK;
@@ -1895,19 +1936,21 @@ done:
/* Return an error vendor specific response
* and complete the bsg request
*/
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = 0;
- bsg_job->reply->result = (DID_OK) << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = (DID_OK) << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
/* Always return success, vendor rsp carries correct status */
return 0;
}
static int
-qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
@@ -1919,7 +1962,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
/* Copy the IOCB specific information */
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Dump the vendor information */
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
@@ -2027,9 +2070,10 @@ done:
}
static int
-qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
+qla26xx_serdes_op(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_serdes_reg sr;
@@ -2042,13 +2086,13 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
switch (sr.cmd) {
case INT_SC_SERDES_WRITE_REG:
rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
case INT_SC_SERDES_READ_REG:
rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
ql_dbg(ql_dbg_user, vha, 0x708c,
@@ -2057,19 +2101,21 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
break;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : 0;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla8044_serdes_op(struct fc_bsg_job *bsg_job)
+qla8044_serdes_op(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_serdes_reg_ex sr;
@@ -2082,13 +2128,13 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
switch (sr.cmd) {
case INT_SC_SERDES_WRITE_REG:
rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
case INT_SC_SERDES_READ_REG:
rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
ql_dbg(ql_dbg_user, vha, 0x70cf,
@@ -2097,19 +2143,21 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
break;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : 0;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct qla_flash_update_caps cap;
@@ -2125,21 +2173,23 @@ qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
- bsg_job->reply->reply_payload_rcv_len = sizeof(cap);
+ bsg_reply->reply_payload_rcv_len = sizeof(cap);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
uint64_t online_fw_attr = 0;
@@ -2158,32 +2208,34 @@ qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
(uint64_t)ha->fw_attributes;
if (online_fw_attr != cap.capabilities) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_INVALID_PARAM;
return -EINVAL;
}
if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_INVALID_PARAM;
return -EINVAL;
}
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
+qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct qla_bbcr_data bbcr;
@@ -2227,27 +2279,30 @@ qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
done:
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr);
+ bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
+qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
struct link_statistics *stats = NULL;
dma_addr_t stats_dma;
int rval;
- uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd;
+ uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -2281,13 +2336,14 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
}
- bsg_job->reply->reply_payload_rcv_len = sizeof(*stats);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_payload_rcv_len = sizeof(*stats);
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
- bsg_job->reply_len = sizeof(*bsg_job->reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
stats, stats_dma);
@@ -2296,9 +2352,10 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
+qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval;
struct qla_dport_diag *dd;
@@ -2323,13 +2380,14 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
}
- bsg_job->reply->reply_payload_rcv_len = sizeof(*dd);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
- bsg_job->reply_len = sizeof(*bsg_job->reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(dd);
@@ -2337,9 +2395,11 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
- switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+
+ switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
case QL_VND_LOOPBACK:
return qla2x00_process_loopback(bsg_job);
@@ -2413,36 +2473,38 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
}
int
-qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_request(struct bsg_job *bsg_job)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
int ret = -EINVAL;
struct fc_rport *rport;
struct Scsi_Host *host;
scsi_qla_host_t *vha;
/* In case no data transferred. */
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ rport = fc_bsg_to_rport(bsg_job);
host = rport_to_shost(rport);
vha = shost_priv(host);
} else {
- host = bsg_job->shost;
+ host = fc_bsg_to_shost(bsg_job);
vha = shost_priv(host);
}
if (qla2x00_reset_active(vha)) {
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
- bsg_job->request->msgcode);
+ bsg_request->msgcode);
return -EBUSY;
}
ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
+ "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
- switch (bsg_job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
ret = qla2x00_process_els(bsg_job);
@@ -2464,9 +2526,10 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
}
int
-qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
- scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
int cnt, que;
@@ -2494,13 +2557,13 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
"mbx abort_command "
"failed.\n");
bsg_job->req->errors =
- bsg_job->reply->result = -EIO;
+ bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command "
"success.\n");
bsg_job->req->errors =
- bsg_job->reply->result = 0;
+ bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
goto done;
@@ -2510,7 +2573,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
- bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ bsg_job->req->errors = bsg_reply->result = -ENXIO;
return 0;
done:
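
Every qla2xxx BSG handler above is converted the same way: the entry point now takes a struct bsg_job, the request/reply pointers are cached once at the top, the Scsi_Host (or rport) is recovered through the fc_bsg_to_shost()/fc_bsg_to_rport() accessors, and completion goes through bsg_job_done() instead of the removed job->job_done() callback. A minimal sketch of that shape, assuming the qla2xxx types, ql_dbg() and EXT_STATUS_* codes already used in the hunks above; the handler name is hypothetical and the command-specific work is elided:

    #include <linux/bsg-lib.h>
    #include <scsi/scsi_transport_fc.h>

    static int qla2x00_example_vendor_cmd(struct bsg_job *bsg_job)
    {
            struct fc_bsg_reply *bsg_reply = bsg_job->reply;
            struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
            scsi_qla_host_t *vha = shost_priv(host);
            int rval = 0;

            ql_dbg(ql_dbg_user, vha, 0x7000, "example vendor command\n");

            /* rval would come from the command-specific mailbox/IOCB work */

            bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
            bsg_job->reply_len = sizeof(struct fc_bsg_reply);
            bsg_reply->result = DID_OK << 16;

            /* bsg-lib completes the block request; the old job_done hook is gone */
            bsg_job_done(bsg_job, bsg_reply->result,
                bsg_reply->reply_payload_rcv_len);
            return 0;
    }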
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 73b12e41d992..5236e3f2a06a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -403,7 +403,7 @@ typedef struct srb {
int iocbs;
union {
struct srb_iocb iocb_cmd;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
struct srb_cmd scmd;
} u;
void (*done)(void *, void *, int);
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6ca00813c71f..c51d9f3359e3 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -733,8 +733,8 @@ extern int qla82xx_read_temperature(scsi_qla_host_t *);
extern int qla8044_read_temperature(scsi_qla_host_t *);
/* BSG related functions */
-extern int qla24xx_bsg_request(struct fc_bsg_job *);
-extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla24xx_bsg_request(struct bsg_job *);
+extern int qla24xx_bsg_timeout(struct bsg_job *);
extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
dma_addr_t, size_t, uint32_t);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b41265a75ed5..221ad8907893 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2197,7 +2197,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2212,8 +2213,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->opcode =
sp->type == SRB_ELS_CMD_RPT ?
- bsg_job->request->rqst_data.r_els.els_code :
- bsg_job->request->rqst_data.h_els.command_code;
+ bsg_request->rqst_data.r_els.els_code :
+ bsg_request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
@@ -2250,7 +2251,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int entry_count = 1;
@@ -2327,7 +2328,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int entry_count = 1;
@@ -2833,7 +2834,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
struct scatterlist *sg;
int index;
int entry_count = 1;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
/*Update entry type to indicate bidir command */
*((uint32_t *)(&cmd_pkt->entry_type)) =
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 068c4e47fac9..19f18485a854 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1356,7 +1356,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char func[] = "CT_IOCB";
const char *type;
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
int res;
@@ -1365,6 +1366,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
type = "ct pass-through";
@@ -1373,32 +1375,32 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
- bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
ql_log(ql_log_warn, vha, 0x5048,
"CT pass-through-%s error "
"comp_status-status=0x%x total_byte = 0x%x.\n",
type, comp_status,
- bsg_job->reply->reply_payload_rcv_len);
+ bsg_reply->reply_payload_rcv_len);
} else {
ql_log(ql_log_warn, vha, 0x5049,
"CT pass-through-%s error "
"comp_status-status=0x%x.\n", type, comp_status);
res = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
}
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
@@ -1413,7 +1415,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char func[] = "ELS_CT_IOCB";
const char *type;
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
uint8_t* fw_sts_ptr;
@@ -1423,6 +1426,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
type = NULL;
switch (sp->type) {
@@ -1452,13 +1456,13 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
- bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
ql_dbg(ql_dbg_user, vha, 0x503f,
@@ -1480,7 +1484,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_2));
res = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
@@ -1489,7 +1493,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
else {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+ bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
@@ -1904,7 +1908,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
uint16_t scsi_status;
uint16_t thread_id;
uint32_t rval = EXT_STATUS_OK;
- struct fc_bsg_job *bsg_job = NULL;
+ struct bsg_job *bsg_job = NULL;
+ struct fc_bsg_request *bsg_request;
+ struct fc_bsg_reply *bsg_reply;
sts_entry_t *sts;
struct sts_entry_24xx *sts24;
sts = (sts_entry_t *) pkt;
@@ -1919,11 +1925,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
}
sp = req->outstanding_cmds[index];
- if (sp) {
- /* Free outstanding command slot. */
- req->outstanding_cmds[index] = NULL;
- bsg_job = sp->u.bsg_job;
- } else {
+ if (!sp) {
ql_log(ql_log_warn, vha, 0x70b0,
"Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
req->id, index);
@@ -1932,6 +1934,12 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
return;
}
+ /* Free outstanding command slot. */
+ req->outstanding_cmds[index] = NULL;
+ bsg_job = sp->u.bsg_job;
+ bsg_request = bsg_job->request;
+ bsg_reply = bsg_job->reply;
+
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
@@ -1940,14 +1948,14 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
}
- thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
switch (comp_status) {
case CS_COMPLETE:
if (scsi_status == 0) {
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
vha->qla_stats.input_bytes +=
- bsg_job->reply->reply_payload_rcv_len;
+ bsg_reply->reply_payload_rcv_len;
vha->qla_stats.input_requests++;
rval = EXT_STATUS_OK;
}
@@ -2028,11 +2036,11 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
rval = EXT_STATUS_ERR;
break;
}
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
done:
/* Return the vendor specific reply to API */
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
/* Always return DID_OK, bsg will send the vendor specific response
* in this case only */
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 15dff7099955..02f1de18bc2b 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>
@@ -2206,7 +2207,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "IOSB_IOCB";
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
struct srb_iocb *iocb_job;
int res;
struct qla_mt_iocb_rsp_fx00 fstatus;
@@ -2226,6 +2228,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt->dataword_r;
} else {
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
@@ -2257,8 +2260,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
sp->fcport->vha, 0x5074,
(uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
- res = bsg_job->reply->result = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ res = bsg_reply->result = DID_OK << 16;
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
}
sp->done(vha, sp, res);
@@ -3252,7 +3255,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
struct srb_iocb *fxio = &sp->u.iocb_cmd;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_request *bsg_request;
struct fxdisc_entry_fx00 fx_iocb;
uint8_t entry_cnt = 1;
@@ -3301,8 +3305,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
} else {
struct scatterlist *sg;
bsg_job = sp->u.bsg_job;
+ bsg_request = bsg_job->request;
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
fx_iocb.func_num = piocb_rqst->func_type;
fx_iocb.adapid = piocb_rqst->adapid;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7cfc270bd08..aeebefb1e9f8 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -409,18 +409,9 @@ struct qla4_8xxx_legacy_intr_set {
/* MSI-X Support */
-#define QLA_MSIX_DEFAULT 0x00
-#define QLA_MSIX_RSP_Q 0x01
-
+#define QLA_MSIX_DEFAULT 0
+#define QLA_MSIX_RSP_Q 1
#define QLA_MSIX_ENTRIES 2
-#define QLA_MIDX_DEFAULT 0
-#define QLA_MIDX_RSP_Q 1
-
-struct ql4_msix_entry {
- int have_irq;
- uint16_t msix_vector;
- uint16_t msix_entry;
-};
/*
* ISP Operations
@@ -572,9 +563,6 @@ struct scsi_qla_host {
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
#define AF_HA_REMOVAL 12 /* 0x00001000 */
-#define AF_INTx_ENABLED 15 /* 0x00008000 */
-#define AF_MSI_ENABLED 16 /* 0x00010000 */
-#define AF_MSIX_ENABLED 17 /* 0x00020000 */
#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
@@ -762,8 +750,6 @@ struct scsi_qla_host {
struct isp_operations *isp_ops;
struct ql82xx_hw_data hw;
- struct ql4_msix_entry msix_entries[QLA_MSIX_ENTRIES];
-
uint32_t nx_dev_init_timeout;
uint32_t nx_reset_timeout;
void *fw_dump;
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 2559144f5475..bce96a58f14e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -134,7 +134,6 @@ int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
-void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 4f9c0f2be89d..d2cd33d8d67f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1107,7 +1107,7 @@ static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
if (is_qla8022(ha)) {
writel(0, &ha->qla4_82xx_reg->host_int);
- if (test_bit(AF_INTx_ENABLED, &ha->flags))
+ if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
@@ -1564,19 +1564,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
try_msi:
/* Trying MSI */
- ret = pci_enable_msi(ha->pdev);
- if (!ret) {
+ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret > 0) {
ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
0, DRIVER_NAME, ha);
if (!ret) {
DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
- set_bit(AF_MSI_ENABLED, &ha->flags);
goto irq_attached;
} else {
ql4_printk(KERN_WARNING, ha,
"MSI: Failed to reserve interrupt %d "
"already in use.\n", ha->pdev->irq);
- pci_disable_msi(ha->pdev);
+ pci_free_irq_vectors(ha->pdev);
}
}
@@ -1592,7 +1591,6 @@ try_intx:
IRQF_SHARED, DRIVER_NAME, ha);
if (!ret) {
DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
- set_bit(AF_INTx_ENABLED, &ha->flags);
goto irq_attached;
} else {
@@ -1614,14 +1612,11 @@ irq_not_attached:
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
- if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
- if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
- qla4_8xxx_disable_msix(ha);
- } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- pci_disable_msi(ha->pdev);
- } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- }
- }
+ if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
+ return;
+
+ if (ha->pdev->msix_enabled)
+ free_irq(pci_irq_vector(ha->pdev, 1), ha);
+ free_irq(pci_irq_vector(ha->pdev, 0), ha);
+ pci_free_irq_vectors(ha->pdev);
}
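
The qla4xxx interrupt changes above drop the driver-private AF_INTx/AF_MSI/AF_MSIX bookkeeping: whether MSI or MSI-X is active is now read from the pci_dev itself (msi_enabled/msix_enabled), and teardown is uniform because pci_irq_vector() resolves a vector index to a Linux IRQ number for every interrupt type. A condensed sketch of the freeing side, assuming the two-vector layout used above (index 0 = default handler, index 1 = rsp_q); the function name and drv_data cookie are hypothetical:

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static void example_free_irqs(struct pci_dev *pdev, void *drv_data)
    {
            /* the second vector exists only when MSI-X was granted */
            if (pdev->msix_enabled)
                    free_irq(pci_irq_vector(pdev, 1), drv_data);

            /* index 0 is valid for MSI-X, MSI and legacy INTx alike */
            free_irq(pci_irq_vector(pdev, 0), drv_data);
            pci_free_irq_vectors(pdev);
    }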
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c291fdff1b33..1da04f323d38 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -2032,10 +2032,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
ptid = (uint16_t *)&fw_ddb_entry->isid[1];
*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
- DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
- fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
- fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
- fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid));
iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 06ddd13cb7cc..e91abb327745 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3945,7 +3945,7 @@ void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
ha->isp_ops->interrupt_service_routine(ha, intr_status);
if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
- test_bit(AF_INTx_ENABLED, &ha->flags))
+ (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled))
qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
@@ -4094,12 +4094,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
ha->phy_port_num = sys_info->port_num;
ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
- DEBUG2(printk("scsi%ld: %s: "
- "mac %02x:%02x:%02x:%02x:%02x:%02x "
- "serial %s\n", ha->host_no, __func__,
- ha->my_mac[0], ha->my_mac[1], ha->my_mac[2],
- ha->my_mac[3], ha->my_mac[4], ha->my_mac[5],
- ha->serial_number));
+ DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n",
+ ha->host_no, __func__, ha->my_mac, ha->serial_number));
status = QLA_SUCCESS;
@@ -4178,78 +4174,37 @@ qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
spin_unlock_irq(&ha->hardware_lock);
}
-struct ql4_init_msix_entry {
- uint16_t entry;
- uint16_t index;
- const char *name;
- irq_handler_t handler;
-};
-
-static struct ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = {
- { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
- "qla4xxx (default)",
- (irq_handler_t)qla4_8xxx_default_intr_handler },
- { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
- "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q },
-};
-
-void
-qla4_8xxx_disable_msix(struct scsi_qla_host *ha)
-{
- int i;
- struct ql4_msix_entry *qentry;
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
- if (qentry->have_irq) {
- free_irq(qentry->msix_vector, ha);
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
- __func__, qla4_8xxx_msix_entries[i].name));
- }
- }
- pci_disable_msix(ha->pdev);
- clear_bit(AF_MSIX_ENABLED, &ha->flags);
-}
-
int
qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
{
- int i, ret;
- struct msix_entry entries[QLA_MSIX_ENTRIES];
- struct ql4_msix_entry *qentry;
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++)
- entries[i].entry = qla4_8xxx_msix_entries[i].entry;
+ int ret;
- ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries));
- if (ret) {
+ ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES,
+ QLA_MSIX_ENTRIES, PCI_IRQ_MSIX);
+ if (ret < 0) {
ql4_printk(KERN_WARNING, ha,
"MSI-X: Failed to enable support -- %d/%d\n",
QLA_MSIX_ENTRIES, ret);
- goto msix_out;
- }
- set_bit(AF_MSIX_ENABLED, &ha->flags);
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
- qentry->msix_vector = entries[i].vector;
- qentry->msix_entry = entries[i].entry;
- qentry->have_irq = 0;
- ret = request_irq(qentry->msix_vector,
- qla4_8xxx_msix_entries[i].handler, 0,
- qla4_8xxx_msix_entries[i].name, ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- qla4_8xxx_msix_entries[i].index, ret);
- qla4_8xxx_disable_msix(ha);
- goto msix_out;
- }
- qentry->have_irq = 1;
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
- __func__, qla4_8xxx_msix_entries[i].name));
+ return ret;
}
-msix_out:
+
+ ret = request_irq(pci_irq_vector(ha->pdev, 0),
+ qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)",
+ ha);
+ if (ret)
+ goto out_free_vectors;
+
+ ret = request_irq(pci_irq_vector(ha->pdev, 1),
+ qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha);
+ if (ret)
+ goto out_free_default_irq;
+
+ return 0;
+
+out_free_default_irq:
+ free_irq(pci_irq_vector(ha->pdev, 0), ha);
+out_free_vectors:
+ pci_free_irq_vectors(ha->pdev);
return ret;
}
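
The rewritten qla4_8xxx_enable_msix() above leans on the pci_alloc_irq_vectors() contract: the call returns the number of vectors actually allocated (at least min_vecs) or a negative errno, and the driver no longer keeps a struct msix_entry table because pci_irq_vector() does the index-to-IRQ translation on demand. A small sketch of that contract, assuming the QLA_MSIX_ENTRIES count from the hunks above; the function name is hypothetical, and note the driver above intentionally requests PCI_IRQ_MSIX only, without the MSI/INTx fallback that PCI_IRQ_ALL_TYPES would allow:

    #include <linux/pci.h>

    static int example_alloc_vectors(struct pci_dev *pdev)
    {
            int nvec;

            /* ask for exactly QLA_MSIX_ENTRIES MSI-X vectors, no fallback */
            nvec = pci_alloc_irq_vectors(pdev, QLA_MSIX_ENTRIES,
                                         QLA_MSIX_ENTRIES, PCI_IRQ_MSIX);
            if (nvec < 0)
                    return nvec;    /* negative errno, nothing allocated */

            /* nvec vectors are live; pci_irq_vector(pdev, i) maps
             * index i (0 <= i < nvec) to a Linux IRQ number */
            return nvec;
    }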
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 01c3610a60cf..9fbb33fc90c7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6304,13 +6304,9 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
* ISID would not match firmware generated ISID.
*/
if (is_isid_compare) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
- "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
- __func__, old_tddb->isid[5], old_tddb->isid[4],
- old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
- old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
- new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
- new_tddb->isid[0]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: old ISID [%pmR] New ISID [%pmR]\n",
+ __func__, old_tddb->isid, new_tddb->isid));
if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
sizeof(old_tddb->isid)))
@@ -7925,10 +7921,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
break;
case ISCSI_FLASHNODE_ISID:
- rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
- fnode_sess->isid[0], fnode_sess->isid[1],
- fnode_sess->isid[2], fnode_sess->isid[3],
- fnode_sess->isid[4], fnode_sess->isid[5]);
+ rc = sprintf(buf, "%pm\n", fnode_sess->isid);
break;
case ISCSI_FLASHNODE_TSID:
rc = sprintf(buf, "%u\n", fnode_sess->tsid);
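
The qla4xxx logging hunks above replace hand-rolled "%02x" byte lists with the kernel's pointer-extension formats: %pM prints a 6-byte address colon-separated, %pm drops the separators, and a trailing R reverses the byte order, which matches the isid[5]..isid[0] ordering the old code printed manually. A one-line usage sketch; the function name and buffers are hypothetical:

    #include <linux/types.h>
    #include <linux/printk.h>

    static void example_print_ids(const u8 mac[6], const u8 isid[6])
    {
            /* %pM: colon-separated; %pmR: no separators, bytes reversed */
            pr_info("mac %pM ISID [%pmR]\n", mac, isid);
    }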
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 246456925335..28fea83ae2fe 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -220,8 +220,6 @@ static struct {
{"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
- {"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA},
- {"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA},
{"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9a8ccff1121f..c35b6de4ca64 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1998,6 +1998,15 @@ static void scsi_exit_request(void *data, struct request *rq,
kfree(cmd->sense_buffer);
}
+static int scsi_map_queues(struct blk_mq_tag_set *set)
+{
+ struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
+
+ if (shost->hostt->map_queues)
+ return shost->hostt->map_queues(shost);
+ return blk_mq_map_queues(set);
+}
+
static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
struct device *host_dev;
@@ -2090,6 +2099,7 @@ static struct blk_mq_ops scsi_mq_ops = {
.timeout = scsi_timeout,
.init_request = scsi_init_request,
.exit_request = scsi_exit_request,
+ .map_queues = scsi_map_queues,
};
struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@ -2732,6 +2742,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
+ */
+static int scsi_request_fn_active(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+ int request_fn_active;
+
+ WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+ spin_lock_irq(q->queue_lock);
+ request_fn_active = q->request_fn_active;
+ spin_unlock_irq(q->queue_lock);
+
+ return request_fn_active;
+}
+
+/**
+ * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
+ * @sdev: SCSI device pointer.
+ *
+ * Wait until the ongoing shost->hostt->queuecommand() calls that are
+ * invoked from scsi_request_fn() have finished.
+ */
+static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
+{
+ WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+ while (scsi_request_fn_active(sdev))
+ msleep(20);
+}
+
+/**
* scsi_device_quiesce - Block user issued commands.
* @sdev: scsi device to quiesce.
*
@@ -2815,8 +2858,7 @@ EXPORT_SYMBOL(scsi_target_resume);
* @sdev: device to block
*
* Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device. Called from interrupt
- * or normal process context.
+ * scsi commands on the specified device. May sleep.
*
* Returns zero if successful or error if not
*
@@ -2825,6 +2867,10 @@ EXPORT_SYMBOL(scsi_target_resume);
* (which must be a legal transition). When the device is in this
* state, all commands are deferred until the scsi lld reenables
* the device with scsi_device_unblock or device_block_tmo fires.
+ *
+ * To do: prevent scsi_send_eh_cmnd() from calling queuecommand() after
+ * scsi_internal_device_block() has blocked a SCSI device, and also
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
int
scsi_internal_device_block(struct scsi_device *sdev)
@@ -2852,6 +2898,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
+ scsi_wait_for_queuecommand(sdev);
}
return 0;
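
One of the scsi_lib.c hunks above adds a .map_queues step to scsi_mq_ops so that a low-level driver can spread blk-mq hardware queues across its own interrupt vectors; when the host template does not provide the hook, blk_mq_map_queues() keeps the default spreading. A hypothetical LLD-side implementation, assuming a PCI HBA and the blk_mq_pci_map_queues() helper; the example_hba structure is made up for the example:

    #include <linux/blk-mq-pci.h>
    #include <scsi/scsi_host.h>

    struct example_hba {                    /* hypothetical driver private data */
            struct pci_dev *pdev;
    };

    static int example_map_queues(struct Scsi_Host *shost)
    {
            struct example_hba *hba = shost_priv(shost);

            /* align the hardware-queue -> CPU mapping with MSI-X affinity */
            return blk_mq_pci_map_queues(&shost->tag_set, hba->pdev);
    }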
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 0f3a3869524b..03577bde6ac5 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -2592,7 +2593,7 @@ fc_rport_final_delete(struct work_struct *work)
/**
- * fc_rport_create - allocates and creates a remote FC port.
+ * fc_remote_port_create - allocates and creates a remote FC port.
* @shost: scsi host the remote port is connected to.
* @channel: Channel on shost port connected to.
* @ids: The world wide names, fc address, and FC4 port
@@ -2605,8 +2606,8 @@ fc_rport_final_delete(struct work_struct *work)
* This routine assumes no locks are held on entry.
*/
static struct fc_rport *
-fc_rport_create(struct Scsi_Host *shost, int channel,
- struct fc_rport_identifiers *ids)
+fc_remote_port_create(struct Scsi_Host *shost, int channel,
+ struct fc_rport_identifiers *ids)
{
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
struct fc_internal *fci = to_fc_internal(shost->transportt);
@@ -2914,7 +2915,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock, flags);
/* No consistent binding found - create new remote port entry */
- rport = fc_rport_create(shost, channel, ids);
+ rport = fc_remote_port_create(shost, channel, ids);
return rport;
}
@@ -3554,81 +3555,6 @@ fc_vport_sched_delete(struct work_struct *work)
* BSG support
*/
-
-/**
- * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
- * @job: fc_bsg_job that is to be torn down
- */
-static void
-fc_destroy_bsgjob(struct fc_bsg_job *job)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&job->job_lock, flags);
- if (job->ref_cnt) {
- spin_unlock_irqrestore(&job->job_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&job->job_lock, flags);
-
- put_device(job->dev); /* release reference for the request */
-
- kfree(job->request_payload.sg_list);
- kfree(job->reply_payload.sg_list);
- kfree(job);
-}
-
-/**
- * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
- * completed
- * @job: fc_bsg_job that is complete
- */
-static void
-fc_bsg_jobdone(struct fc_bsg_job *job)
-{
- struct request *req = job->req;
- struct request *rsp = req->next_rq;
- int err;
-
- err = job->req->errors = job->reply->result;
-
- if (err < 0)
- /* we're only returning the result field in the reply */
- job->req->sense_len = sizeof(uint32_t);
- else
- job->req->sense_len = job->reply_len;
-
- /* we assume all request payload was transferred, residual == 0 */
- req->resid_len = 0;
-
- if (rsp) {
- WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
-
- /* set reply (bidi) residual */
- rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
- rsp->resid_len);
- }
- blk_complete_request(req);
-}
-
-/**
- * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
- * @rq: BSG request that holds the job to be destroyed
- */
-static void fc_bsg_softirq_done(struct request *rq)
-{
- struct fc_bsg_job *job = rq->special;
- unsigned long flags;
-
- spin_lock_irqsave(&job->job_lock, flags);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->ref_cnt--;
- spin_unlock_irqrestore(&job->job_lock, flags);
-
- blk_end_request_all(rq, rq->errors);
- fc_destroy_bsgjob(job);
-}
-
/**
* fc_bsg_job_timeout - handler for when a bsg request times out

* @req: request that timed out
@@ -3636,27 +3562,22 @@ static void fc_bsg_softirq_done(struct request *rq)
static enum blk_eh_timer_return
fc_bsg_job_timeout(struct request *req)
{
- struct fc_bsg_job *job = (void *) req->special;
- struct Scsi_Host *shost = job->shost;
+ struct bsg_job *job = (void *) req->special;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct fc_rport *rport = fc_bsg_to_rport(job);
struct fc_internal *i = to_fc_internal(shost->transportt);
- unsigned long flags;
- int err = 0, done = 0;
+ int err = 0, inflight = 0;
- if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
+ if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
return BLK_EH_RESET_TIMER;
- spin_lock_irqsave(&job->job_lock, flags);
- if (job->state_flags & FC_RQST_STATE_DONE)
- done = 1;
- else
- job->ref_cnt++;
- spin_unlock_irqrestore(&job->job_lock, flags);
+ inflight = bsg_job_get(job);
- if (!done && i->f->bsg_timeout) {
+ if (inflight && i->f->bsg_timeout) {
/* call LLDD to abort the i/o as it has timed out */
err = i->f->bsg_timeout(job);
if (err == -EAGAIN) {
- job->ref_cnt--;
+ bsg_job_put(job);
return BLK_EH_RESET_TIMER;
} else if (err)
printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
@@ -3664,126 +3585,33 @@ fc_bsg_job_timeout(struct request *req)
}
/* the blk_end_sync_io() doesn't check the error */
- if (done)
+ if (!inflight)
return BLK_EH_NOT_HANDLED;
else
return BLK_EH_HANDLED;
}
-static int
-fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
-{
- size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
-
- BUG_ON(!req->nr_phys_segments);
-
- buf->sg_list = kzalloc(sz, GFP_KERNEL);
- if (!buf->sg_list)
- return -ENOMEM;
- sg_init_table(buf->sg_list, req->nr_phys_segments);
- buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
- buf->payload_len = blk_rq_bytes(req);
- return 0;
-}
-
-
-/**
- * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
- * bsg request
- * @shost: SCSI Host corresponding to the bsg object
- * @rport: (optional) FC Remote Port corresponding to the bsg object
- * @req: BSG request that needs a job structure
- */
-static int
-fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
- struct request *req)
-{
- struct fc_internal *i = to_fc_internal(shost->transportt);
- struct request *rsp = req->next_rq;
- struct fc_bsg_job *job;
- int ret;
-
- BUG_ON(req->special);
-
- job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
- GFP_KERNEL);
- if (!job)
- return -ENOMEM;
-
- /*
- * Note: this is a bit silly.
- * The request gets formatted as a SGIO v4 ioctl request, which
- * then gets reformatted as a blk request, which then gets
- * reformatted as a fc bsg request. And on completion, we have
- * to wrap return results such that SGIO v4 thinks it was a scsi
- * status. I hope this was all worth it.
- */
-
- req->special = job;
- job->shost = shost;
- job->rport = rport;
- job->req = req;
- if (i->f->dd_bsg_size)
- job->dd_data = (void *)&job[1];
- spin_lock_init(&job->job_lock);
- job->request = (struct fc_bsg_request *)req->cmd;
- job->request_len = req->cmd_len;
- job->reply = req->sense;
- job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
- * allocated */
- if (req->bio) {
- ret = fc_bsg_map_buffer(&job->request_payload, req);
- if (ret)
- goto failjob_rls_job;
- }
- if (rsp && rsp->bio) {
- ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
- if (ret)
- goto failjob_rls_rqst_payload;
- }
- job->job_done = fc_bsg_jobdone;
- if (rport)
- job->dev = &rport->dev;
- else
- job->dev = &shost->shost_gendev;
- get_device(job->dev); /* take a reference for the request */
-
- job->ref_cnt = 1;
-
- return 0;
-
-
-failjob_rls_rqst_payload:
- kfree(job->request_payload.sg_list);
-failjob_rls_job:
- kfree(job);
- return -ENOMEM;
-}
-
-
-enum fc_dispatch_result {
- FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
- FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
- FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
-};
-
-
/**
* fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
- * @q: fc host request queue
* @shost: scsi host rport attached to
* @job: bsg job to be processed
*/
-static enum fc_dispatch_result
-fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_bsg_job *job)
+static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
/* Validate the host command */
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_HST_ADD_RPORT:
cmdlen += sizeof(struct fc_bsg_host_add_rport);
break;
@@ -3815,7 +3643,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
case FC_BSG_HST_VENDOR:
cmdlen += sizeof(struct fc_bsg_host_vendor);
if ((shost->hostt->vendor_id == 0L) ||
- (job->request->rqst_data.h_vendor.vendor_id !=
+ (bsg_request->rqst_data.h_vendor.vendor_id !=
shost->hostt->vendor_id)) {
ret = -ESRCH;
goto fail_host_msg;
@@ -3827,24 +3655,19 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
goto fail_host_msg;
}
- /* check if we really have all the request data needed */
- if (job->request_len < cmdlen) {
- ret = -ENOMSG;
- goto fail_host_msg;
- }
-
ret = i->f->bsg_request(job);
if (!ret)
- return FC_DISPATCH_UNLOCKED;
+ return 0;
fail_host_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = ret;
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = ret;
job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- return FC_DISPATCH_UNLOCKED;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return 0;
}
@@ -3855,34 +3678,38 @@ fail_host_msg:
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
- if (!rport->rqst_q)
+ struct request_queue *q = rport->rqst_q;
+ unsigned long flags;
+
+ if (!q)
return;
- /*
- * This get/put dance makes no sense
- */
- get_device(&rport->dev);
- blk_run_queue_async(rport->rqst_q);
- put_device(&rport->dev);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
* fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
- * @q: rport request queue
* @shost: scsi host rport attached to
- * @rport: rport request destined to
* @job: bsg job to be processed
*/
-static enum fc_dispatch_result
-fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_rport *rport, struct fc_bsg_job *job)
+static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_rport_msg;
+ }
+
/* Validate the rport command */
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
cmdlen += sizeof(struct fc_bsg_rport_els);
goto check_bidi;
@@ -3902,133 +3729,31 @@ check_bidi:
goto fail_rport_msg;
}
- /* check if we really have all the request data needed */
- if (job->request_len < cmdlen) {
- ret = -ENOMSG;
- goto fail_rport_msg;
- }
-
ret = i->f->bsg_request(job);
if (!ret)
- return FC_DISPATCH_UNLOCKED;
+ return 0;
fail_rport_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = ret;
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = ret;
job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- return FC_DISPATCH_UNLOCKED;
-}
-
-
-/**
- * fc_bsg_request_handler - generic handler for bsg requests
- * @q: request queue to manage
- * @shost: Scsi_Host related to the bsg object
- * @rport: FC remote port related to the bsg object (optional)
- * @dev: device structure for bsg object
- */
-static void
-fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_rport *rport, struct device *dev)
-{
- struct request *req;
- struct fc_bsg_job *job;
- enum fc_dispatch_result ret;
-
- if (!get_device(dev))
- return;
-
- while (1) {
- if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
- !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
- break;
-
- req = blk_fetch_request(q);
- if (!req)
- break;
-
- if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
- req->errors = -ENXIO;
- spin_unlock_irq(q->queue_lock);
- blk_end_request_all(req, -ENXIO);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- spin_unlock_irq(q->queue_lock);
-
- ret = fc_req_to_bsgjob(shost, rport, req);
- if (ret) {
- req->errors = ret;
- blk_end_request_all(req, ret);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- job = req->special;
-
- /* check if we have the msgcode value at least */
- if (job->request_len < sizeof(uint32_t)) {
- BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = -ENOMSG;
- job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- /* the dispatch routines will unlock the queue_lock */
- if (rport)
- ret = fc_bsg_rport_dispatch(q, shost, rport, job);
- else
- ret = fc_bsg_host_dispatch(q, shost, job);
-
- /* did dispatcher hit state that can't process any more */
- if (ret == FC_DISPATCH_BREAK)
- break;
-
- /* did dispatcher had released the lock */
- if (ret == FC_DISPATCH_UNLOCKED)
- spin_lock_irq(q->queue_lock);
- }
-
- spin_unlock_irq(q->queue_lock);
- put_device(dev);
- spin_lock_irq(q->queue_lock);
-}
-
-
-/**
- * fc_bsg_host_handler - handler for bsg requests for a fc host
- * @q: fc host request queue
- */
-static void
-fc_bsg_host_handler(struct request_queue *q)
-{
- struct Scsi_Host *shost = q->queuedata;
-
- fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return 0;
}
-
-/**
- * fc_bsg_rport_handler - handler for bsg requests for a fc rport
- * @q: rport request queue
- */
-static void
-fc_bsg_rport_handler(struct request_queue *q)
+static int fc_bsg_dispatch(struct bsg_job *job)
{
- struct fc_rport *rport = q->queuedata;
- struct Scsi_Host *shost = rport_to_shost(rport);
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
- fc_bsg_request_handler(q, shost, rport, &rport->dev);
+ if (scsi_is_fc_rport(job->dev))
+ return fc_bsg_rport_dispatch(shost, job);
+ else
+ return fc_bsg_host_dispatch(shost, job);
}
-
/**
* fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
* @shost: shost for fc_host
@@ -4051,33 +3776,42 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
- q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
if (!q) {
- printk(KERN_ERR "fc_host%d: bsg interface failed to "
- "initialize - no request queue\n",
- shost->host_no);
+ dev_err(dev,
+ "fc_host%d: bsg interface failed to initialize - no request queue\n",
+ shost->host_no);
return -ENOMEM;
}
- q->queuedata = shost;
- queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, fc_bsg_softirq_done);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
- blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
-
- err = bsg_register_queue(q, dev, bsg_name, NULL);
+ err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
+ i->f->dd_bsg_size);
if (err) {
- printk(KERN_ERR "fc_host%d: bsg interface failed to "
- "initialize - register queue\n",
- shost->host_no);
+ dev_err(dev,
+ "fc_host%d: bsg interface failed to initialize - setup queue\n",
+ shost->host_no);
blk_cleanup_queue(q);
return err;
}
-
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
return 0;
}
+static int fc_bsg_rport_prep(struct request_queue *q, struct request *req)
+{
+ struct fc_rport *rport = dev_to_rport(q->queuedata);
+
+ if (rport->port_state == FC_PORTSTATE_BLOCKED &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+ return BLKPREP_DEFER;
+
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ return BLKPREP_KILL;
+
+ return BLKPREP_OK;
+}
/**
* fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
@@ -4097,29 +3831,22 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
if (!q) {
- printk(KERN_ERR "%s: bsg interface failed to "
- "initialize - no request queue\n",
- dev->kobj.name);
+ dev_err(dev, "bsg interface failed to initialize - no request queue\n");
return -ENOMEM;
}
- q->queuedata = rport;
- queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, fc_bsg_softirq_done);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
- blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
-
- err = bsg_register_queue(q, dev, NULL, NULL);
+ err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
if (err) {
- printk(KERN_ERR "%s: bsg interface failed to "
- "initialize - register queue\n",
- dev->kobj.name);
+ dev_err(dev, "failed to setup bsg queue\n");
blk_cleanup_queue(q);
return err;
}
+ blk_queue_prep_rq(q, fc_bsg_rport_prep);
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
rport->rqst_q = q;
return 0;
}
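
Note on the conversion above: the FC transport no longer runs its own request_fn handlers; bsg_setup_queue() registers a dispatch callback and the LLD completes each job with bsg_job_done(). Below is a minimal sketch of such a callback, assuming the bsg-lib API of this kernel generation; the my_lld_* name is a hypothetical placeholder, not part of this patch.

#include <linux/bsg-lib.h>

/* Hypothetical dispatch callback, as would be passed to bsg_setup_queue(). */
static int my_lld_bsg_dispatch(struct bsg_job *job)
{
	/* ... hand job->request to the hardware and fill in job->reply ... */

	/* Complete the job once the reply payload has been produced. */
	bsg_job_done(job, 0 /* result */, 0 /* reply_payload_rcv_len */);
	return 0;
}
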
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index e3cd3ece4412..b87a78673f65 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -115,21 +114,12 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
NULL, NULL, NULL);
-#define SRP_PID(p) \
- (p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
- (p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
- (p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
- (p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]
-
-#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
-
static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_rport *rport = transport_class_to_srp_rport(dev);
- return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
+ return sprintf(buf, "%16phC\n", rport->port_id);
}
static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);
@@ -402,36 +392,6 @@ static void srp_reconnect_work(struct work_struct *work)
}
}
-/**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
- *
- * To do: add support for scsi-mq in this function.
- */
-static int scsi_request_fn_active(struct Scsi_Host *shost)
-{
- struct scsi_device *sdev;
- struct request_queue *q;
- int request_fn_active = 0;
-
- shost_for_each_device(sdev, shost) {
- q = sdev->request_queue;
-
- spin_lock_irq(q->queue_lock);
- request_fn_active += q->request_fn_active;
- spin_unlock_irq(q->queue_lock);
- }
-
- return request_fn_active;
-}
-
-/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
-static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
-{
- while (scsi_request_fn_active(shost))
- msleep(20);
-}
-
static void __rport_fail_io_fast(struct srp_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
@@ -441,14 +401,17 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
return;
+ /*
+ * Call scsi_target_block() to wait for ongoing shost->queuecommand()
+ * calls before invoking i->f->terminate_rport_io().
+ */
+ scsi_target_block(rport->dev.parent);
scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
/* Involve the LLD if possible to terminate all I/O on the rport. */
i = to_srp_internal(shost->transportt);
- if (i->f->terminate_rport_io) {
- srp_wait_for_queuecommand(shost);
+ if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
- }
}
/**
@@ -576,7 +539,6 @@ int srp_reconnect_rport(struct srp_rport *rport)
if (res)
goto out;
scsi_target_block(&shost->shost_gendev);
- srp_wait_for_queuecommand(shost);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
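
Side note on the port_id change above: "%16phC" is the kernel's hex-dump printf extension, which prints a byte buffer as lowercase hex with ':' separators, so the sixteen-argument format string collapses into a single specifier. A user-space illustration of the equivalent output (the %phC extension itself exists only in the kernel's vsnprintf, and the example ID is made up):

#include <stdio.h>
#include <stdint.h>

/* Format a 16-byte SRP port ID the way "%16phC" would. */
static void format_port_id(char *buf, size_t len, const uint8_t *id)
{
	size_t off = 0;
	int i;

	for (i = 0; i < 16 && off < len; i++)
		off += snprintf(buf + off, len - off, "%02x%s",
				id[i], i == 15 ? "" : ":");
}

int main(void)
{
	uint8_t id[16] = { 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			   0x00, 0x02, 0xc9, 0x03, 0x00, 0x10, 0x20, 0x30 };
	char buf[64];

	format_port_id(buf, sizeof(buf), id);
	printf("%s\n", buf);	/* fe:80:00:...:30 */
	return 0;
}
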
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 079c2d9759fb..1622e23138e0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2465,9 +2465,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdkp->first_scan || old_wp != sdkp->write_prot) {
sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
sdkp->write_prot ? "on" : "off");
- sd_printk(KERN_DEBUG, sdkp,
- "Mode Sense: %02x %02x %02x %02x\n",
- buffer[0], buffer[1], buffer[2], buffer[3]);
+ sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
}
}
}
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07b6444d3e0a..b673825f46b5 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -929,8 +929,6 @@ struct pqi_ctrl_info {
int max_msix_vectors;
int num_msix_vectors_enabled;
int num_msix_vectors_initialized;
- u32 msix_vectors[PQI_MAX_MSIX_VECTORS];
- void *intr_data[PQI_MAX_MSIX_VECTORS];
int event_irq;
struct Scsi_Host *scsi_host;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a535b2661f38..8702d9cf8040 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -25,6 +25,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -2887,19 +2888,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
+ struct pci_dev *pdev = ctrl_info->pci_dev;
int i;
int rc;
- ctrl_info->event_irq = ctrl_info->msix_vectors[0];
+ ctrl_info->event_irq = pci_irq_vector(pdev, 0);
for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
- rc = request_irq(ctrl_info->msix_vectors[i],
- pqi_irq_handler, 0,
- DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
+ rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
+ DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
+ dev_err(&pdev->dev,
"irq %u init failed with error %d\n",
- ctrl_info->msix_vectors[i], rc);
+ pci_irq_vector(pdev, i), rc);
return rc;
}
ctrl_info->num_msix_vectors_initialized++;
@@ -2908,72 +2909,23 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
- free_irq(ctrl_info->msix_vectors[i],
- ctrl_info->intr_data[i]);
-}
-
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int i;
- int max_vectors;
- int num_vectors_enabled;
- struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
-
- max_vectors = ctrl_info->num_queue_groups;
-
- for (i = 0; i < max_vectors; i++)
- msix_entries[i].entry = i;
-
- num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
- msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
+ int ret;
- if (num_vectors_enabled < 0) {
+ ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
+ PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (ret < 0) {
dev_err(&ctrl_info->pci_dev->dev,
- "MSI-X init failed with error %d\n",
- num_vectors_enabled);
- return num_vectors_enabled;
- }
-
- ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
- for (i = 0; i < num_vectors_enabled; i++) {
- ctrl_info->msix_vectors[i] = msix_entries[i].vector;
- ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
+ "MSI-X init failed with error %d\n", ret);
+ return ret;
}
+ ctrl_info->num_msix_vectors_enabled = ret;
return 0;
}
-static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
- int rc;
- int cpu;
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
- rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
- get_cpu_mask(cpu));
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "error %d setting affinity hint for irq vector %u\n",
- rc, ctrl_info->msix_vectors[i]);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-}
-
-static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
- irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
-}
-
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -4743,6 +4695,13 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return 0;
}
+static int pqi_map_queues(struct Scsi_Host *shost)
+{
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
+}
+
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
void __user *arg)
{
@@ -5130,6 +5089,7 @@ static struct scsi_host_template pqi_driver_template = {
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
+ .map_queues = pqi_map_queues,
.sdev_attrs = pqi_sdev_attrs,
.shost_attrs = pqi_shost_attrs,
};
@@ -5159,7 +5119,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->cmd_per_lun = shost->can_queue;
shost->sg_tablesize = ctrl_info->sg_tablesize;
shost->transportt = pqi_sas_transport_template;
- shost->irq = ctrl_info->msix_vectors[0];
+ shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
shost->unique_id = shost->irq;
shost->nr_hw_queues = ctrl_info->num_queue_groups;
shost->hostdata[0] = (unsigned long)ctrl_info;
@@ -5409,8 +5369,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- pqi_irq_set_affinity_hint(ctrl_info);
-
rc = pqi_create_queues(ctrl_info);
if (rc)
return rc;
@@ -5557,10 +5515,14 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- pqi_irq_unset_affinity_hint(ctrl_info);
- pqi_free_irqs(ctrl_info);
- if (ctrl_info->num_msix_vectors_enabled)
- pci_disable_msix(ctrl_info->pci_dev);
+ int i;
+
+ for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
+ free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
+ &ctrl_info->queue_groups[i]);
+ }
+
+ pci_free_irq_vectors(ctrl_info->pci_dev);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
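
For reference, the MSI-X rework above follows the generic pci_alloc_irq_vectors() pattern: the PCI core allocates and spreads the vectors (PCI_IRQ_AFFINITY replaces the hand-rolled affinity hints), and pci_irq_vector() maps a vector index to a Linux IRQ number. A hedged sketch of that pattern is shown below; the my_* names and queue count are hypothetical placeholders, not smartpqi code.

#include <linux/pci.h>
#include <linux/interrupt.h>

#define MY_MAX_QUEUES	8

struct my_queue { int id; };
struct my_dev { struct my_queue queues[MY_MAX_QUEUES]; };

static irqreturn_t my_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_setup_irqs(struct pci_dev *pdev, struct my_dev *dev)
{
	int nvecs, i, rc;

	/* Allocate 1..MY_MAX_QUEUES MSI-X vectors, spread across CPUs. */
	nvecs = pci_alloc_irq_vectors(pdev, 1, MY_MAX_QUEUES,
				      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvecs < 0)
		return nvecs;

	for (i = 0; i < nvecs; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), my_handler, 0,
				 "my_dev", &dev->queues[i]);
		if (rc)
			goto err;
	}
	return 0;

err:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), &dev->queues[i]);
	pci_free_irq_vectors(pdev);
	return rc;
}
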
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8ccfc9ea874b..05526b71541b 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1495,9 +1495,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
if (sg_count) {
if (sg_count > MAX_PAGE_BUFFER_COUNT) {
- payload_sz = (sg_count * sizeof(void *) +
+ payload_sz = (sg_count * sizeof(u64) +
sizeof(struct vmbus_packet_mpb_array));
- payload = kmalloc(payload_sz, GFP_ATOMIC);
+ payload = kzalloc(payload_sz, GFP_ATOMIC);
if (!payload)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
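
A brief note on the storvsc sizing fix above: the trailing array holds 64-bit PFN slots, so the allocation must be sized with sizeof(u64) rather than sizeof(void *); the two only coincide on 64-bit builds. kzalloc() additionally zeroes any slots that end up unused. A small user-space illustration of the size difference (the sg_count value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	size_t sg_count = 64;

	/* On a 32-bit build sizeof(void *) is 4 while the PFN slots are 8. */
	printf("sizeof(void *)   = %zu\n", sizeof(void *));
	printf("sizeof(uint64_t) = %zu\n", sizeof(uint64_t));
	printf("payload bytes for %zu entries: %zu\n",
	       sg_count, sg_count * sizeof(uint64_t));
	return 0;
}
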
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3c4c07038948..88db6992420e 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -43,20 +43,18 @@
#define NCR5380_implementation_fields /* none */
-#define NCR5380_read(reg) sun3scsi_read(reg)
-#define NCR5380_write(reg, value) sun3scsi_write(reg, value)
+#define NCR5380_read(reg) in_8(hostdata->io + (reg))
+#define NCR5380_write(reg, value) out_8(hostdata->io + (reg), value)
#define NCR5380_queue_command sun3scsi_queue_command
#define NCR5380_bus_reset sun3scsi_bus_reset
#define NCR5380_abort sun3scsi_abort
#define NCR5380_info sun3scsi_info
-#define NCR5380_dma_recv_setup(instance, data, count) (count)
-#define NCR5380_dma_send_setup(instance, data, count) (count)
-#define NCR5380_dma_residual(instance) \
- sun3scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd)
+#define NCR5380_dma_xfer_len sun3scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup sun3scsi_dma_count
+#define NCR5380_dma_send_setup sun3scsi_dma_count
+#define NCR5380_dma_residual sun3scsi_dma_residual
#define NCR5380_acquire_dma_irq(instance) (1)
#define NCR5380_release_dma_irq(instance)
@@ -82,7 +80,6 @@ module_param(setup_hostid, int, 0);
#define SUN3_DVMA_BUFSIZE 0xe000
static struct scsi_cmnd *sun3_dma_setup_done;
-static unsigned char *sun3_scsi_regp;
static volatile struct sun3_dma_regs *dregs;
static struct sun3_udc_regs *udc_regs;
static unsigned char *sun3_dma_orig_addr;
@@ -90,20 +87,6 @@ static unsigned long sun3_dma_orig_count;
static int sun3_dma_active;
static unsigned long last_residual;
-/*
- * NCR 5380 register access functions
- */
-
-static inline unsigned char sun3scsi_read(int reg)
-{
- return in_8(sun3_scsi_regp + reg);
-}
-
-static inline void sun3scsi_write(int reg, int value)
-{
- out_8(sun3_scsi_regp + reg, value);
-}
-
#ifndef SUN3_SCSI_VME
/* dma controller register access functions */
@@ -158,8 +141,8 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dev)
}
/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
-static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
- void *data, unsigned long count, int write_flag)
+static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count, int write_flag)
{
void *addr;
@@ -211,9 +194,10 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
dregs->csr |= CSR_FIFO;
if(dregs->fifo_count != count) {
- shost_printk(KERN_ERR, instance, "FIFO mismatch %04x not %04x\n",
+ shost_printk(KERN_ERR, hostdata->host,
+ "FIFO mismatch %04x not %04x\n",
dregs->fifo_count, (unsigned int) count);
- NCR5380_dprint(NDEBUG_DMA, instance);
+ NCR5380_dprint(NDEBUG_DMA, hostdata->host);
}
/* setup udc */
@@ -248,14 +232,34 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
}
-static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
+static int sun3scsi_dma_count(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return count;
+}
+
+static inline int sun3scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return sun3scsi_dma_setup(hostdata, data, count, 0);
+}
+
+static inline int sun3scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return sun3scsi_dma_setup(hostdata, data, count, 1);
+}
+
+static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata)
{
return last_residual;
}
-static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len,
- struct scsi_cmnd *cmd)
+static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
{
+ int wanted_len = cmd->SCp.this_residual;
+
if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
return 0;
@@ -428,9 +432,10 @@ static struct scsi_host_template sun3_scsi_template = {
static int __init sun3_scsi_probe(struct platform_device *pdev)
{
struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
int error;
struct resource *irq, *mem;
- unsigned char *ioaddr;
+ void __iomem *ioaddr;
int host_flags = 0;
#ifdef SUN3_SCSI_VME
int i;
@@ -493,8 +498,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
}
#endif
- sun3_scsi_regp = ioaddr;
-
instance = scsi_host_alloc(&sun3_scsi_template,
sizeof(struct NCR5380_hostdata));
if (!instance) {
@@ -502,9 +505,12 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
goto fail_alloc;
}
- instance->io_port = (unsigned long)ioaddr;
instance->irq = irq->start;
+ hostdata = shost_priv(instance);
+ hostdata->base = mem->start;
+ hostdata->io = ioaddr;
+
error = NCR5380_init(instance, host_flags);
if (error)
goto fail_init;
@@ -552,13 +558,15 @@ fail_init:
fail_alloc:
if (udc_regs)
dvma_free(udc_regs);
- iounmap(sun3_scsi_regp);
+ iounmap(ioaddr);
return error;
}
static int __exit sun3_scsi_remove(struct platform_device *pdev)
{
struct Scsi_Host *instance = platform_get_drvdata(pdev);
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ void __iomem *ioaddr = hostdata->io;
scsi_remove_host(instance);
free_irq(instance->irq, instance);
@@ -566,7 +574,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev)
scsi_host_put(instance);
if (udc_regs)
dvma_free(udc_regs);
- iounmap(sun3_scsi_regp);
+ iounmap(ioaddr);
return 0;
}
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aedf73f1131..aa43bfea0d00 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1094,10 +1094,12 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* ufs_qcom_setup_clocks - enables/disables clocks
* @hba: host controller instance
* @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
*
* Returns 0 on success, non-zero on failure.
*/
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
@@ -1111,18 +1113,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (!host)
return 0;
- if (on) {
- err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
- if (err)
- goto out;
+ if (on && (status == POST_CHANGE)) {
+ phy_power_on(host->generic_phy);
- err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
- if (err) {
- dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
- __func__, err);
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- goto out;
- }
/* enable the device ref clock for HS mode */
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1130,14 +1123,15 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (vote == host->bus_vote.min_bw_vote)
ufs_qcom_update_bus_bw_vote(host);
- } else {
-
- /* M-PHY RMMI interface clocks can be turned off */
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba))
+ } else if (!on && (status == PRE_CHANGE)) {
+ if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
+ /* powering off PHY during aggressive clk gating */
+ phy_power_off(host->generic_phy);
+ }
+
vote = host->bus_vote.min_bw_vote;
}
@@ -1146,7 +1140,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "%s: set bus vote failed %d\n",
__func__, err);
-out:
return err;
}
@@ -1204,12 +1197,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy);
dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
- goto out;
+ goto out_variant_clear;
}
err = ufs_qcom_bus_register(host);
if (err)
- goto out_host_free;
+ goto out_variant_clear;
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
@@ -1254,7 +1247,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
- ufs_qcom_setup_clocks(hba, true);
+ ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
@@ -1274,8 +1267,7 @@ out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
phy_exit(host->generic_phy);
-out_host_free:
- devm_kfree(dev, host);
+out_variant_clear:
ufshcd_set_variant(hba, NULL);
out:
return err;
@@ -1287,6 +1279,7 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
ufs_qcom_disable_lane_clks(host);
phy_power_off(host->generic_phy);
+ phy_exit(host->generic_phy);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
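
The setup_clocks rework above hinges on the new status argument: the PHY is powered on only once the clocks are already on (on && POST_CHANGE) and powered off only before they are removed (!on && PRE_CHANGE). A standalone sketch of that decision, with my_* names as hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum change_status { PRE_CHANGE, POST_CHANGE };

static void my_phy_power_on(void)  { puts("phy on");  }
static void my_phy_power_off(void) { puts("phy off"); }

static void my_setup_clocks(bool on, enum change_status status)
{
	/* Power the PHY up only after the clocks have been enabled ... */
	if (on && status == POST_CHANGE)
		my_phy_power_on();
	/* ... and power it down before the clocks go away. */
	else if (!on && status == PRE_CHANGE)
		my_phy_power_off();
}

int main(void)
{
	my_setup_clocks(true, POST_CHANGE);	/* phy on */
	my_setup_clocks(false, PRE_CHANGE);	/* phy off */
	return 0;
}
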
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 845b874e2977..8e6709a3fb6b 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -46,6 +46,7 @@
#define QUERY_DESC_HDR_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -162,7 +163,7 @@ enum desc_header_offset {
};
enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+ QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
QUERY_DESC_UNIT_MAX_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
@@ -416,7 +417,7 @@ struct utp_cmd_rsp {
__be32 residual_transfer_count;
__be32 reserved[4];
__be16 sense_data_len;
- u8 sense_data[18];
+ u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 22f881e9253a..f7983058f3f7 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -128,6 +128,13 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
+/*
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; enabling this quirk ensures this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
+
+
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
@@ -140,6 +147,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index d15eaa466c59..52b546fb509b 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
+ ufshcd_dealloc_host(hba);
}
/**
@@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
+ ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index db53f38da864..a72a4ba78125 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
if (ret) {
dev_err(dev, "%s: unable to find %s err %d\n",
__func__, prop_name, ret);
- goto out_free;
+ goto out;
}
vreg->min_uA = 0;
@@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
goto out;
-out_free:
- devm_kfree(dev, vreg);
- vreg = NULL;
out:
if (!ret)
*out_vreg = vreg;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index cf549871c1ee..ef8548c3a423 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -45,6 +45,8 @@
#include "ufs_quirks.h"
#include "unipro.h"
+#define UFSHCD_REQ_SENSE_SIZE 18
+
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
@@ -57,15 +59,9 @@
#define NOP_OUT_TIMEOUT 30 /* msecs */
/* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
-/*
- * Query request timeout for fDeviceInit flag
- * fDeviceInit query response time for some devices is too large that default
- * QUERY_REQ_TIMEOUT may not be enough for such devices.
- */
-#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -123,6 +119,7 @@ enum {
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED,
};
/* UFSHCD error handling flags */
@@ -598,6 +595,20 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
return false;
}
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -631,8 +642,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
scsi_unblock_requests(hba->host);
}
@@ -660,6 +670,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
+ /*
+ * Wait for the ungate work to complete if in progress.
+ * Though the clocks may be in ON state, the link could
+ * still be in hibern8 state if hibern8 is allowed
+ * during clock gating.
+ * Make sure we exit hibern8 state also in addition to
+ * clocks being ON.
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -709,7 +734,14 @@ static void ufshcd_gate_work(struct work_struct *work)
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.is_suspended) {
+ /*
+ * If a request to cancel this work arrived while it was pending,
+ * the gating state will already be REQ_CLKS_ON. In that case save
+ * time by skipping the gating work and exit after setting the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state == REQ_CLKS_ON)) {
hba->clk_gating.state = CLKS_ON;
goto rel_lock;
}
@@ -731,10 +763,7 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
+ ufshcd_suspend_clkscaling(hba);
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
@@ -878,6 +907,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
}
/**
@@ -889,10 +920,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
int len;
if (lrbp->sense_buffer &&
ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ int len_to_copy;
+
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
- min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+ min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
}
}
@@ -1088,7 +1123,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*
* Returns 0 in case of success, non-zero value in case of failure
*/
-static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufshcd_sg_entry *prd_table;
struct scatterlist *sg;
@@ -1102,8 +1137,13 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
return sg_segments;
if (sg_segments) {
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)(sg_segments *
+ sizeof(struct ufshcd_sg_entry)));
+ else
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
@@ -1410,6 +1450,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
+ case UFSHCD_STATE_EH_SCHEDULED:
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_unlock;
@@ -1457,7 +1498,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
- lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -1465,15 +1506,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_comp_scsi_upiu(hba, lrbp);
- err = ufshcd_map_sg(lrbp);
+ err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1581,6 +1625,8 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
@@ -1683,6 +1729,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1789,9 +1836,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
- timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
-
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
@@ -1861,8 +1905,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -1961,8 +2005,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -2055,18 +2099,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
desc_id, desc_index, 0, desc_buf,
&buff_len);
- if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
- (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
- ufs_query_desc_max_size[desc_id])
- || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
- __func__, desc_id, param_offset, buff_len, ret);
- if (!ret)
- ret = -EINVAL;
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * While reading variable size descriptors (like string descriptor),
+ * some UFS devices may report the "LENGTH" (field in "Transaction
+ * Specific fields" of Query Response UPIU) same as what was requested
+ * in Query Request UPIU instead of reporting the actual size of the
+ * variable size descriptor.
+ * It is safe to ignore the "LENGTH" field for variable size
+ * descriptors, since the descriptor length can always be derived
+ * from the descriptor header fields. Hence the length match check
+ * below is imposed only for fixed size descriptors (for which we
+ * always request the correct size as part of the Query Request UPIU).
+ */
+ if ((desc_id != QUERY_DESC_IDN_STRING) &&
+ (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+ dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+ __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
@@ -2088,7 +2155,18 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
u8 *buf,
u32 size)
{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ int err = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read descriptor*/
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ if (!err)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+
+ return err;
}
int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@ -2320,12 +2398,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
/* Response upiu and prdt offset should be in double words */
- utrdlp[i].response_upiu_offset =
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset);
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16(prdt_offset);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+ } else {
+ utrdlp[i].response_upiu_offset =
cpu_to_le16((response_offset >> 2));
- utrdlp[i].prd_table_offset =
+ utrdlp[i].prd_table_offset =
cpu_to_le16((prdt_offset >> 2));
- utrdlp[i].response_upiu_length =
+ utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+ }
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
hba->lrb[i].ucd_req_ptr =
@@ -2429,10 +2516,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val,
- retries);
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
return ret;
}
@@ -2496,9 +2583,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
get, UIC_GET_ATTR_ID(attr_sel), ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
- get, UIC_GET_ATTR_ID(attr_sel), retries);
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
if (mib_val && !ret)
*mib_val = uic_cmd.argument3;
@@ -2651,6 +2739,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
int ret;
struct uic_command uic_cmd = {0};
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
@@ -2664,7 +2754,9 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
*/
if (ufshcd_link_recovery(hba))
ret = -ENOLINK;
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+ POST_CHANGE);
return ret;
}
@@ -2687,13 +2779,17 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
struct uic_command uic_cmd = {0};
int ret;
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
if (ret) {
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
__func__, ret);
ret = ufshcd_link_recovery(hba);
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+ POST_CHANGE);
return ret;
}
@@ -2725,8 +2821,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
if (hba->max_pwr_info.is_valid)
return 0;
- pwr_info->pwr_tx = FASTAUTO_MODE;
- pwr_info->pwr_rx = FASTAUTO_MODE;
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
@@ -2757,7 +2853,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_rx);
return -EINVAL;
}
- pwr_info->pwr_rx = SLOWAUTO_MODE;
+ pwr_info->pwr_rx = SLOW_MODE;
}
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2770,7 +2866,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_tx);
return -EINVAL;
}
- pwr_info->pwr_tx = SLOWAUTO_MODE;
+ pwr_info->pwr_tx = SLOW_MODE;
}
hba->max_pwr_info.is_valid = true;
@@ -3090,7 +3186,16 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = false;
+
+ /*
+ * If the UFS device isn't active, we have to issue link startup
+ * twice to make sure the device state moves to active.
+ */
+ if (!ufshcd_is_ufs_dev_active(hba))
+ link_startup_again = true;
+link_startup:
do {
ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
@@ -3116,6 +3221,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
/* failed to get the link up... retire */
goto out;
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
ret = ufshcd_disable_device_tx_lcc(hba);
if (ret)
@@ -3181,16 +3292,24 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
int ret = 0;
u8 lun_qdepth;
+ int retries;
struct ufs_hba *hba;
hba = shost_priv(sdev->host);
lun_qdepth = hba->nutrs;
- ret = ufshcd_read_unit_desc_param(hba,
- ufshcd_scsi_to_upiu_lun(sdev->lun),
- UNIT_DESC_PARAM_LU_Q_DEPTH,
- &lun_qdepth,
- sizeof(lun_qdepth));
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read descriptor */
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+ if (!ret || ret == -ENOTSUPP)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
+ }
/* Some WLUN doesn't support unit descriptor */
if (ret == -EOPNOTSUPP)
@@ -4097,6 +4216,17 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ /* PHY layer lane error */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ /* Ignore LINERESET indication, as this is not an error */
+ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+ /*
+ * To know whether this error is fatal or not, DB timeout
+ * must be checked but this error is handled separately.
+ */
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
@@ -4158,7 +4288,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
/* block commands from scsi mid-layer */
scsi_block_requests(hba->host);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
schedule_work(&hba->eh_work);
}
}
@@ -4311,6 +4441,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
task_req_upiup->input_param1 = cpu_to_be32(lun_id);
task_req_upiup->input_param2 = cpu_to_be32(task_id);
+ ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
/* send command to the controller */
__set_bit(free_slot, &hba->outstanding_tasks);
@@ -4318,6 +4450,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
wmb();
ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
spin_unlock_irqrestore(host->host_lock, flags);
@@ -4722,6 +4856,24 @@ out:
return icc_level;
}
+static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
+{
+ int ret = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* write attribute */
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
+ if (!ret)
+ break;
+
+ dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
+ }
+
+ return ret;
+}
+
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
@@ -4742,9 +4894,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
__func__, hba->init_prefetch_data.icc_level);
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ ret = ufshcd_set_icc_levels_attr(hba,
+ hba->init_prefetch_data.icc_level);
if (ret)
dev_err(hba->dev,
@@ -4965,6 +5116,76 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
+ * enabled for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ u32 pa_tactivate_us, peer_pa_tactivate_us;
+ u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ if (ret)
+ goto out;
+
+ if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+ (granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+ __func__, granularity);
+ return -EINVAL;
+ }
+
+ if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+ (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+ __func__, peer_granularity);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+ peer_pa_tactivate_us = peer_pa_tactivate *
+ gran_to_us_table[peer_granularity - 1];
+
+ if (pa_tactivate_us > peer_pa_tactivate_us) {
+ u32 new_peer_pa_tactivate;
+
+ new_peer_pa_tactivate = pa_tactivate_us /
+ gran_to_us_table[peer_granularity - 1];
+ new_peer_pa_tactivate++;
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ new_peer_pa_tactivate);
+ }
+
+out:
+ return ret;
+}
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
@@ -4975,6 +5196,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ ufshcd_quirk_tune_host_pa_tactivate(hba);
}
/**
@@ -5027,9 +5251,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
__func__);
} else {
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret)
+ if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ goto out;
+ }
}
/* set the state as operational after switching to desired gear */
@@ -5062,8 +5288,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
hba->is_init_prefetch = true;
/* Resume devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
out:
/*
@@ -5389,6 +5614,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (!head || list_empty(head))
goto out;
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
@@ -5410,7 +5639,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- ret = ufshcd_vops_setup_clocks(hba, on);
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+
out:
if (ret) {
list_for_each_entry(clki, head, list) {
@@ -5500,8 +5732,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
if (!hba->vops)
return;
- ufshcd_vops_setup_clocks(hba, false);
-
ufshcd_vops_setup_regulators(hba, false);
ufshcd_vops_exit(hba);
@@ -5564,6 +5794,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -5577,19 +5808,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
0,
0,
0,
- SCSI_SENSE_BUFFERSIZE,
+ UFSHCD_REQ_SENSE_SIZE,
0};
char *buffer;
int ret;
- buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto out;
}
ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
- SCSI_SENSE_BUFFERSIZE, NULL,
+ UFSHCD_REQ_SENSE_SIZE, NULL,
msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@@ -5766,7 +5997,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
!hba->dev_info.is_lu_power_on_wp) {
ret = ufshcd_setup_vreg(hba, true);
} else if (!ufshcd_is_ufs_dev_active(hba)) {
- ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
if (!ret && !ufshcd_is_link_active(hba)) {
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
if (ret)
@@ -5775,6 +6005,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
if (ret)
goto vccq_lpm;
}
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
}
goto out;
@@ -5839,6 +6070,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hold(hba, false);
hba->clk_gating.is_suspended = true;
+ ufshcd_suspend_clkscaling(hba);
+
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
goto disable_clks;
@@ -5846,12 +6079,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
- goto out;
+ goto enable_gating;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
- goto out;
+ goto enable_gating;
}
if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5888,15 +6121,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
disable_clks:
/*
- * The clock scaling needs access to controller registers. Hence, Wait
- * for pending clock scaling work to be done before clocks are
- * turned off.
- */
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
- /*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space call them before the
* host clocks are ON.
@@ -5905,10 +6129,6 @@ disable_clks:
if (ret)
goto set_link_active;
- ret = ufshcd_vops_setup_clocks(hba, false);
- if (ret)
- goto vops_resume;
-
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
@@ -5925,9 +6145,8 @@ disable_clks:
ufshcd_hba_vreg_set_lpm(hba);
goto out;
-vops_resume:
- ufshcd_vops_resume(hba, pm_op);
set_link_active:
+ ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
ufshcd_set_link_active(hba);
@@ -5937,6 +6156,7 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
+ ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
out:
@@ -6015,8 +6235,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_urgent_bkops(hba);
hba->clk_gating.is_suspended = false;
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -6030,6 +6249,7 @@ disable_vreg:
ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
out:
hba->pm_op_in_progress = 0;
@@ -6052,16 +6272,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if (!hba || !hba->is_powered)
return 0;
- if (pm_runtime_suspended(hba->dev)) {
- if (hba->rpm_lvl == hba->spm_lvl)
- /*
- * There is possibility that device may still be in
- * active state during the runtime suspend.
- */
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
- goto out;
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) &&
+ (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ hba->uic_link_state))
+ goto out;
+ if (pm_runtime_suspended(hba->dev)) {
/*
* UFS device and/or UFS link low power states during runtime
* suspend seems to be different than what is expected during
@@ -6092,7 +6309,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
int ufshcd_system_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
/*
* Let the runtime resume take care of resuming
* if runtime suspended.
@@ -6113,7 +6333,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@ -6143,10 +6366,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
- else
- return ufshcd_resume(hba, UFS_RUNTIME_PM);
+
+ return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@@ -6198,11 +6424,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
- scsi_host_put(hba->host);
-
ufshcd_exit_clk_gating(hba);
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_remove_device(hba->devfreq);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6324,15 +6546,47 @@ static int ufshcd_devfreq_target(struct device *dev,
{
int err = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
+ bool release_clk_hold = false;
+ unsigned long irq_flags;
if (!ufshcd_is_clkscaling_enabled(hba))
return -EINVAL;
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (ufshcd_is_clkgating_allowed(hba) &&
+ (hba->clk_gating.state != CLKS_ON)) {
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /* hold the vote until the scaling work is completed */
+ hba->clk_gating.active_reqs++;
+ release_clk_hold = true;
+ hba->clk_gating.state = CLKS_ON;
+ } else {
+ /*
+ * Clock gating work seems to be running in parallel,
+ * so skip the scaling work to avoid a deadlock between
+ * the current scaling work and the gating work.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
if (*freq == UINT_MAX)
err = ufshcd_scale_clks(hba, true);
else if (*freq == 0)
err = ufshcd_scale_clks(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (release_clk_hold)
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
return err;
}
@@ -6498,7 +6752,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
if (ufshcd_is_clkscaling_enabled(hba)) {
- hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+ hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
"simple_ondemand", NULL);
if (IS_ERR(hba->devfreq)) {
dev_err(hba->dev, "Unable to register with devfreq %ld\n",
@@ -6507,18 +6761,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_remove_scsi_host;
}
/* Suspend devfreq until the UFS device is detected */
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
+ ufshcd_suspend_clkscaling(hba);
}
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
/*
- * The device-initialize-sequence hasn't been invoked yet.
- * Set the device to power-off state
+ * We assume that the device wasn't put into sleep/power-down
+ * state exclusively by the boot stage that ran before the kernel.
+ * This assumption helps avoid doing link startup twice during
+ * ufshcd_probe_hba().
*/
- ufshcd_set_ufs_dev_poweroff(hba);
+ ufshcd_set_ufs_dev_active(hba);
async_schedule(ufshcd_async_scan, hba);
@@ -6530,7 +6785,6 @@ exit_gating:
ufshcd_exit_clk_gating(hba);
out_disable:
hba->is_irq_enabled = false;
- scsi_host_put(host);
ufshcd_hba_exit(hba);
out_error:
return err;
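
Worked example for the ufshcd_quirk_tune_host_pa_tactivate() hunk above: both sides' PA_TACTIVATE values are converted to microseconds through the PA_GRANULARITY step table {1, 4, 8, 16, 32, 100}, and if the host value turns out larger, the device value is bumped just past it. The numbers below are made up purely for illustration:

#include <stdio.h>

static const unsigned int gran_to_us[] = { 1, 4, 8, 16, 32, 100 };

int main(void)
{
	unsigned int host_gran = 4, host_tactivate = 6;	/* 6 * 16us = 96us */
	unsigned int peer_gran = 2, peer_tactivate = 10;	/* 10 * 4us = 40us */

	unsigned int host_us = host_tactivate * gran_to_us[host_gran - 1];
	unsigned int peer_us = peer_tactivate * gran_to_us[peer_gran - 1];

	if (host_us > peer_us) {
		/* Round the peer value up past the host value. */
		unsigned int new_peer = host_us / gran_to_us[peer_gran - 1] + 1;

		printf("raise peer PA_TACTIVATE from %u to %u units (%u us)\n",
		       peer_tactivate, new_peer,
		       new_peer * gran_to_us[peer_gran - 1]);
	}
	return 0;
}
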
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 430bef111293..7d9ff22acfea 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -261,6 +261,12 @@ struct ufs_pwr_mode_info {
* @pwr_change_notify: called before and after a power mode change
* is carried out to allow vendor specific capabilities
* to be set.
+ * @setup_xfer_req: called before any transfer request is issued
+ * so the variant can do per-request setup
+ * @setup_task_mgmt: called before any task management request is issued
+ * so the variant can do per-request setup
+ * @hibern8_notify: called around hibern8 enter/exit
+ * so the variant can apply vendor specific configuration
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
@@ -273,7 +279,8 @@ struct ufs_hba_variant_ops {
u32 (*get_ufs_hci_version)(struct ufs_hba *);
int (*clk_scale_notify)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
- int (*setup_clocks)(struct ufs_hba *, bool);
+ int (*setup_clocks)(struct ufs_hba *, bool,
+ enum ufs_notify_change_status);
int (*setup_regulators)(struct ufs_hba *, bool);
int (*hce_enable_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
@@ -283,6 +290,10 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *,
struct ufs_pa_layer_attr *);
+ void (*setup_xfer_req)(struct ufs_hba *, int, bool);
+ void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
+ void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
+ enum ufs_notify_change_status);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -474,6 +485,12 @@ struct ufs_hba {
*/
#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
+ /*
+ * This quirk needs to be enabled if the host controller interprets
+ * the PRDTO and PRDTL values in the UTRD with byte granularity.
+ */
+ #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
+
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
@@ -755,10 +772,11 @@ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
return 0;
}
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->setup_clocks)
- return hba->vops->setup_clocks(hba, on);
+ return hba->vops->setup_clocks(hba, on, status);
return 0;
}
@@ -799,6 +817,28 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
return -ENOTSUPP;
}
+static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
+ bool is_scsi_cmd)
+{
+ if (hba->vops && hba->vops->setup_xfer_req)
+ return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+}
+
+static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
+ int tag, u8 tm_function)
+{
+ if (hba->vops && hba->vops->setup_task_mgmt)
+ return hba->vops->setup_task_mgmt(hba, tag, tm_function);
+}
+
+static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
+ enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ if (hba->vops && hba->vops->hibern8_notify)
+ return hba->vops->hibern8_notify(hba, cmd, status);
+}
+
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
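
The new ufshcd_vops_setup_xfer_req()/setup_task_mgmt()/hibern8_notify() wrappers above all follow the same optional-callback pattern: invoke the vendor hook only if the variant registered one. A minimal standalone sketch of that pattern (my_* names are hypothetical):

#include <stdio.h>
#include <stddef.h>

struct my_variant_ops {
	void (*setup_xfer_req)(int tag, int is_scsi_cmd);
};

struct my_hba {
	const struct my_variant_ops *vops;
};

static void my_vops_setup_xfer_req(struct my_hba *hba, int tag, int is_scsi_cmd)
{
	/* Call the hook only when the variant provides one. */
	if (hba->vops && hba->vops->setup_xfer_req)
		hba->vops->setup_xfer_req(tag, is_scsi_cmd);
}

static void demo_hook(int tag, int is_scsi_cmd)
{
	printf("setup_xfer_req: tag=%d scsi=%d\n", tag, is_scsi_cmd);
}

int main(void)
{
	struct my_variant_ops ops = { .setup_xfer_req = demo_hook };
	struct my_hba with_ops = { .vops = &ops };
	struct my_hba without_ops = { .vops = NULL };

	my_vops_setup_xfer_req(&with_ops, 3, 1);	/* calls the hook */
	my_vops_setup_xfer_req(&without_ops, 3, 1);	/* silently skipped */
	return 0;
}
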
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9599741ff606..5d978867be57 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -83,6 +83,8 @@ enum {
MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
};
+#define UFS_MASK(mask, offset) ((mask) << (offset))
+
/* UFS Version 08h */
#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
@@ -166,6 +168,7 @@ enum {
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
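
As a usage note, UFS_MASK() only builds a shifted field mask; a sketch of how the version masks above would typically be consumed (helper name and types are assumptions, not part of this patch):

/* split the UFS HCI VER register value into its major/minor fields */
static inline void example_split_ufs_version(u32 ver, u16 *major, u16 *minor)
{
	*major = (ver & MAJOR_VERSION_NUM_MASK) >> 16;
	*minor = ver & MINOR_VERSION_NUM_MASK;
}
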
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index eff8b5675575..23129d7b2678 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -123,6 +123,7 @@
#define PA_MAXRXHSGEAR 0x1587
#define PA_RXHSUNTERMCAP 0x15A5
#define PA_RXLSTERMCAP 0x15A6
+#define PA_GRANULARITY 0x15AA
#define PA_PACPREQTIMEOUT 0x1590
#define PA_PACPREQEOBTIMEOUT 0x1591
#define PA_HIBERN8TIME 0x15A7
@@ -158,6 +159,9 @@
#define VS_DEBUGOMC 0xD09E
#define VS_POWERSTATE 0xD083
+#define PA_GRANULARITY_MIN_VAL 1
+#define PA_GRANULARITY_MAX_VAL 6
+
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 31a096aa16ab..d8a16ca6baa5 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -137,7 +137,7 @@ static int target_fabric_mappedlun_link(
return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
}
-static int target_fabric_mappedlun_unlink(
+static void target_fabric_mappedlun_unlink(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
@@ -146,7 +146,7 @@ static int target_fabric_mappedlun_unlink(
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
- return core_dev_del_initiator_node_lun_acl(lun, lacl);
+ core_dev_del_initiator_node_lun_acl(lun, lacl);
}
static struct se_lun_acl *item_to_lun_acl(struct config_item *item)
@@ -669,7 +669,7 @@ out:
return ret;
}
-static int target_fabric_port_unlink(
+static void target_fabric_port_unlink(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
{
@@ -688,7 +688,6 @@ static int target_fabric_port_unlink(
}
core_dev_del_lun(se_tpg, lun);
- return 0;
}
static void target_fabric_port_release(struct config_item *item)
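
The remaining hunks follow the same pattern: the configfs ->drop_link() callback (and the unlink helpers built on it) can no longer fail, so the implementations stop returning a status. A minimal sketch of the resulting callback shape, with example_* names as assumptions:

#include <linux/configfs.h>

static void example_drop_link(struct config_item *parent,
			      struct config_item *target)
{
	/*
	 * Tear down whatever ->allow_link() set up; errors can only be
	 * logged here, since the unlink itself can no longer be refused.
	 */
}

static struct configfs_item_operations example_item_ops = {
	.drop_link = example_drop_link,
};
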
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index ff5de9a96643..9af7842b8178 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -92,7 +92,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
- lport->tt.seq_release(fr_seq(fp));
+ fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ft_sess_put(sess); /* undo get from lookup at recv */
@@ -161,11 +161,11 @@ int ft_queue_status(struct se_cmd *se_cmd)
/*
* Send response.
*/
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
- rc = lport->tt.seq_send(lport, cmd->seq, fp);
+ rc = fc_seq_send(lport, cmd->seq, fp);
if (rc) {
pr_info_ratelimited("%s: Failed to send response frame %p, "
"xid <0x%x>\n", __func__, fp, ep->xid);
@@ -177,7 +177,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
return -ENOMEM;
}
- lport->tt.exch_done(cmd->seq);
+ fc_exch_done(cmd->seq);
/*
* Drop the extra ACK_KREF reference taken by target_submit_cmd()
* ahead of ft_check_stop_free() -> transport_generic_free_cmd()
@@ -221,7 +221,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
memset(txrdy, 0, sizeof(*txrdy));
txrdy->ft_burst_len = htonl(se_cmd->data_length);
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
@@ -242,7 +242,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
cmd->was_ddp_setup = 1;
}
}
- lport->tt.seq_send(lport, cmd->seq, fp);
+ fc_seq_send(lport, cmd->seq, fp);
return 0;
}
@@ -323,8 +323,8 @@ static void ft_send_resp_status(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
sp = fr_seq(fp);
if (sp) {
- lport->tt.seq_send(lport, sp, fp);
- lport->tt.exch_done(sp);
+ fc_seq_send(lport, sp, fp);
+ fc_exch_done(sp);
} else {
lport->tt.frame_send(lport, fp);
}
@@ -461,7 +461,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
- cmd->seq = lport->tt.seq_assign(lport, fp);
+ cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
@@ -563,7 +563,7 @@ static void ft_send_work(struct work_struct *work)
task_attr = TCM_SIMPLE_TAG;
}
- fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+ fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 6f7c65abfe2a..1eb1f58e00e4 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,7 +82,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
remaining = se_cmd->data_length;
@@ -174,7 +174,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
f_ctl |= FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_off);
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 678559525618..efddaf5d11d1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2566,7 +2566,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
*/
WARN_ONCE(dwc->revision < DWC3_REVISION_240A
&& dwc->has_lpm_erratum,
- "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
+ "LPM Erratum not available on dwc3 revisions < 2.40a\n");
if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 3984787f8e97..78c44979dde3 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -408,7 +408,7 @@ out:
return ret;
}
-static int config_usb_cfg_unlink(
+static void config_usb_cfg_unlink(
struct config_item *usb_cfg_ci,
struct config_item *usb_func_ci)
{
@@ -437,12 +437,11 @@ static int config_usb_cfg_unlink(
list_del(&f->list);
usb_put_function(f);
mutex_unlock(&gi->lock);
- return 0;
+ return;
}
}
mutex_unlock(&gi->lock);
WARN(1, "Unable to locate function to unbind\n");
- return 0;
}
static struct configfs_item_operations gadget_config_item_ops = {
@@ -865,7 +864,7 @@ out:
return ret;
}
-static int os_desc_unlink(struct config_item *os_desc_ci,
+static void os_desc_unlink(struct config_item *os_desc_ci,
struct config_item *usb_cfg_ci)
{
struct gadget_info *gi = container_of(to_config_group(os_desc_ci),
@@ -878,7 +877,6 @@ static int os_desc_unlink(struct config_item *os_desc_ci,
cdev->os_desc_config = NULL;
WARN_ON(gi->composite.gadget_driver.udc_name);
mutex_unlock(&gi->lock);
- return 0;
}
static struct configfs_item_operations os_desc_ops = {
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 31125a4a2658..4e037d2a7a60 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -547,7 +547,7 @@ out:
return ret;
}
-static int uvcg_control_class_drop_link(struct config_item *src,
+static void uvcg_control_class_drop_link(struct config_item *src,
struct config_item *target)
{
struct config_item *control, *header;
@@ -555,7 +555,6 @@ static int uvcg_control_class_drop_link(struct config_item *src,
struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
struct uvc_descriptor_header **class_array;
struct uvcg_control_header *target_hdr;
- int ret = -EINVAL;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -569,23 +568,17 @@ static int uvcg_control_class_drop_link(struct config_item *src,
mutex_lock(&opts->lock);
class_array = uvcg_get_ctl_class_arr(src, opts);
- if (!class_array)
- goto unlock;
- if (opts->refcnt) {
- ret = -EBUSY;
+ if (!class_array || opts->refcnt)
goto unlock;
- }
target_hdr = to_uvcg_control_header(target);
--target_hdr->linked;
class_array[0] = NULL;
- ret = 0;
unlock:
mutex_unlock(&opts->lock);
out:
mutex_unlock(su_mutex);
- return ret;
}
static struct configfs_item_operations uvcg_control_class_item_ops = {
@@ -777,7 +770,7 @@ out:
return ret;
}
-static int uvcg_streaming_header_drop_link(struct config_item *src,
+static void uvcg_streaming_header_drop_link(struct config_item *src,
struct config_item *target)
{
struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
@@ -786,7 +779,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src,
struct uvcg_streaming_header *src_hdr;
struct uvcg_format *target_fmt = NULL;
struct uvcg_format_ptr *format_ptr, *tmp;
- int ret = -EINVAL;
src_hdr = to_uvcg_streaming_header(src);
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -811,8 +803,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src,
out:
mutex_unlock(&opts->lock);
mutex_unlock(su_mutex);
- return ret;
-
}
static struct configfs_item_operations uvcg_streaming_header_item_ops = {
@@ -2051,7 +2041,7 @@ out:
return ret;
}
-static int uvcg_streaming_class_drop_link(struct config_item *src,
+static void uvcg_streaming_class_drop_link(struct config_item *src,
struct config_item *target)
{
struct config_item *streaming, *header;
@@ -2059,7 +2049,6 @@ static int uvcg_streaming_class_drop_link(struct config_item *src,
struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
struct uvc_descriptor_header ***class_array;
struct uvcg_streaming_header *target_hdr;
- int ret = -EINVAL;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -2076,23 +2065,19 @@ static int uvcg_streaming_class_drop_link(struct config_item *src,
if (!class_array || !*class_array)
goto unlock;
- if (opts->refcnt) {
- ret = -EBUSY;
+ if (opts->refcnt)
goto unlock;
- }
target_hdr = to_uvcg_streaming_header(target);
--target_hdr->linked;
kfree(**class_array);
kfree(*class_array);
*class_array = NULL;
- ret = 0;
unlock:
mutex_unlock(&opts->lock);
out:
mutex_unlock(su_mutex);
- return ret;
}
static struct configfs_item_operations uvcg_streaming_class_item_ops = {