author | Thomas Gleixner <tglx@linutronix.de> | 2017-03-09 12:06:41 +0100
committer | Thomas Gleixner <tglx@linutronix.de> | 2017-03-09 12:06:41 +0100
commit | 920c634aff6cb66e7f352668521eb1313897e93c (patch)
tree | 0f2e2eb15756fdd93c8ea47f9080fc3c1abeeae6 /drivers
parent | b28ace12661fbcfd90959c1e84ff5a85113a82a1 (diff)
parent | 4b9de5da7e120c7f02395da729f0ec77ce7a6044 (diff)
download | linux-920c634aff6cb66e7f352668521eb1313897e93c.tar.bz2
Merge tag 'irq-fixes-4.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/urgent
Pull irqchip/irqdomain updates for 4.11-rc2 from Marc Zyngier:
- irqchip/crossbar: Some type tidying up
- irqchip/gicv3-its: Workaround for a Qualcomm erratum
- irqdomain: Compile fix for systems that don't use CONFIG_IRQ_DOMAIN
Fixed up minor conflict in the crossbar driver.
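For context, a merge commit like this is normally created by pulling the signed tag straight from the remote named in the subject line; the exact invocation is not recorded in the commit, so treat the following as a sketch using only the URL and tag name shown above:

    git pull git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms tags/irq-fixes-4.11-rc2

Any conflicts (here, the minor one in the crossbar driver) are then resolved by hand before the merge is committed.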
Diffstat (limited to 'drivers')
582 files changed, 31489 insertions, 8591 deletions
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c index f77956c3fd45..747c2ba98534 100644 --- a/drivers/acpi/acpi_ipmi.c +++ b/drivers/acpi/acpi_ipmi.c @@ -56,7 +56,7 @@ struct acpi_ipmi_device { struct ipmi_driver_data { struct list_head ipmi_devices; struct ipmi_smi_watcher bmc_events; - struct ipmi_user_hndl ipmi_hndlrs; + const struct ipmi_user_hndl ipmi_hndlrs; struct mutex ipmi_lock; /* diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 90d112a3063a..5edfd9c49044 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -21,6 +21,7 @@ #include <linux/platform_data/x86/pmc_atom.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/pwm.h> #include <linux/delay.h> #include "internal.h" @@ -154,6 +155,18 @@ static void byt_i2c_setup(struct lpss_private_data *pdata) writel(0, pdata->mmio_base + LPSS_I2C_ENABLE); } +/* BSW PWM used for backlight control by the i915 driver */ +static struct pwm_lookup bsw_pwm_lookup[] = { + PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0", + "pwm_backlight", 0, PWM_POLARITY_NORMAL, + "pwm-lpss-platform"), +}; + +static void bsw_pwm_setup(struct lpss_private_data *pdata) +{ + pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); +} + static const struct lpss_device_desc lpt_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, .prv_offset = 0x800, @@ -191,6 +204,7 @@ static const struct lpss_device_desc byt_pwm_dev_desc = { static const struct lpss_device_desc bsw_pwm_dev_desc = { .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .setup = bsw_pwm_setup, }; static const struct lpss_device_desc byt_uart_dev_desc = { diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index eb76a4c10dbf..754431031282 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/kthread.h> +#include <uapi/linux/sched/types.h> #include <linux/freezer.h> #include <linux/cpu.h> #include <linux/tick.h> diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index a05b5c0cf181..12771fcf0417 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -97,6 +97,7 @@ static int __init bert_check_table(struct acpi_table_bert *bert_tab) static int __init bert_init(void) { + struct apei_resources bert_resources; struct acpi_bert_region *boot_error_region; struct acpi_table_bert *bert_tab; unsigned int region_len; @@ -127,13 +128,14 @@ static int __init bert_init(void) } region_len = bert_tab->region_length; - if (!request_mem_region(bert_tab->address, region_len, "APEI BERT")) { - pr_err("Can't request iomem region <%016llx-%016llx>.\n", - (unsigned long long)bert_tab->address, - (unsigned long long)bert_tab->address + region_len - 1); - return -EIO; - } - + apei_resources_init(&bert_resources); + rc = apei_resources_add(&bert_resources, bert_tab->address, + region_len, true); + if (rc) + return rc; + rc = apei_resources_request(&bert_resources, "APEI BERT"); + if (rc) + goto out_fini; boot_error_region = ioremap_cache(bert_tab->address, region_len); if (boot_error_region) { bert_print_all(boot_error_region, region_len); @@ -142,7 +144,9 @@ static int __init bert_init(void) rc = -ENOMEM; } - release_mem_region(bert_tab->address, region_len); + apei_resources_release(&bert_resources); +out_fini: + apei_resources_fini(&bert_resources); return rc; } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index e53bef6cf53c..b192b42a8351 100644 --- 
a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -44,6 +44,7 @@ #include <linux/pci.h> #include <linux/aer.h> #include <linux/nmi.h> +#include <linux/sched/clock.h> #include <acpi/ghes.h> #include <acpi/apei.h> diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 7361d00818e2..662036bdc65e 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1603,7 +1603,7 @@ static size_t sizeof_nfit_set_info(int num_mappings) + num_mappings * sizeof(struct nfit_set_info_map); } -static int cmp_map(const void *m0, const void *m1) +static int cmp_map_compat(const void *m0, const void *m1) { const struct nfit_set_info_map *map0 = m0; const struct nfit_set_info_map *map1 = m1; @@ -1612,6 +1612,14 @@ static int cmp_map(const void *m0, const void *m1) sizeof(u64)); } +static int cmp_map(const void *m0, const void *m1) +{ + const struct nfit_set_info_map *map0 = m0; + const struct nfit_set_info_map *map1 = m1; + + return map0->region_offset - map1->region_offset; +} + /* Retrieve the nth entry referencing this spa */ static struct acpi_nfit_memory_map *memdev_from_spa( struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) @@ -1667,6 +1675,12 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), cmp_map, NULL); nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + + /* support namespaces created with the wrong sort order */ + sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), + cmp_map_compat, NULL); + nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + ndr_desc->nd_set = nd_set; devm_kfree(dev, info); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 2bbcdc6fdfee..aae4d8d4be36 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -31,7 +31,8 @@ #include <linux/poll.h> #include <linux/debugfs.h> #include <linux/rbtree.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 12d3a66600a3..1ac70744ae7b 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -600,6 +600,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) u8 args[4], *argbuf = NULL, *sensebuf = NULL; int argsize = 0; enum dma_data_direction data_dir; + struct scsi_sense_hdr sshdr; int cmd_result; if (arg == NULL) @@ -648,7 +649,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) /* Good values for timeout and retries? Values below from scsi_ioctl_send_command() for default case... */ cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, - sensebuf, (10*HZ), 5, 0, NULL); + sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL); if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ u8 *desc = sensebuf + 8; @@ -657,9 +658,6 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) /* If we set cc then ATA pass-through will cause a * check condition even if no error. Filter that. 
*/ if (cmd_result & SAM_STAT_CHECK_CONDITION) { - struct scsi_sense_hdr sshdr; - scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, - &sshdr); if (sshdr.sense_key == RECOVERED_ERROR && sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; @@ -707,6 +705,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) int rc = 0; u8 scsi_cmd[MAX_COMMAND_SIZE]; u8 args[7], *sensebuf = NULL; + struct scsi_sense_hdr sshdr; int cmd_result; if (arg == NULL) @@ -734,7 +733,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) /* Good values for timeout and retries? Values below from scsi_ioctl_send_command() for default case... */ cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0, - sensebuf, (10*HZ), 5, 0, NULL); + sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL); if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ u8 *desc = sensebuf + 8; @@ -743,9 +742,6 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) /* If we set cc then ATA pass-through will cause a * check condition even if no error. Filter that. */ if (cmd_result & SAM_STAT_CHECK_CONDITION) { - struct scsi_sense_hdr sshdr; - scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, - &sshdr); if (sshdr.sense_key == RECOVERED_ERROR && sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 2bf1ef1c3c78..0f18480b33b5 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index cb28579e8a94..d879f3bca107 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -1980,13 +1980,12 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) card->lbfqc = ns_stat_lfbqc_get(stat); id = le32_to_cpu(rsqe->buffer_handle); - skb = idr_find(&card->idr, id); + skb = idr_remove(&card->idr, id); if (!skb) { RXPRINTK(KERN_ERR - "nicstar%d: idr_find() failed!\n", card->index); + "nicstar%d: skb not found!\n", card->index); return; } - idr_remove(&card->idr, id); dma_sync_single_for_cpu(&card->pcidev->dev, NS_PRV_DMA(skb), (NS_PRV_BUFTYPE(skb) == BUF_SM diff --git a/drivers/base/core.c b/drivers/base/core.c index 3050e6f99403..684bda4d14a1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -26,6 +26,7 @@ #include <linux/mutex.h> #include <linux/pm_runtime.h> #include <linux/netdevice.h> +#include <linux/sched/signal.h> #include <linux/sysfs.h> #include "base.h" diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 44a74cf1372c..d2fb9c8ed205 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -309,7 +309,8 @@ static int handle_remove(const char *nodename, struct device *dev) if (d_really_is_positive(dentry)) { struct kstat stat; struct path p = {.mnt = parent.mnt, .dentry = dentry}; - err = vfs_getattr(&p, &stat); + err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE, + AT_STATX_SYNC_AS_STAT); if (!err && dev_mynode(dev, d_inode(dentry), &stat)) { struct iattr newattrs; /* diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 3a75fb1b4126..e697dec9d25b 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -274,6 +274,93 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) } /** + * genpd_power_off - Remove power from a given PM 
domain. + * @genpd: PM domain to power down. + * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the + * RPM status of the releated device is in an intermediate state, not yet turned + * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not + * be RPM_SUSPENDED, while it tries to power off the PM domain. + * + * If all of the @genpd's devices have been suspended and all of its subdomains + * have been powered down, remove power from @genpd. + */ +static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, + unsigned int depth) +{ + struct pm_domain_data *pdd; + struct gpd_link *link; + unsigned int not_suspended = 0; + + /* + * Do not try to power off the domain in the following situations: + * (1) The domain is already in the "power off" state. + * (2) System suspend is in progress. + */ + if (genpd->status == GPD_STATE_POWER_OFF + || genpd->prepared_count > 0) + return 0; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; + + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + enum pm_qos_flags_status stat; + + stat = dev_pm_qos_flags(pdd->dev, + PM_QOS_FLAG_NO_POWER_OFF + | PM_QOS_FLAG_REMOTE_WAKEUP); + if (stat > PM_QOS_FLAGS_NONE) + return -EBUSY; + + /* + * Do not allow PM domain to be powered off, when an IRQ safe + * device is part of a non-IRQ safe domain. + */ + if (!pm_runtime_suspended(pdd->dev) || + irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) + not_suspended++; + } + + if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on)) + return -EBUSY; + + if (genpd->gov && genpd->gov->power_down_ok) { + if (!genpd->gov->power_down_ok(&genpd->domain)) + return -EAGAIN; + } + + if (genpd->power_off) { + int ret; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; + + /* + * If sd_count > 0 at this point, one of the subdomains hasn't + * managed to call genpd_power_on() for the master yet after + * incrementing it. In that case genpd_power_on() will wait + * for us to drop the lock, so we can call .power_off() and let + * the genpd_power_on() restore power for us (this shouldn't + * happen very often). + */ + ret = _genpd_power_off(genpd, true); + if (ret) + return ret; + } + + genpd->status = GPD_STATE_POWER_OFF; + + list_for_each_entry(link, &genpd->slave_links, slave_node) { + genpd_sd_counter_dec(link->master); + genpd_lock_nested(link->master, depth + 1); + genpd_power_off(link->master, false, depth + 1); + genpd_unlock(link->master); + } + + return 0; +} + +/** * genpd_power_on - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. * @depth: nesting count for lockdep. @@ -321,7 +408,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); - genpd_queue_power_off_work(link->master); + genpd_lock_nested(link->master, depth + 1); + genpd_power_off(link->master, false, depth + 1); + genpd_unlock(link->master); } return ret; @@ -368,87 +457,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, } /** - * genpd_power_off - Remove power from a given PM domain. - * @genpd: PM domain to power down. - * @is_async: PM domain is powered down from a scheduled work - * - * If all of the @genpd's devices have been suspended and all of its subdomains - * have been powered down, remove power from @genpd. 
- */ -static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async) -{ - struct pm_domain_data *pdd; - struct gpd_link *link; - unsigned int not_suspended = 0; - - /* - * Do not try to power off the domain in the following situations: - * (1) The domain is already in the "power off" state. - * (2) System suspend is in progress. - */ - if (genpd->status == GPD_STATE_POWER_OFF - || genpd->prepared_count > 0) - return 0; - - if (atomic_read(&genpd->sd_count) > 0) - return -EBUSY; - - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - enum pm_qos_flags_status stat; - - stat = dev_pm_qos_flags(pdd->dev, - PM_QOS_FLAG_NO_POWER_OFF - | PM_QOS_FLAG_REMOTE_WAKEUP); - if (stat > PM_QOS_FLAGS_NONE) - return -EBUSY; - - /* - * Do not allow PM domain to be powered off, when an IRQ safe - * device is part of a non-IRQ safe domain. - */ - if (!pm_runtime_suspended(pdd->dev) || - irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) - not_suspended++; - } - - if (not_suspended > 1 || (not_suspended == 1 && is_async)) - return -EBUSY; - - if (genpd->gov && genpd->gov->power_down_ok) { - if (!genpd->gov->power_down_ok(&genpd->domain)) - return -EAGAIN; - } - - if (genpd->power_off) { - int ret; - - if (atomic_read(&genpd->sd_count) > 0) - return -EBUSY; - - /* - * If sd_count > 0 at this point, one of the subdomains hasn't - * managed to call genpd_power_on() for the master yet after - * incrementing it. In that case genpd_power_on() will wait - * for us to drop the lock, so we can call .power_off() and let - * the genpd_power_on() restore power for us (this shouldn't - * happen very often). - */ - ret = _genpd_power_off(genpd, true); - if (ret) - return ret; - } - - genpd->status = GPD_STATE_POWER_OFF; - - list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_dec(link->master); - genpd_queue_power_off_work(link->master); - } - - return 0; -} - -/** * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. * @work: Work structure used for scheduling the execution of this function. */ @@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); genpd_lock(genpd); - genpd_power_off(genpd, true); + genpd_power_off(genpd, false, 0); genpd_unlock(genpd); } @@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev) return 0; genpd_lock(genpd); - genpd_power_off(genpd, false); + genpd_power_off(genpd, true, 0); genpd_unlock(genpd); return 0; @@ -658,7 +666,7 @@ err_poweroff: if (!pm_runtime_is_irq_safe(dev) || (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) { genpd_lock(genpd); - genpd_power_off(genpd, 0); + genpd_power_off(genpd, true, 0); genpd_unlock(genpd); } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 249e0304597f..9faee1c893e5 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -27,6 +27,7 @@ #include <linux/pm_wakeirq.h> #include <linux/interrupt.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/async.h> #include <linux/suspend.h> #include <trace/events/power.h> diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 91ec3232d630..dae61720b314 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -231,7 +231,8 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) * The caller needs to ensure that opp_table (and hence the regulator) * isn't freed, while we are executing this routine. 
*/ - for (i = 0; reg = regulators[i], i < count; i++) { + for (i = 0; i < count; i++) { + reg = regulators[i]; ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max); if (ret > 0) latency_ns += ret * 1000; diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index d888d9869b6a..f850daeffba4 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -17,12 +17,9 @@ * * This QoS design is best effort based. Dependents register their QoS needs. * Watchers register to keep track of the current QoS needs of the system. - * Watchers can register different types of notification callbacks: - * . a per-device notification callback using the dev_pm_qos_*_notifier API. - * The notification chain data is stored in the per-device constraint - * data struct. - * . a system-wide notification callback using the dev_pm_qos_*_global_notifier - * API. The notification chain data is stored in a static variable. + * Watchers can register a per-device notification callback using the + * dev_pm_qos_*_notifier API. The notification chain data is stored in the + * per-device constraint data struct. * * Note about the per-device constraint data struct allocation: * . The per-device constraints data struct ptr is tored into the device @@ -49,8 +46,6 @@ static DEFINE_MUTEX(dev_pm_qos_mtx); static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); -static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); - /** * __dev_pm_qos_flags - Check PM QoS flags for a given device. * @dev: Device to check the PM QoS flags for. @@ -108,8 +103,7 @@ s32 __dev_pm_qos_read_value(struct device *dev) { lockdep_assert_held(&dev->power.lock); - return IS_ERR_OR_NULL(dev->power.qos) ? - 0 : pm_qos_read_value(&dev->power.qos->resume_latency); + return dev_pm_qos_raw_read_value(dev); } /** @@ -135,8 +129,7 @@ s32 dev_pm_qos_read_value(struct device *dev) * @value: Value to assign to the QoS request. * * Internal function to update the constraints list using the PM QoS core - * code and if needed call the per-device and the global notification - * callbacks + * code and if needed call the per-device callbacks. */ static int apply_constraint(struct dev_pm_qos_request *req, enum pm_qos_req_action action, s32 value) @@ -148,12 +141,6 @@ static int apply_constraint(struct dev_pm_qos_request *req, case DEV_PM_QOS_RESUME_LATENCY: ret = pm_qos_update_target(&qos->resume_latency, &req->data.pnode, action, value); - if (ret) { - value = pm_qos_read_value(&qos->resume_latency); - blocking_notifier_call_chain(&dev_pm_notifiers, - (unsigned long)value, - req); - } break; case DEV_PM_QOS_LATENCY_TOLERANCE: ret = pm_qos_update_target(&qos->latency_tolerance, @@ -536,36 +523,6 @@ int dev_pm_qos_remove_notifier(struct device *dev, EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); /** - * dev_pm_qos_add_global_notifier - sets notification entry for changes to - * target value of the PM QoS constraints for any device - * - * @notifier: notifier block managed by caller. - * - * Will register the notifier into a notification chain that gets called - * upon changes to the target value for any device. - */ -int dev_pm_qos_add_global_notifier(struct notifier_block *notifier) -{ - return blocking_notifier_chain_register(&dev_pm_notifiers, notifier); -} -EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier); - -/** - * dev_pm_qos_remove_global_notifier - deletes notification for changes to - * target value of PM QoS constraints for any device - * - * @notifier: notifier block to be removed. 
- * - * Will remove the notifier from the notification chain that gets called - * upon changes to the target value for any device. - */ -int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) -{ - return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); -} -EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); - -/** * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. * @dev: Device whose ancestor to add the request for. * @req: Pointer to the preallocated handle. diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index a14fac6a01d3..7bcf80fa9ada 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -7,7 +7,7 @@ * This file is released under the GPLv2. */ -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/export.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index f546f8f107b0..136854970489 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -8,7 +8,7 @@ #include <linux/device.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/capability.h> #include <linux/export.h> #include <linux/suspend.h> diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 27d613795653..8e1a4554951c 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -348,7 +348,7 @@ static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c) pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); } -static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, +static int cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, SGDescriptor_struct *chain_block, int len) { SGDescriptor_struct *chain_sg; @@ -359,8 +359,16 @@ static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, chain_sg->Len = len; temp64.val = pci_map_single(h->pdev, chain_block, len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&h->pdev->dev, temp64.val)) { + dev_warn(&h->pdev->dev, + "%s: error mapping chain block for DMA\n", + __func__); + return -1; + } chain_sg->Addr.lower = temp64.val32.lower; chain_sg->Addr.upper = temp64.val32.upper; + + return 0; } #include "cciss_scsi.c" /* For SCSI tape support */ @@ -3369,15 +3377,31 @@ static void do_cciss_request(struct request_queue *q) temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]), tmp_sg[i].offset, tmp_sg[i].length, dir); + if (dma_mapping_error(&h->pdev->dev, temp64.val)) { + dev_warn(&h->pdev->dev, + "%s: error mapping page for DMA\n", __func__); + creq->errors = make_status_bytes(SAM_STAT_GOOD, + 0, DRIVER_OK, + DID_SOFT_ERROR); + cmd_free(h, c); + return; + } curr_sg[sg_index].Addr.lower = temp64.val32.lower; curr_sg[sg_index].Addr.upper = temp64.val32.upper; curr_sg[sg_index].Ext = 0; /* we are not chaining */ ++sg_index; } - if (chained) - cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex], + if (chained) { + if (cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex], (seg - (h->max_cmd_sgentries - 1)) * - sizeof(SGDescriptor_struct)); + sizeof(SGDescriptor_struct))) { + creq->errors = make_status_bytes(SAM_STAT_GOOD, + 0, DRIVER_OK, + DID_SOFT_ERROR); + cmd_free(h, c); + return; + } + } /* track how many SG entries we are using */ if (seg > h->maxSG) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 4cb8f21ff4ef..724d1c50fc52 100644 --- a/drivers/block/drbd/drbd_int.h 
+++ b/drivers/block/drbd/drbd_int.h @@ -30,7 +30,7 @@ #include <linux/compiler.h> #include <linux/types.h> #include <linux/list.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/ratelimit.h> diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 615e5b5178a0..92c60cbd04ee 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -52,6 +52,7 @@ #define __KERNEL_SYSCALLS__ #include <linux/unistd.h> #include <linux/vmalloc.h> +#include <linux/sched/signal.h> #include <linux/drbd_limits.h> #include "drbd_int.h" @@ -1846,7 +1847,7 @@ int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_requ int drbd_send(struct drbd_connection *connection, struct socket *sock, void *buf, size_t size, unsigned msg_flags) { - struct kvec iov; + struct kvec iov = {.iov_base = buf, .iov_len = size}; struct msghdr msg; int rv, sent = 0; @@ -1855,15 +1856,14 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, /* THINK if (signal_pending) return ... ? */ - iov.iov_base = buf; - iov.iov_len = size; - msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size); + if (sock == connection->data.socket) { rcu_read_lock(); connection->ko_count = rcu_dereference(connection->net_conf)->ko_count; @@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, drbd_update_congested(connection); } do { - rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + rv = sock_sendmsg(sock, &msg); if (rv == -EAGAIN) { if (we_should_drop_the_connection(connection, sock)) break; @@ -1885,8 +1885,6 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, if (rv < 0) break; sent += rv; - iov.iov_base += rv; - iov.iov_len -= rv; } while (sent < size); if (sock == connection->data.socket) @@ -2915,11 +2913,9 @@ out_idr_remove_vol: idr_remove(&connection->peer_devices, vnr); out_idr_remove_from_resource: for_each_connection(connection, resource) { - peer_device = idr_find(&connection->peer_devices, vnr); - if (peer_device) { - idr_remove(&connection->peer_devices, vnr); + peer_device = idr_remove(&connection->peer_devices, vnr); + if (peer_device) kref_put(&connection->kref, drbd_destroy_connection); - } } for_each_peer_device_safe(peer_device, tmp_peer_device, device) { list_del(&peer_device->peer_devices); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index c7728dd77230..aa6bf9692eff 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -36,6 +36,8 @@ #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/slab.h> +#include <uapi/linux/sched/types.h> +#include <linux/sched/signal.h> #include <linux/pkt_sched.h> #define __KERNEL_SYSCALLS__ #include <linux/unistd.h> diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index c6755c9a0aea..3bff33f21435 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -25,7 +25,7 @@ #include <linux/module.h> #include <linux/drbd.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/memcontrol.h> diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 4b52a1690329..0ecb6461ed81 100644 --- a/drivers/block/loop.c +++ 
b/drivers/block/loop.c @@ -501,9 +501,9 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, cmd->iocb.ki_flags = IOCB_DIRECT; if (rw == WRITE) - ret = file->f_op->write_iter(&cmd->iocb, &iter); + ret = call_write_iter(file, &cmd->iocb, &iter); else - ret = file->f_op->read_iter(&cmd->iocb, &iter); + ret = call_read_iter(file, &cmd->iocb, &iter); if (ret != -EIOCBQUEUED) cmd->iocb.ki_complete(&cmd->iocb, ret, 0); @@ -1142,13 +1142,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) (info->lo_flags & LO_FLAGS_AUTOCLEAR)) lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; - if ((info->lo_flags & LO_FLAGS_PARTSCAN) && - !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { - lo->lo_flags |= LO_FLAGS_PARTSCAN; - lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; - loop_reread_partitions(lo, lo->lo_device); - } - lo->lo_encrypt_key_size = info->lo_encrypt_key_size; lo->lo_init[0] = info->lo_init[0]; lo->lo_init[1] = info->lo_init[1]; @@ -1163,6 +1156,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) exit: blk_mq_unfreeze_queue(lo->lo_queue); + + if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && + !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { + lo->lo_flags |= LO_FLAGS_PARTSCAN; + lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; + loop_reread_partitions(lo, lo->lo_device); + } + return err; } @@ -1175,7 +1176,8 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) if (lo->lo_state != Lo_bound) return -ENXIO; - error = vfs_getattr(&file->f_path, &stat); + error = vfs_getattr(&file->f_path, &stat, + STATX_INO, AT_STATX_SYNC_AS_STAT); if (error) return error; memset(info, 0, sizeof(*info)); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 0bf2b21a62cb..7e4287bc19e5 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -201,13 +201,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, /* * Send or receive packet. 
*/ -static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf, - int size, int msg_flags) +static int sock_xmit(struct nbd_device *nbd, int index, int send, + struct iov_iter *iter, int msg_flags) { struct socket *sock = nbd->socks[index]->sock; int result; struct msghdr msg; - struct kvec iov; unsigned long pflags = current->flags; if (unlikely(!sock)) { @@ -217,11 +216,11 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf, return -EINVAL; } + msg.msg_iter = *iter; + current->flags |= PF_MEMALLOC; do { sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; - iov.iov_base = buf; - iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; @@ -229,47 +228,37 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf, msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) - result = kernel_sendmsg(sock, &msg, &iov, 1, size); + result = sock_sendmsg(sock, &msg); else - result = kernel_recvmsg(sock, &msg, &iov, 1, size, - msg.msg_flags); + result = sock_recvmsg(sock, &msg, msg.msg_flags); if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } - size -= result; - buf += result; - } while (size > 0); + } while (msg_data_left(&msg)); tsk_restore_flags(current, pflags, PF_MEMALLOC); return result; } -static inline int sock_send_bvec(struct nbd_device *nbd, int index, - struct bio_vec *bvec, int flags) -{ - int result; - void *kaddr = kmap(bvec->bv_page); - result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset, - bvec->bv_len, flags); - kunmap(bvec->bv_page); - return result; -} - /* always call with the tx_lock held */ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) { struct request *req = blk_mq_rq_from_pdu(cmd); int result; - struct nbd_request request; + struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; + struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; + struct iov_iter from; unsigned long size = blk_rq_bytes(req); struct bio *bio; u32 type; u32 tag = blk_mq_unique_tag(req); + iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + switch (req_op(req)) { case REQ_OP_DISCARD: type = NBD_CMD_TRIM; @@ -294,8 +283,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) return -EIO; } - memset(&request, 0, sizeof(request)); - request.magic = htonl(NBD_REQUEST_MAGIC); request.type = htonl(type); if (type != NBD_CMD_FLUSH) { request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); @@ -306,7 +293,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", cmd, nbdcmd_to_ascii(type), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); - result = sock_xmit(nbd, index, 1, &request, sizeof(request), + result = sock_xmit(nbd, index, 1, &from, (type == NBD_CMD_WRITE) ? 
MSG_MORE : 0); if (result <= 0) { dev_err_ratelimited(disk_to_dev(nbd->disk), @@ -329,7 +316,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", cmd, bvec.bv_len); - result = sock_send_bvec(nbd, index, &bvec, flags); + iov_iter_bvec(&from, ITER_BVEC | WRITE, + &bvec, 1, bvec.bv_len); + result = sock_xmit(nbd, index, 1, &from, flags); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", @@ -350,17 +339,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) return 0; } -static inline int sock_recv_bvec(struct nbd_device *nbd, int index, - struct bio_vec *bvec) -{ - int result; - void *kaddr = kmap(bvec->bv_page); - result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset, - bvec->bv_len, MSG_WAITALL); - kunmap(bvec->bv_page); - return result; -} - /* NULL returned = something went wrong, inform userspace */ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) { @@ -370,9 +348,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct request *req = NULL; u16 hwq; u32 tag; + struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; + struct iov_iter to; reply.magic = 0; - result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL); + iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); + result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); if (result <= 0) { if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) @@ -412,7 +393,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct bio_vec bvec; rq_for_each_segment(bvec, req, iter) { - result = sock_recv_bvec(nbd, index, &bvec); + iov_iter_bvec(&to, ITER_BVEC | READ, + &bvec, 1, bvec.bv_len); + result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); @@ -641,14 +624,17 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev) static void send_disconnects(struct nbd_device *nbd) { - struct nbd_request request = {}; + struct nbd_request request = { + .magic = htonl(NBD_REQUEST_MAGIC), + .type = htonl(NBD_CMD_DISC), + }; + struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; + struct iov_iter from; int i, ret; - request.magic = htonl(NBD_REQUEST_MAGIC); - request.type = htonl(NBD_CMD_DISC); - for (i = 0; i < nbd->num_connections; i++) { - ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0); + iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + ret = sock_xmit(nbd, i, 1, &from, 0); if (ret <= 0) dev_err(disk_to_dev(nbd->disk), "Send disconnect failed %d\n", ret); @@ -689,8 +675,10 @@ static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev) nbd->num_connections) { int i; - for (i = 0; i < nbd->num_connections; i++) + for (i = 0; i < nbd->num_connections; i++) { + sockfd_put(nbd->socks[i]->sock); kfree(nbd->socks[i]); + } kfree(nbd->socks); nbd->socks = NULL; nbd->num_connections = 0; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 362cecc77130..4d6807723798 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -123,9 +123,11 @@ static int atomic_dec_return_safe(atomic_t *v) #define RBD_FEATURE_LAYERING (1<<0) #define RBD_FEATURE_STRIPINGV2 (1<<1) #define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2) +#define RBD_FEATURE_DATA_POOL (1<<7) #define 
RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ RBD_FEATURE_STRIPINGV2 | \ - RBD_FEATURE_EXCLUSIVE_LOCK) + RBD_FEATURE_EXCLUSIVE_LOCK | \ + RBD_FEATURE_DATA_POOL) /* Features supported by this (client software) implementation. */ @@ -144,10 +146,9 @@ struct rbd_image_header { /* These six fields never change for a given rbd image */ char *object_prefix; __u8 obj_order; - __u8 crypt_type; - __u8 comp_type; u64 stripe_unit; u64 stripe_count; + s64 data_pool_id; u64 features; /* Might be changeable someday? */ /* The remaining fields need to be updated occasionally */ @@ -230,7 +231,7 @@ enum obj_req_flags { }; struct rbd_obj_request { - const char *object_name; + u64 object_no; u64 offset; /* object start byte */ u64 length; /* bytes from offset */ unsigned long flags; @@ -438,7 +439,6 @@ static DEFINE_SPINLOCK(rbd_client_list_lock); static struct kmem_cache *rbd_img_request_cache; static struct kmem_cache *rbd_obj_request_cache; -static struct kmem_cache *rbd_segment_name_cache; static int rbd_major; static DEFINE_IDA(rbd_dev_id_ida); @@ -973,6 +973,30 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) } /* + * returns the size of an object in the image + */ +static u32 rbd_obj_bytes(struct rbd_image_header *header) +{ + return 1U << header->obj_order; +} + +static void rbd_init_layout(struct rbd_device *rbd_dev) +{ + if (rbd_dev->header.stripe_unit == 0 || + rbd_dev->header.stripe_count == 0) { + rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header); + rbd_dev->header.stripe_count = 1; + } + + rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit; + rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count; + rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header); + rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ? + rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id; + RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL); +} + +/* * Fill an rbd image header with information from the given format 1 * on-disk header. 
*/ @@ -992,15 +1016,11 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev, /* Allocate this now to avoid having to handle failure below */ if (first_time) { - size_t len; - - len = strnlen(ondisk->object_prefix, - sizeof (ondisk->object_prefix)); - object_prefix = kmalloc(len + 1, GFP_KERNEL); + object_prefix = kstrndup(ondisk->object_prefix, + sizeof(ondisk->object_prefix), + GFP_KERNEL); if (!object_prefix) return -ENOMEM; - memcpy(object_prefix, ondisk->object_prefix, len); - object_prefix[len] = '\0'; } /* Allocate the snapshot context and fill it in */ @@ -1051,12 +1071,7 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev, if (first_time) { header->object_prefix = object_prefix; header->obj_order = ondisk->options.order; - header->crypt_type = ondisk->options.crypt_type; - header->comp_type = ondisk->options.comp_type; - /* The rest aren't used for format 1 images */ - header->stripe_unit = 0; - header->stripe_count = 0; - header->features = 0; + rbd_init_layout(rbd_dev); } else { ceph_put_snap_context(header->snapc); kfree(header->snap_names); @@ -1232,42 +1247,9 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) rbd_dev->mapping.features = 0; } -static void rbd_segment_name_free(const char *name) -{ - /* The explicit cast here is needed to drop the const qualifier */ - - kmem_cache_free(rbd_segment_name_cache, (void *)name); -} - -static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) -{ - char *name; - u64 segment; - int ret; - char *name_format; - - name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); - if (!name) - return NULL; - segment = offset >> rbd_dev->header.obj_order; - name_format = "%s.%012llx"; - if (rbd_dev->image_format == 2) - name_format = "%s.%016llx"; - ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format, - rbd_dev->header.object_prefix, segment); - if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) { - pr_err("error formatting segment name for #%llu (%d)\n", - segment, ret); - rbd_segment_name_free(name); - name = NULL; - } - - return name; -} - static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) { - u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; + u64 segment_size = rbd_obj_bytes(&rbd_dev->header); return offset & (segment_size - 1); } @@ -1275,7 +1257,7 @@ static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) static u64 rbd_segment_length(struct rbd_device *rbd_dev, u64 offset, u64 length) { - u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; + u64 segment_size = rbd_obj_bytes(&rbd_dev->header); offset &= segment_size - 1; @@ -1287,14 +1269,6 @@ static u64 rbd_segment_length(struct rbd_device *rbd_dev, } /* - * returns the size of an object in the image - */ -static u64 rbd_obj_bytes(struct rbd_image_header *header) -{ - return 1 << header->obj_order; -} - -/* * bio helpers */ @@ -1623,7 +1597,9 @@ static void rbd_obj_request_submit(struct rbd_obj_request *obj_request) { struct ceph_osd_request *osd_req = obj_request->osd_req; - dout("%s %p osd_req %p\n", __func__, obj_request, osd_req); + dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__, + obj_request, obj_request->object_no, obj_request->offset, + obj_request->length, osd_req); if (obj_request_img_data_test(obj_request)) { WARN_ON(obj_request->callback != rbd_img_obj_callback); rbd_img_request_get(obj_request->img_request); @@ -1631,44 +1607,6 @@ static void rbd_obj_request_submit(struct rbd_obj_request *obj_request) ceph_osdc_start_request(osd_req->r_osdc, osd_req, false); } 
-static void rbd_obj_request_end(struct rbd_obj_request *obj_request) -{ - dout("%s %p\n", __func__, obj_request); - ceph_osdc_cancel_request(obj_request->osd_req); -} - -/* - * Wait for an object request to complete. If interrupted, cancel the - * underlying osd request. - * - * @timeout: in jiffies, 0 means "wait forever" - */ -static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request, - unsigned long timeout) -{ - long ret; - - dout("%s %p\n", __func__, obj_request); - ret = wait_for_completion_interruptible_timeout( - &obj_request->completion, - ceph_timeout_jiffies(timeout)); - if (ret <= 0) { - if (ret == 0) - ret = -ETIMEDOUT; - rbd_obj_request_end(obj_request); - } else { - ret = 0; - } - - dout("%s %p ret %d\n", __func__, obj_request, (int)ret); - return ret; -} - -static int rbd_obj_request_wait(struct rbd_obj_request *obj_request) -{ - return __rbd_obj_request_wait(obj_request, 0); -} - static void rbd_img_request_complete(struct rbd_img_request *img_request) { @@ -1955,8 +1893,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req) rbd_osd_call_callback(obj_request); break; default: - rbd_warn(NULL, "%s: unsupported op %hu", - obj_request->object_name, (unsigned short) opcode); + rbd_warn(NULL, "unexpected OSD op: object_no %016llx opcode %d", + obj_request->object_no, opcode); break; } @@ -1980,6 +1918,40 @@ static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request) osd_req->r_data_offset = obj_request->offset; } +static struct ceph_osd_request * +__rbd_osd_req_create(struct rbd_device *rbd_dev, + struct ceph_snap_context *snapc, + int num_ops, unsigned int flags, + struct rbd_obj_request *obj_request) +{ + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct ceph_osd_request *req; + const char *name_format = rbd_dev->image_format == 1 ? + RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT; + + req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO); + if (!req) + return NULL; + + req->r_flags = flags; + req->r_callback = rbd_osd_req_callback; + req->r_priv = obj_request; + + req->r_base_oloc.pool = rbd_dev->layout.pool_id; + if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format, + rbd_dev->header.object_prefix, obj_request->object_no)) + goto err_req; + + if (ceph_osdc_alloc_messages(req, GFP_NOIO)) + goto err_req; + + return req; + +err_req: + ceph_osdc_put_request(req); + return NULL; +} + /* * Create an osd request. A read request has one osd op (read). * A write request has either one (watch) or two (hint+write) osd ops. 
@@ -1993,8 +1965,6 @@ static struct ceph_osd_request *rbd_osd_req_create( struct rbd_obj_request *obj_request) { struct ceph_snap_context *snapc = NULL; - struct ceph_osd_client *osdc; - struct ceph_osd_request *osd_req; if (obj_request_img_data_test(obj_request) && (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) { @@ -2009,35 +1979,9 @@ static struct ceph_osd_request *rbd_osd_req_create( rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2)); - /* Allocate and initialize the request, for the num_ops ops */ - - osdc = &rbd_dev->rbd_client->client->osdc; - osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, - GFP_NOIO); - if (!osd_req) - goto fail; - - if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) - osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; - else - osd_req->r_flags = CEPH_OSD_FLAG_READ; - - osd_req->r_callback = rbd_osd_req_callback; - osd_req->r_priv = obj_request; - - osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id; - if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s", - obj_request->object_name)) - goto fail; - - if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO)) - goto fail; - - return osd_req; - -fail: - ceph_osdc_put_request(osd_req); - return NULL; + return __rbd_osd_req_create(rbd_dev, snapc, num_ops, + (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) ? + CEPH_OSD_FLAG_WRITE : CEPH_OSD_FLAG_READ, obj_request); } /* @@ -2050,10 +1994,6 @@ static struct ceph_osd_request * rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; - struct ceph_snap_context *snapc; - struct rbd_device *rbd_dev; - struct ceph_osd_client *osdc; - struct ceph_osd_request *osd_req; int num_osd_ops = 3; rbd_assert(obj_request_img_data_test(obj_request)); @@ -2065,77 +2005,34 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) if (img_request_discard_test(img_request)) num_osd_ops = 2; - /* Allocate and initialize the request, for all the ops */ - - snapc = img_request->snapc; - rbd_dev = img_request->rbd_dev; - osdc = &rbd_dev->rbd_client->client->osdc; - osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops, - false, GFP_NOIO); - if (!osd_req) - goto fail; - - osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; - osd_req->r_callback = rbd_osd_req_callback; - osd_req->r_priv = obj_request; - - osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id; - if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s", - obj_request->object_name)) - goto fail; - - if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO)) - goto fail; - - return osd_req; - -fail: - ceph_osdc_put_request(osd_req); - return NULL; + return __rbd_osd_req_create(img_request->rbd_dev, + img_request->snapc, num_osd_ops, + CEPH_OSD_FLAG_WRITE, obj_request); } - static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req) { ceph_osdc_put_request(osd_req); } -/* object_name is assumed to be a non-null pointer and NUL-terminated */ - -static struct rbd_obj_request *rbd_obj_request_create(const char *object_name, - u64 offset, u64 length, - enum obj_request_type type) +static struct rbd_obj_request * +rbd_obj_request_create(enum obj_request_type type) { struct rbd_obj_request *obj_request; - size_t size; - char *name; rbd_assert(obj_request_type_valid(type)); - size = strlen(object_name) + 1; - name = kmalloc(size, GFP_NOIO); - if (!name) - return NULL; - obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO); - if (!obj_request) { - kfree(name); + if (!obj_request) return NULL; - } - 
obj_request->object_name = memcpy(name, object_name, size); - obj_request->offset = offset; - obj_request->length = length; - obj_request->flags = 0; obj_request->which = BAD_WHICH; obj_request->type = type; INIT_LIST_HEAD(&obj_request->links); init_completion(&obj_request->completion); kref_init(&obj_request->kref); - dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name, - offset, length, (int)type, obj_request); - + dout("%s %p\n", __func__, obj_request); return obj_request; } @@ -2170,8 +2067,6 @@ static void rbd_obj_request_destroy(struct kref *kref) break; } - kfree(obj_request->object_name); - obj_request->object_name = NULL; kmem_cache_free(rbd_obj_request_cache, obj_request); } @@ -2546,22 +2441,18 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, while (resid) { struct ceph_osd_request *osd_req; - const char *object_name; - u64 offset; - u64 length; + u64 object_no = img_offset >> rbd_dev->header.obj_order; + u64 offset = rbd_segment_offset(rbd_dev, img_offset); + u64 length = rbd_segment_length(rbd_dev, img_offset, resid); - object_name = rbd_segment_name(rbd_dev, img_offset); - if (!object_name) - goto out_unwind; - offset = rbd_segment_offset(rbd_dev, img_offset); - length = rbd_segment_length(rbd_dev, img_offset, resid); - obj_request = rbd_obj_request_create(object_name, - offset, length, type); - /* object request has its own copy of the object name */ - rbd_segment_name_free(object_name); + obj_request = rbd_obj_request_create(type); if (!obj_request) goto out_unwind; + obj_request->object_no = object_no; + obj_request->offset = offset; + obj_request->length = length; + /* * set obj_request->img_request before creating the * osd_request so that it gets the right snapc @@ -2771,7 +2662,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) * child image to which the original request was to be sent. */ img_offset = obj_request->img_offset - obj_request->offset; - length = (u64)1 << rbd_dev->header.obj_order; + length = rbd_obj_bytes(&rbd_dev->header); /* * There is no defined parent data beyond the parent @@ -2900,11 +2791,12 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) size_t size; int ret; - stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0, - OBJ_REQUEST_PAGES); + stat_request = rbd_obj_request_create(OBJ_REQUEST_PAGES); if (!stat_request) return -ENOMEM; + stat_request->object_no = obj_request->object_no; + stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, stat_request); if (!stat_request->osd_req) { @@ -3983,17 +3875,17 @@ out: * returned in the outbound buffer, or a negative error code. */ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, - const char *object_name, - const char *class_name, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, const char *method_name, const void *outbound, size_t outbound_size, void *inbound, size_t inbound_size) { - struct rbd_obj_request *obj_request; - struct page **pages; - u32 page_count; + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct page *req_page = NULL; + struct page *reply_page; int ret; /* @@ -4003,61 +3895,35 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, * method. Currently if this is present it will be a * snapshot id. 
*/ - page_count = (u32)calc_pages_for(0, inbound_size); - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - ret = -ENOMEM; - obj_request = rbd_obj_request_create(object_name, 0, inbound_size, - OBJ_REQUEST_PAGES); - if (!obj_request) - goto out; + if (outbound) { + if (outbound_size > PAGE_SIZE) + return -E2BIG; - obj_request->pages = pages; - obj_request->page_count = page_count; - - obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, - obj_request); - if (!obj_request->osd_req) - goto out; - - osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL, - class_name, method_name); - if (outbound_size) { - struct ceph_pagelist *pagelist; - - pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); - if (!pagelist) - goto out; + req_page = alloc_page(GFP_KERNEL); + if (!req_page) + return -ENOMEM; - ceph_pagelist_init(pagelist); - ceph_pagelist_append(pagelist, outbound, outbound_size); - osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0, - pagelist); + memcpy(page_address(req_page), outbound, outbound_size); } - osd_req_op_cls_response_data_pages(obj_request->osd_req, 0, - obj_request->pages, inbound_size, - 0, false, false); - - rbd_obj_request_submit(obj_request); - ret = rbd_obj_request_wait(obj_request); - if (ret) - goto out; - ret = obj_request->result; - if (ret < 0) - goto out; + reply_page = alloc_page(GFP_KERNEL); + if (!reply_page) { + if (req_page) + __free_page(req_page); + return -ENOMEM; + } - rbd_assert(obj_request->xferred < (u64)INT_MAX); - ret = (int)obj_request->xferred; - ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred); -out: - if (obj_request) - rbd_obj_request_put(obj_request); - else - ceph_release_page_vector(pages, page_count); + ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name, + CEPH_OSD_FLAG_READ, req_page, outbound_size, + reply_page, &inbound_size); + if (!ret) { + memcpy(inbound, page_address(reply_page), inbound_size); + ret = inbound_size; + } + if (req_page) + __free_page(req_page); + __free_page(reply_page); return ret; } @@ -4256,63 +4122,46 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) } static int rbd_obj_read_sync(struct rbd_device *rbd_dev, - const char *object_name, - u64 offset, u64 length, void *buf) + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + void *buf, int buf_len) { - struct rbd_obj_request *obj_request; - struct page **pages = NULL; - u32 page_count; - size_t size; + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct ceph_osd_request *req; + struct page **pages; + int num_pages = calc_pages_for(0, buf_len); int ret; - page_count = (u32) calc_pages_for(offset, length); - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - ret = -ENOMEM; - obj_request = rbd_obj_request_create(object_name, offset, length, - OBJ_REQUEST_PAGES); - if (!obj_request) - goto out; - - obj_request->pages = pages; - obj_request->page_count = page_count; - - obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, - obj_request); - if (!obj_request->osd_req) - goto out; + req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL); + if (!req) + return -ENOMEM; - osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ, - offset, length, 0, 0); - osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0, - obj_request->pages, - obj_request->length, - obj_request->offset & ~PAGE_MASK, - false, false); + 
ceph_oid_copy(&req->r_base_oid, oid); + ceph_oloc_copy(&req->r_base_oloc, oloc); + req->r_flags = CEPH_OSD_FLAG_READ; - rbd_obj_request_submit(obj_request); - ret = rbd_obj_request_wait(obj_request); + ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); if (ret) - goto out; + goto out_req; - ret = obj_request->result; - if (ret < 0) - goto out; + pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out_req; + } - rbd_assert(obj_request->xferred <= (u64) SIZE_MAX); - size = (size_t) obj_request->xferred; - ceph_copy_from_page_vector(pages, buf, 0, size); - rbd_assert(size <= (size_t)INT_MAX); - ret = (int)size; -out: - if (obj_request) - rbd_obj_request_put(obj_request); - else - ceph_release_page_vector(pages, page_count); + osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0); + osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false, + true); + + ceph_osdc_start_request(osdc, req, false); + ret = ceph_osdc_wait_request(osdc, req); + if (ret >= 0) + ceph_copy_from_page_vector(pages, buf, 0, ret); +out_req: + ceph_osdc_put_request(req); return ret; } @@ -4348,8 +4197,8 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) if (!ondisk) return -ENOMEM; - ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name, - 0, size, ondisk); + ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, ondisk, size); if (ret < 0) goto out; if ((size_t)ret < size) { @@ -4781,7 +4630,7 @@ static const struct attribute_group *rbd_attr_groups[] = { static void rbd_dev_release(struct device *dev); -static struct device_type rbd_device_type = { +static const struct device_type rbd_device_type = { .name = "rbd", .groups = rbd_attr_groups, .release = rbd_dev_release, @@ -4876,8 +4725,9 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, INIT_LIST_HEAD(&rbd_dev->node); init_rwsem(&rbd_dev->header_rwsem); + rbd_dev->header.data_pool_id = CEPH_NOPOOL; ceph_oid_init(&rbd_dev->header_oid); - ceph_oloc_init(&rbd_dev->header_oloc); + rbd_dev->header_oloc.pool = spec->pool_id; mutex_init(&rbd_dev->watch_mutex); rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; @@ -4899,12 +4749,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, rbd_dev->rbd_client = rbdc; rbd_dev->spec = spec; - rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER; - rbd_dev->layout.stripe_count = 1; - rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER; - rbd_dev->layout.pool_id = spec->pool_id; - RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL); - return rbd_dev; } @@ -4970,10 +4814,10 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, __le64 size; } __attribute__ ((packed)) size_buf = { 0 }; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_size", - &snapid, sizeof (snapid), - &size_buf, sizeof (size_buf)); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_size", + &snapid, sizeof(snapid), + &size_buf, sizeof(size_buf)); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -5010,9 +4854,9 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) if (!reply_buf) return -ENOMEM; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_object_prefix", NULL, 0, - reply_buf, RBD_OBJ_PREFIX_LEN_MAX); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_object_prefix", + NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX); 
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out; @@ -5045,10 +4889,10 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, u64 unsup; int ret; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_features", - &snapid, sizeof (snapid), - &features_buf, sizeof (features_buf)); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_features", + &snapid, sizeof(snapid), + &features_buf, sizeof(features_buf)); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -5107,10 +4951,9 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) } snapid = cpu_to_le64(rbd_dev->spec->snap_id); - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_parent", - &snapid, sizeof (snapid), - reply_buf, size); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_parent", + &snapid, sizeof(snapid), reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out_err; @@ -5210,9 +5053,9 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) u64 stripe_count; int ret; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_stripe_unit_count", NULL, 0, - (char *)&striping_info_buf, size); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_stripe_unit_count", + NULL, 0, &striping_info_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -5226,7 +5069,7 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) * out, and only fail if the image has non-default values. */ ret = -EINVAL; - obj_size = (u64)1 << rbd_dev->header.obj_order; + obj_size = rbd_obj_bytes(&rbd_dev->header); p = &striping_info_buf; stripe_unit = ceph_decode_64(&p); if (stripe_unit != obj_size) { @@ -5247,8 +5090,27 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) return 0; } +static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev) +{ + __le64 data_pool_id; + int ret; + + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_data_pool", + NULL, 0, &data_pool_id, sizeof(data_pool_id)); + if (ret < 0) + return ret; + if (ret < sizeof(data_pool_id)) + return -EBADMSG; + + rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id); + WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL); + return 0; +} + static char *rbd_dev_image_name(struct rbd_device *rbd_dev) { + CEPH_DEFINE_OID_ONSTACK(oid); size_t image_id_size; char *image_id; void *p; @@ -5276,10 +5138,10 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev) if (!reply_buf) goto out; - ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY, - "rbd", "dir_get_name", - image_id, image_id_size, - reply_buf, size); + ceph_oid_printf(&oid, "%s", RBD_DIRECTORY); + ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc, + "dir_get_name", image_id, image_id_size, + reply_buf, size); if (ret < 0) goto out; p = reply_buf; @@ -5458,9 +5320,9 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) if (!reply_buf) return -ENOMEM; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_snapcontext", NULL, 0, - reply_buf, size); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_snapcontext", + NULL, 0, reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) 
goto out; @@ -5523,10 +5385,9 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, return ERR_PTR(-ENOMEM); snapid = cpu_to_le64(snap_id); - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_snapshot_name", - &snapid, sizeof (snapid), - reply_buf, size); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_snapshot_name", + &snapid, sizeof(snapid), reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) { snap_name = ERR_PTR(ret); @@ -5833,7 +5694,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) { int ret; size_t size; - char *object_name; + CEPH_DEFINE_OID_ONSTACK(oid); void *response; char *image_id; @@ -5853,12 +5714,12 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) * First, see if the format 2 image id file exists, and if * so, get the image's persistent id from it. */ - size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name); - object_name = kmalloc(size, GFP_NOIO); - if (!object_name) - return -ENOMEM; - sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name); - dout("rbd id object name is %s\n", object_name); + ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX, + rbd_dev->spec->image_name); + if (ret) + return ret; + + dout("rbd id object name is %s\n", oid.name); /* Response will be an encoded string, which includes a length */ @@ -5871,9 +5732,9 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) /* If it doesn't exist we'll assume it's a format 1 image */ - ret = rbd_obj_method_sync(rbd_dev, object_name, - "rbd", "get_id", NULL, 0, - response, RBD_IMAGE_ID_LEN_MAX); + ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc, + "get_id", NULL, 0, + response, RBD_IMAGE_ID_LEN_MAX); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret == -ENOENT) { image_id = kstrdup("", GFP_KERNEL); @@ -5896,8 +5757,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) } out: kfree(response); - kfree(object_name); - + ceph_oid_destroy(&oid); return ret; } @@ -5944,14 +5804,20 @@ static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) if (ret < 0) goto out_err; } - /* No support for crypto and compression type format 2 images */ + if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) { + ret = rbd_dev_v2_data_pool(rbd_dev); + if (ret) + goto out_err; + } + + rbd_init_layout(rbd_dev); return 0; + out_err: rbd_dev->header.features = 0; kfree(rbd_dev->header.object_prefix); rbd_dev->header.object_prefix = NULL; - return ret; } @@ -6077,8 +5943,6 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) /* Record the header object name for this rbd image. 
*/ rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); - - rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id; if (rbd_dev->image_format == 1) ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s", spec->image_name, RBD_SUFFIX); @@ -6471,27 +6335,16 @@ static int rbd_slab_init(void) if (!rbd_obj_request_cache) goto out_err; - rbd_assert(!rbd_segment_name_cache); - rbd_segment_name_cache = kmem_cache_create("rbd_segment_name", - CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL); - if (rbd_segment_name_cache) - return 0; -out_err: - kmem_cache_destroy(rbd_obj_request_cache); - rbd_obj_request_cache = NULL; + return 0; +out_err: kmem_cache_destroy(rbd_img_request_cache); rbd_img_request_cache = NULL; - return -ENOMEM; } static void rbd_slab_exit(void) { - rbd_assert(rbd_segment_name_cache); - kmem_cache_destroy(rbd_segment_name_cache); - rbd_segment_name_cache = NULL; - rbd_assert(rbd_obj_request_cache); kmem_cache_destroy(rbd_obj_request_cache); rbd_obj_request_cache = NULL; diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h index 94f367db27b0..62ff50d3e7a6 100644 --- a/drivers/block/rbd_types.h +++ b/drivers/block/rbd_types.h @@ -25,8 +25,8 @@ */ #define RBD_HEADER_PREFIX "rbd_header." -#define RBD_DATA_PREFIX "rbd_data." #define RBD_ID_PREFIX "rbd_id." +#define RBD_V2_DATA_FORMAT "%s.%016llx" #define RBD_LOCK_NAME "rbd_lock" #define RBD_LOCK_TAG "internal" @@ -42,13 +42,14 @@ enum rbd_notify_op { /* * For format version 1, rbd image 'foo' consists of objects * foo.rbd - image metadata - * rb.<idhi>.<idlo>.00000000 - * rb.<idhi>.<idlo>.00000001 + * rb.<idhi>.<idlo>.<extra>.000000000000 + * rb.<idhi>.<idlo>.<extra>.000000000001 * ... - data * There is no notion of a persistent image id in rbd format 1. */ #define RBD_SUFFIX ".rbd" +#define RBD_V1_DATA_FORMAT "%s.%012llx" #define RBD_DIRECTORY "rbd_directory" #define RBD_INFO "rbd_info" @@ -57,9 +58,6 @@ enum rbd_notify_op { #define RBD_MIN_OBJ_ORDER 16 #define RBD_MAX_OBJ_ORDER 30 -#define RBD_COMP_NONE 0 -#define RBD_CRYPT_NONE 0 - #define RBD_HEADER_TEXT "<<< Rados Block Device Image >>>\n" #define RBD_HEADER_SIGNATURE "RBD" #define RBD_HEADER_VERSION "001.005" diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index aabd8e9d3035..61b3ffa4f458 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -20,7 +20,7 @@ #include <linux/stddef.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/fd.h> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 024b473524c0..1d4c9f8bc1e1 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -5,6 +5,7 @@ #include <linux/hdreg.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/interrupt.h> #include <linux/virtio.h> #include <linux/virtio_blk.h> #include <linux/scatterlist.h> @@ -12,6 +13,7 @@ #include <scsi/scsi_cmnd.h> #include <linux/idr.h> #include <linux/blk-mq.h> +#include <linux/blk-mq-virtio.h> #include <linux/numa.h> #define PART_BITS 4 @@ -426,6 +428,7 @@ static int init_vq(struct virtio_blk *vblk) struct virtqueue **vqs; unsigned short num_vqs; struct virtio_device *vdev = vblk->vdev; + struct irq_affinity desc = { 0, }; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ, struct virtio_blk_config, num_queues, @@ -452,7 +455,8 @@ static int init_vq(struct virtio_blk *vblk) } /* Discover virtqueues and write information to configuration. 
*/ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, + &desc); if (err) goto out; @@ -586,10 +590,18 @@ static int virtblk_init_request(void *data, struct request *rq, return 0; } +static int virtblk_map_queues(struct blk_mq_tag_set *set) +{ + struct virtio_blk *vblk = set->driver_data; + + return blk_mq_virtio_map_queues(set, vblk->vdev, 0); +} + static struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .complete = virtblk_request_done, .init_request = virtblk_init_request, + .map_queues = virtblk_map_queues, }; static unsigned int virtblk_queue_depth; diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index e5c62dcf2c11..e770ad977472 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -23,7 +23,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/mutex.h> diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 20b32bb8c2af..8bdc38d81adf 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -25,6 +25,7 @@ #include <linux/spinlock.h> #include <linux/sysctl.h> #include <linux/wait.h> +#include <linux/sched/signal.h> #include <linux/bcd.h> #include <linux/seq_file.h> #include <linux/bitops.h> diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 5c654b5d4adf..503a41dfa193 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -17,6 +17,7 @@ #include <linux/hw_random.h> #include <linux/kernel.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/random.h> diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index 7f816655cbbf..90f3edffb067 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -78,7 +78,8 @@ config IPMI_POWEROFF endif # IPMI_HANDLER config ASPEED_BT_IPMI_BMC - depends on ARCH_ASPEED + depends on ARCH_ASPEED || COMPILE_TEST + depends on REGMAP && REGMAP_MMIO && MFD_SYSCON tristate "BT IPMI bmc driver" help Provides a driver for the BT (Block Transfer) IPMI interface diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index fc9e8891eae3..d6f5d9eb102d 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -12,10 +12,13 @@ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/mfd/syscon.h> #include <linux/miscdevice.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/poll.h> +#include <linux/regmap.h> #include <linux/sched.h> #include <linux/timer.h> @@ -60,7 +63,8 @@ struct bt_bmc { struct device dev; struct miscdevice miscdev; - void __iomem *base; + struct regmap *map; + int offset; int irq; wait_queue_head_t queue; struct timer_list poll_timer; @@ -69,14 +73,29 @@ struct bt_bmc { static atomic_t open_count = ATOMIC_INIT(0); +static const struct regmap_config bt_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + static u8 bt_inb(struct bt_bmc *bt_bmc, int reg) { - return ioread8(bt_bmc->base + reg); + uint32_t val = 0; + int rc; + + rc = regmap_read(bt_bmc->map, bt_bmc->offset + reg, &val); + WARN(rc != 0, "regmap_read() failed: %d\n", rc); + + return rc == 0 ? 
(u8) val : 0; } static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg) { - iowrite8(data, bt_bmc->base + reg); + int rc; + + rc = regmap_write(bt_bmc->map, bt_bmc->offset + reg, data); + WARN(rc != 0, "regmap_write() failed: %d\n", rc); } static void clr_rd_ptr(struct bt_bmc *bt_bmc) @@ -367,14 +386,18 @@ static irqreturn_t bt_bmc_irq(int irq, void *arg) { struct bt_bmc *bt_bmc = arg; u32 reg; + int rc; + + rc = regmap_read(bt_bmc->map, bt_bmc->offset + BT_CR2, ®); + if (rc) + return IRQ_NONE; - reg = ioread32(bt_bmc->base + BT_CR2); reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY; if (!reg) return IRQ_NONE; /* ack pending IRQs */ - iowrite32(reg, bt_bmc->base + BT_CR2); + regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR2, reg); wake_up(&bt_bmc->queue); return IRQ_HANDLED; @@ -384,7 +407,6 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc, struct platform_device *pdev) { struct device *dev = &pdev->dev; - u32 reg; int rc; bt_bmc->irq = platform_get_irq(pdev, 0); @@ -405,18 +427,17 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc, * will be cleared (along with B2H) when we can write the next * message to the BT buffer */ - reg = ioread32(bt_bmc->base + BT_CR1); - reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY; - iowrite32(reg, bt_bmc->base + BT_CR1); + rc = regmap_update_bits(bt_bmc->map, bt_bmc->offset + BT_CR1, + (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY), + (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY)); - return 0; + return rc; } static int bt_bmc_probe(struct platform_device *pdev) { struct bt_bmc *bt_bmc; struct device *dev; - struct resource *res; int rc; if (!pdev || !pdev->dev.of_node) @@ -431,10 +452,27 @@ static int bt_bmc_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, bt_bmc); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - bt_bmc->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(bt_bmc->base)) - return PTR_ERR(bt_bmc->base); + bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node); + if (IS_ERR(bt_bmc->map)) { + struct resource *res; + void __iomem *base; + + /* + * Assume it's not the MFD-based devicetree description, in + * which case generate a regmap ourselves + */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + bt_bmc->map = devm_regmap_init_mmio(dev, base, &bt_regmap_cfg); + bt_bmc->offset = 0; + } else { + rc = of_property_read_u32(dev->of_node, "reg", &bt_bmc->offset); + if (rc) + return rc; + } mutex_init(&bt_bmc->mutex); init_waitqueue_head(&bt_bmc->queue); @@ -461,12 +499,12 @@ static int bt_bmc_probe(struct platform_device *pdev) add_timer(&bt_bmc->poll_timer); } - iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) | - (BT_IRQ << BT_CR0_IRQ) | - BT_CR0_EN_CLR_SLV_RDP | - BT_CR0_EN_CLR_SLV_WRP | - BT_CR0_ENABLE_IBT, - bt_bmc->base + BT_CR0); + regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR0, + (BT_IO_BASE << BT_CR0_IO_BASE) | + (BT_IRQ << BT_CR0_IRQ) | + BT_CR0_EN_CLR_SLV_RDP | + BT_CR0_EN_CLR_SLV_WRP | + BT_CR0_ENABLE_IBT); clr_b_busy(bt_bmc); diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index a21407de46ae..f45119c5337d 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -108,7 +108,7 @@ static int ipmi_fasync(int fd, struct file *file, int on) return (result); } -static struct ipmi_user_hndl ipmi_hndlrs = +static const struct ipmi_user_hndl ipmi_hndlrs = { .ipmi_recv_hndl = file_receive_handler, }; diff --git a/drivers/char/ipmi/ipmi_msghandler.c 
b/drivers/char/ipmi/ipmi_msghandler.c index 92e53acf2cd2..9f699951b75a 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -102,7 +102,7 @@ struct ipmi_user { struct kref refcount; /* The upper layer that handles receive messages. */ - struct ipmi_user_hndl *handler; + const struct ipmi_user_hndl *handler; void *handler_data; /* The interface this user is bound to. */ @@ -919,7 +919,7 @@ static int intf_err_seq(ipmi_smi_t intf, int ipmi_create_user(unsigned int if_num, - struct ipmi_user_hndl *handler, + const struct ipmi_user_hndl *handler, void *handler_data, ipmi_user_t *user) { diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index 6e658aa114f1..b338a4becbf8 100644 --- a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -196,7 +196,7 @@ static void ipmi_powernv_poll(void *send_info) ipmi_powernv_recv(smi); } -static struct ipmi_smi_handlers ipmi_powernv_smi_handlers = { +static const struct ipmi_smi_handlers ipmi_powernv_smi_handlers = { .owner = THIS_MODULE, .start_processing = ipmi_powernv_start_processing, .sender = ipmi_powernv_send, diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 4035495f3a86..5ca24d9b101b 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -53,6 +53,7 @@ #include <linux/ctype.h> #include <linux/delay.h> #include <linux/atomic.h> +#include <linux/sched/signal.h> #ifdef CONFIG_X86 /* @@ -985,7 +986,7 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data) pretimeout_since_last_heartbeat = 1; } -static struct ipmi_user_hndl ipmi_hndlrs = { +static const struct ipmi_user_hndl ipmi_hndlrs = { .ipmi_recv_hndl = ipmi_wdog_msg_handler, .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler }; diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 5b6742770656..565e4cf04a02 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -117,7 +117,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/major.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/delay.h> diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 87885d146dbb..2a558c706581 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -58,7 +58,7 @@ #include <linux/module.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/device.h> #include <linux/ioctl.h> #include <linux/parport.h> diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 35259961cc38..974d48927b07 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -74,7 +74,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sysctl.h> #include <linux/wait.h> #include <linux/bcd.h> diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c index ec07f0e99732..6aa32679fd58 100644 --- a/drivers/char/snsc.c +++ b/drivers/char/snsc.c @@ -16,7 +16,7 @@ */ #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/device.h> #include <linux/poll.h> #include <linux/init.h> diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c index 59bcefd6ec7c..e452673dff66 100644 --- a/drivers/char/snsc_event.c +++ b/drivers/char/snsc_event.c @@ -16,7 +16,7 @@ */ #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include 
<linux/slab.h> #include <asm/byteorder.h> #include <asm/sn/sn_sal.h> diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 17857beb4892..e9b7e0b3cabe 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1136,6 +1136,8 @@ static int put_chars(u32 vtermno, const char *buf, int count) { struct port *port; struct scatterlist sg[1]; + void *data; + int ret; if (unlikely(early_put_chars)) return early_put_chars(vtermno, buf, count); @@ -1144,8 +1146,14 @@ static int put_chars(u32 vtermno, const char *buf, int count) if (!port) return -EPIPE; - sg_init_one(sg, buf, count); - return __send_to_port(port, sg, 1, count, (void *)buf, false); + data = kmemdup(buf, count, GFP_ATOMIC); + if (!data) + return -ENOMEM; + + sg_init_one(sg, data, count); + ret = __send_to_port(port, sg, 1, count, data, false); + kfree(data); + return ret; } /* @@ -1939,7 +1947,7 @@ static int init_vqs(struct ports_device *portdev) /* Find the queues. */ err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, io_callbacks, - (const char **)io_names); + (const char **)io_names, NULL); if (err) goto free; diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 9d9af446bafc..1c1ec137a3cc 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -564,6 +564,46 @@ static struct clk_gate gxbb_clk81 = { }, }; +static struct clk_mux gxbb_sar_adc_clk_sel = { + .reg = (void *)HHI_SAR_CLK_CNTL, + .mask = 0x3, + .shift = 9, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sar_adc_clk_sel", + .ops = &clk_mux_ops, + /* NOTE: The datasheet doesn't list the parents for bit 10 */ + .parent_names = (const char *[]){ "xtal", "clk81", }, + .num_parents = 2, + }, +}; + +static struct clk_divider gxbb_sar_adc_clk_div = { + .reg = (void *)HHI_SAR_CLK_CNTL, + .shift = 0, + .width = 8, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sar_adc_clk_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "sar_adc_clk_sel" }, + .num_parents = 1, + }, +}; + +static struct clk_gate gxbb_sar_adc_clk = { + .reg = (void *)HHI_SAR_CLK_CNTL, + .bit_idx = 8, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sar_adc_clk", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "sar_adc_clk_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + /* Everything Else (EE) domain gates */ static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0); static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1); @@ -754,6 +794,9 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = { [CLKID_SD_EMMC_A] = &gxbb_emmc_a.hw, [CLKID_SD_EMMC_B] = &gxbb_emmc_b.hw, [CLKID_SD_EMMC_C] = &gxbb_emmc_c.hw, + [CLKID_SAR_ADC_CLK] = &gxbb_sar_adc_clk.hw, + [CLKID_SAR_ADC_SEL] = &gxbb_sar_adc_clk_sel.hw, + [CLKID_SAR_ADC_DIV] = &gxbb_sar_adc_clk_div.hw, }, .num = NR_CLKS, }; @@ -856,6 +899,7 @@ static struct clk_gate *gxbb_clk_gates[] = { &gxbb_emmc_a, &gxbb_emmc_b, &gxbb_emmc_c, + &gxbb_sar_adc_clk, }; static int gxbb_clkc_probe(struct platform_device *pdev) @@ -888,6 +932,10 @@ static int gxbb_clkc_probe(struct platform_device *pdev) gxbb_mpeg_clk_sel.reg = clk_base + (u64)gxbb_mpeg_clk_sel.reg; gxbb_mpeg_clk_div.reg = clk_base + (u64)gxbb_mpeg_clk_div.reg; + /* Populate the base address for the SAR ADC clks */ + gxbb_sar_adc_clk_sel.reg = clk_base + (u64)gxbb_sar_adc_clk_sel.reg; + gxbb_sar_adc_clk_div.reg = clk_base + (u64)gxbb_sar_adc_clk_div.reg; + /* Populate base address for gates */ for (i = 0; i < 
ARRAY_SIZE(gxbb_clk_gates); i++) gxbb_clk_gates[i]->reg = clk_base + diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h index 0252939ba58f..8ee2022ce5d5 100644 --- a/drivers/clk/meson/gxbb.h +++ b/drivers/clk/meson/gxbb.h @@ -191,7 +191,7 @@ #define CLKID_PERIPHS 20 #define CLKID_SPICC 21 /* CLKID_I2C */ -#define CLKID_SAR_ADC 23 +/* #define CLKID_SAR_ADC */ #define CLKID_SMART_CARD 24 #define CLKID_RNG0 25 #define CLKID_UART0 26 @@ -204,7 +204,7 @@ #define CLKID_ASSIST_MISC 33 /* CLKID_SPI */ #define CLKID_I2S_SPDIF 35 -#define CLKID_ETH 36 +/* CLKID_ETH */ #define CLKID_DEMUX 37 #define CLKID_AIU_GLUE 38 #define CLKID_IEC958 39 @@ -231,13 +231,13 @@ #define CLKID_AHB_DATA_BUS 60 #define CLKID_AHB_CTRL_BUS 61 #define CLKID_HDMI_INTR_SYNC 62 -#define CLKID_HDMI_PCLK 63 +/* CLKID_HDMI_PCLK */ /* CLKID_USB1_DDR_BRIDGE */ /* CLKID_USB0_DDR_BRIDGE */ #define CLKID_MMC_PCLK 66 #define CLKID_DVIN 67 #define CLKID_UART2 68 -#define CLKID_SANA 69 +/* #define CLKID_SANA */ #define CLKID_VPU_INTR 70 #define CLKID_SEC_AHB_AHB3_BRIDGE 71 #define CLKID_CLK81_A53 72 @@ -245,7 +245,7 @@ #define CLKID_VCLK2_VENCI1 74 #define CLKID_VCLK2_VENCP0 75 #define CLKID_VCLK2_VENCP1 76 -#define CLKID_GCLK_VENCI_INT0 77 +/* CLKID_GCLK_VENCI_INT0 */ #define CLKID_GCLK_VENCI_INT 78 #define CLKID_DAC_CLK 79 #define CLKID_AOCLK_GATE 80 @@ -265,8 +265,11 @@ /* CLKID_SD_EMMC_A */ /* CLKID_SD_EMMC_B */ /* CLKID_SD_EMMC_C */ +/* CLKID_SAR_ADC_CLK */ +/* CLKID_SAR_ADC_SEL */ +#define CLKID_SAR_ADC_DIV 99 -#define NR_CLKS 97 +#define NR_CLKS 100 /* include the CLKIDs that have been made part of the stable DT binding */ #include <dt-bindings/clock/gxbb-clkc.h> diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 93aa1364376a..7a8a4117f123 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -24,6 +24,7 @@ #include <linux/of_address.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/acpi.h> diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c index 9cae38eebec2..1c24de215c14 100644 --- a/drivers/clocksource/pxa_timer.c +++ b/drivers/clocksource/pxa_timer.c @@ -19,6 +19,7 @@ #include <linux/clockchips.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <clocksource/pxa.h> diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c index 10318cc99c0e..e9f50d289362 100644 --- a/drivers/clocksource/timer-digicolor.c +++ b/drivers/clocksource/timer-digicolor.c @@ -31,6 +31,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqreturn.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/of.h> #include <linux/of_address.h> diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 631bd2c86c5e..47e24b5384b3 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -18,7 +18,6 @@ #include <linux/export.h> #include <linux/kernel_stat.h> -#include <linux/sched.h> #include <linux/slab.h> #include "cpufreq_governor.h" diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index f5717ca070cc..0236ec2cd654 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -20,6 +20,7 @@ #include <linux/atomic.h> #include <linux/irq_work.h> #include <linux/cpufreq.h> 
+#include <linux/sched/cpufreq.h> #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/mutex.h> diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 4a017e895296..3937acf7e026 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -16,6 +16,7 @@ #include <linux/percpu-defs.h> #include <linux/slab.h> #include <linux/tick.h> +#include <linux/sched/cpufreq.h> #include "cpufreq_ondemand.h" diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index eb0f7fb71685..b1fbaa30ae04 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -19,7 +19,7 @@ #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/cpufreq.h> #include <linux/list.h> #include <linux/cpu.h> #include <linux/cpufreq.h> @@ -39,11 +39,6 @@ #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 -#define ATOM_RATIOS 0x66a -#define ATOM_VIDS 0x66b -#define ATOM_TURBO_RATIOS 0x66c -#define ATOM_TURBO_VIDS 0x66d - #ifdef CONFIG_ACPI #include <acpi/processor.h> #include <acpi/cppc_acpi.h> @@ -364,37 +359,25 @@ static bool driver_registered __read_mostly; static bool acpi_ppc; #endif -static struct perf_limits performance_limits = { - .no_turbo = 0, - .turbo_disabled = 0, - .max_perf_pct = 100, - .max_perf = int_ext_tofp(1), - .min_perf_pct = 100, - .min_perf = int_ext_tofp(1), - .max_policy_pct = 100, - .max_sysfs_pct = 100, - .min_policy_pct = 0, - .min_sysfs_pct = 0, -}; +static struct perf_limits performance_limits; +static struct perf_limits powersave_limits; +static struct perf_limits *limits; -static struct perf_limits powersave_limits = { - .no_turbo = 0, - .turbo_disabled = 0, - .max_perf_pct = 100, - .max_perf = int_ext_tofp(1), - .min_perf_pct = 0, - .min_perf = 0, - .max_policy_pct = 100, - .max_sysfs_pct = 100, - .min_policy_pct = 0, - .min_sysfs_pct = 0, -}; +static void intel_pstate_init_limits(struct perf_limits *limits) +{ + memset(limits, 0, sizeof(*limits)); + limits->max_perf_pct = 100; + limits->max_perf = int_ext_tofp(1); + limits->max_policy_pct = 100; + limits->max_sysfs_pct = 100; +} -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE -static struct perf_limits *limits = &performance_limits; -#else -static struct perf_limits *limits = &powersave_limits; -#endif +static void intel_pstate_set_performance_limits(struct perf_limits *limits) +{ + intel_pstate_init_limits(limits); + limits->min_perf_pct = 100; + limits->min_perf = int_ext_tofp(1); +} static DEFINE_MUTEX(intel_pstate_driver_lock); static DEFINE_MUTEX(intel_pstate_limits_lock); @@ -1367,7 +1350,7 @@ static int atom_get_min_pstate(void) { u64 value; - rdmsrl(ATOM_RATIOS, value); + rdmsrl(MSR_ATOM_CORE_RATIOS, value); return (value >> 8) & 0x7F; } @@ -1375,7 +1358,7 @@ static int atom_get_max_pstate(void) { u64 value; - rdmsrl(ATOM_RATIOS, value); + rdmsrl(MSR_ATOM_CORE_RATIOS, value); return (value >> 16) & 0x7F; } @@ -1383,7 +1366,7 @@ static int atom_get_turbo_pstate(void) { u64 value; - rdmsrl(ATOM_TURBO_RATIOS, value); + rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); return value & 0x7F; } @@ -1445,7 +1428,7 @@ static void atom_get_vid(struct cpudata *cpudata) { u64 value; - rdmsrl(ATOM_VIDS, value); + rdmsrl(MSR_ATOM_CORE_VIDS, value); cpudata->vid.min = int_tofp((value >> 8) & 0x7f); cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( @@ -1453,7 +1436,7 @@ static void atom_get_vid(struct cpudata *cpudata) 
int_tofp(cpudata->pstate.max_pstate - cpudata->pstate.min_pstate)); - rdmsrl(ATOM_TURBO_VIDS, value); + rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); cpudata->vid.turbo = value & 0x7f; } @@ -2084,20 +2067,6 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) synchronize_sched(); } -static void intel_pstate_set_performance_limits(struct perf_limits *limits) -{ - limits->no_turbo = 0; - limits->turbo_disabled = 0; - limits->max_perf_pct = 100; - limits->max_perf = int_ext_tofp(1); - limits->min_perf_pct = 100; - limits->min_perf = int_ext_tofp(1); - limits->max_policy_pct = 100; - limits->max_sysfs_pct = 100; - limits->min_policy_pct = 0; - limits->min_sysfs_pct = 0; -} - static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, struct perf_limits *limits) { @@ -2466,6 +2435,11 @@ static int intel_pstate_register_driver(void) { int ret; + intel_pstate_init_limits(&powersave_limits); + intel_pstate_set_performance_limits(&performance_limits); + limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ? + &performance_limits : &powersave_limits; + ret = cpufreq_register_driver(intel_pstate_driver); if (ret) { intel_pstate_driver_cleanup(); diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index a6fefac8afe4..bfec1bcd3835 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -23,10 +23,6 @@ #include <linux/slab.h> #include <linux/smp.h> -#if !defined(CONFIG_ARM) -#include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */ -#endif - /** * struct cpu_data * @pclk: the parent clock of cpu diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c index b73feeb666f9..35ddb6da93aa 100644 --- a/drivers/cpufreq/sparc-us2e-cpufreq.c +++ b/drivers/cpufreq/sparc-us2e-cpufreq.c @@ -234,7 +234,7 @@ static unsigned int us2e_freq_get(unsigned int cpu) cpumask_t cpus_allowed; unsigned long clock_tick, estar; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); clock_tick = sparc64_get_clock_tick(cpu) / 1000; @@ -252,7 +252,7 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index) unsigned long clock_tick, divisor, old_divisor, estar; cpumask_t cpus_allowed; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c index 9bb42ba50efa..a8d86a449ca1 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c @@ -82,7 +82,7 @@ static unsigned int us3_freq_get(unsigned int cpu) unsigned long reg; unsigned int ret; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); reg = read_safari_cfg(); @@ -99,7 +99,7 @@ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index) unsigned long new_bits, new_freq, reg; cpumask_t cpus_allowed; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); new_freq = sparc64_get_clock_tick(cpu) / 1000; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 62810ff3b00f..548b90be7685 100644 --- a/drivers/cpuidle/cpuidle.c +++ 
b/drivers/cpuidle/cpuidle.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/notifier.h> #include <linux/pm_qos.h> #include <linux/cpu.h> diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index ab264d393233..e53fb861beb0 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -11,6 +11,7 @@ #include <linux/mutex.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/idle.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> #include <linux/tick.h> diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 8d6d25c38c02..b2330fd69e34 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -18,6 +18,8 @@ #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> +#include <linux/sched/stat.h> #include <linux/math64.h> #include <linux/cpu.h> @@ -287,7 +289,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) unsigned int interactivity_req; unsigned int expected_interval; unsigned long nr_iowaiters, cpu_load; - int resume_latency = dev_pm_qos_read_value(device); + int resume_latency = dev_pm_qos_raw_read_value(device); if (data->needs_update) { menu_update(drv, dev); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 0b49dbc423e2..473d31288ad8 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -459,6 +459,7 @@ config CRYPTO_DEV_ATMEL_AES config CRYPTO_DEV_ATMEL_TDES tristate "Support for Atmel DES/TDES hw accelerator" + depends on HAS_DMA depends on ARCH_AT91 || COMPILE_TEST select CRYPTO_DES select CRYPTO_BLKCIPHER @@ -472,6 +473,7 @@ config CRYPTO_DEV_ATMEL_TDES config CRYPTO_DEV_ATMEL_SHA tristate "Support for Atmel SHA hw accelerator" + depends on HAS_DMA depends on ARCH_AT91 || COMPILE_TEST select CRYPTO_HASH help @@ -583,6 +585,7 @@ config CRYPTO_DEV_ROCKCHIP config CRYPTO_DEV_MEDIATEK tristate "MediaTek's EIP97 Cryptographic Engine driver" + depends on HAS_DMA depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST select CRYPTO_AES select CRYPTO_AEAD diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c index aac2966ff8d9..6ffc740c7431 100644 --- a/drivers/crypto/cavium/cpt/cptvf_main.c +++ b/drivers/crypto/cavium/cpt/cptvf_main.c @@ -242,6 +242,7 @@ static int alloc_command_queues(struct cpt_vf *cptvf, if (!curr->head) { dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", i, queue->nchunks); + kfree(curr); goto cmd_qfail; } @@ -815,8 +816,10 @@ static void cptvf_remove(struct pci_dev *pdev) { struct cpt_vf *cptvf = pci_get_drvdata(pdev); - if (!cptvf) + if (!cptvf) { dev_err(&pdev->dev, "Invalid CPT-VF device\n"); + return; + } /* Convey DOWN to PF */ if (cptvf_send_vf_down(cptvf)) { diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 7f57f30f8863..169e66231bcf 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -330,8 +330,8 @@ void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info) { struct pci_dev *pdev = cptvf->pdev; - if (!info || !cptvf) { - dev_err(&pdev->dev, "Input params are incorrect for post processing\n"); + if (!info) { + dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n"); return; } diff --git a/drivers/crypto/virtio/virtio_crypto_core.c 
b/drivers/crypto/virtio/virtio_crypto_core.c index b5b153317376..21472e427f6f 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -120,7 +120,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi) } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names); + names, NULL); if (ret) goto err_find; diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 94ad5c0adbcb..72a26eb4e954 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -27,11 +27,12 @@ #include <asm/switch_to.h> #include <crypto/aes.h> #include <crypto/scatterwalk.h> +#include <crypto/skcipher.h> #include "aesp8-ppc.h" struct p8_aes_cbc_ctx { - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct aes_key enc_key; struct aes_key dec_key; }; @@ -39,7 +40,7 @@ struct p8_aes_cbc_ctx { static int p8_aes_cbc_init(struct crypto_tfm *tfm) { const char *alg; - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); if (!(alg = crypto_tfm_alg_name(tfm))) { @@ -47,8 +48,9 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm) return -ENOENT; } - fallback = - crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + fallback = crypto_alloc_skcipher(alg, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", @@ -56,11 +58,12 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm) return PTR_ERR(fallback); } printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + crypto_skcipher_driver_name(fallback)); + - crypto_blkcipher_set_flags( + crypto_skcipher_set_flags( fallback, - crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); ctx->fallback = fallback; return 0; @@ -71,7 +74,7 @@ static void p8_aes_cbc_exit(struct crypto_tfm *tfm) struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->fallback) { - crypto_free_blkcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); ctx->fallback = NULL; } } @@ -91,7 +94,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, pagefault_enable(); preempt_enable(); - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); return ret; } @@ -103,15 +106,14 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; if (in_interrupt()) { - ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, - nbytes); + SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); + skcipher_request_set_tfm(req, ctx->fallback); + skcipher_request_set_callback(req, desc->flags, NULL, NULL); + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); + ret = crypto_skcipher_encrypt(req); + skcipher_request_zero(req); } else { preempt_disable(); pagefault_disable(); @@ -144,15 +146,14 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; if (in_interrupt()) 
{ - ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, - nbytes); + SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); + skcipher_request_set_tfm(req, ctx->fallback); + skcipher_request_set_callback(req, desc->flags, NULL, NULL); + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); + ret = crypto_skcipher_decrypt(req); + skcipher_request_zero(req); } else { preempt_disable(); pagefault_disable(); diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 24353ec336c5..6adc9290557a 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -28,11 +28,12 @@ #include <crypto/aes.h> #include <crypto/scatterwalk.h> #include <crypto/xts.h> +#include <crypto/skcipher.h> #include "aesp8-ppc.h" struct p8_aes_xts_ctx { - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct aes_key enc_key; struct aes_key dec_key; struct aes_key tweak_key; @@ -41,7 +42,7 @@ struct p8_aes_xts_ctx { static int p8_aes_xts_init(struct crypto_tfm *tfm) { const char *alg; - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); if (!(alg = crypto_tfm_alg_name(tfm))) { @@ -49,8 +50,8 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm) return -ENOENT; } - fallback = - crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + fallback = crypto_alloc_skcipher(alg, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", @@ -58,11 +59,11 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm) return PTR_ERR(fallback); } printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + crypto_skcipher_driver_name(fallback)); - crypto_blkcipher_set_flags( + crypto_skcipher_set_flags( fallback, - crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); ctx->fallback = fallback; return 0; @@ -73,7 +74,7 @@ static void p8_aes_xts_exit(struct crypto_tfm *tfm) struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->fallback) { - crypto_free_blkcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); ctx->fallback = NULL; } } @@ -98,7 +99,7 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key, pagefault_enable(); preempt_enable(); - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); return ret; } @@ -113,15 +114,14 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; if (in_interrupt()) { - ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) : - crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes); + SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); + skcipher_request_set_tfm(req, ctx->fallback); + skcipher_request_set_callback(req, desc->flags, NULL, NULL); + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); + ret = enc? 
crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); + skcipher_request_zero(req); } else { preempt_disable(); pagefault_disable(); diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index b75c77254fdb..8d9829ff2a78 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -13,6 +13,7 @@ #include <linux/pagemap.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/magic.h> #include <linux/mount.h> #include <linux/pfn_t.h> #include <linux/hash.h> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 718f832a5c71..0007b792827b 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -325,6 +325,9 @@ static const struct file_operations dma_buf_fops = { .llseek = dma_buf_llseek, .poll = dma_buf_poll, .unlocked_ioctl = dma_buf_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = dma_buf_ioctl, +#endif }; /* diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index d1f1f456f5c4..d195d617076d 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -22,6 +22,7 @@ #include <linux/export.h> #include <linux/atomic.h> #include <linux/dma-fence.h> +#include <linux/sched/signal.h> #define CREATE_TRACE_POINTS #include <trace/events/dma_fence.h> diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index c9297605058c..54d581d407aa 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -16,6 +16,7 @@ #include <linux/freezer.h> #include <linux/init.h> #include <linux/kthread.h> +#include <linux/sched/task.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/random.h> diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index aee149bdf4c0..a301fcf46e88 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1307,8 +1307,7 @@ static void iso_resource_work(struct work_struct *work) */ if (r->todo == ISO_RES_REALLOC && !success && !client->in_shutdown && - idr_find(&client->resource_idr, r->resource.handle)) { - idr_remove(&client->resource_idr, r->resource.handle); + idr_remove(&client->resource_idr, r->resource.handle)) { client_put(client); free = true; } diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c index 29d58feaf675..6523ce962865 100644 --- a/drivers/firmware/psci_checker.c +++ b/drivers/firmware/psci_checker.c @@ -20,6 +20,7 @@ #include <linux/cpu_pm.h> #include <linux/kernel.h> #include <linux/kthread.h> +#include <uapi/linux/sched/types.h> #include <linux/module.h> #include <linux/preempt.h> #include <linux/psci.h> diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 4ff02d310868..84e4c9a58a0c 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -19,6 +19,7 @@ #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/semaphore.h> +#include <linux/sched/clock.h> #include <soc/tegra/bpmp.h> #include <soc/tegra/bpmp-abi.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index c02db01f6583..0218cea6be4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -70,10 +70,10 @@ static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id) struct amdgpu_bo_list *list; mutex_lock(&fpriv->bo_list_lock); - list = idr_find(&fpriv->bo_list_handles, id); + list = idr_remove(&fpriv->bo_list_handles, id); if (list) { + /* Another user may have a reference to this list still */ mutex_lock(&list->lock); - 
idr_remove(&fpriv->bo_list_handles, id); mutex_unlock(&list->lock); amdgpu_bo_list_free(list); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 400c66ba4c6b..cf0500671353 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -135,15 +135,11 @@ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id) struct amdgpu_ctx *ctx; mutex_lock(&mgr->lock); - ctx = idr_find(&mgr->ctx_handles, id); - if (ctx) { - idr_remove(&mgr->ctx_handles, id); + ctx = idr_remove(&mgr->ctx_handles, id); + if (ctx) kref_put(&ctx->refcount, amdgpu_ctx_do_release); - mutex_unlock(&mgr->lock); - return 0; - } mutex_unlock(&mgr->lock); - return -EINVAL; + return ctx ? 0 : -EINVAL; } static int amdgpu_ctx_query(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 6a3470f84998..d1ce83d73a87 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -23,7 +23,7 @@ #include <linux/mm_types.h> #include <linux/slab.h> #include <linux/types.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <linux/mm.h> #include <linux/mman.h> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index d83de985e88c..6acc4313363e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -23,6 +23,8 @@ #include <linux/printk.h> #include <linux/slab.h> +#include <linux/mm_types.h> + #include "kfd_priv.h" #include "kfd_mqd_manager.h" #include "cik_regs.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index fa32c32fa1c2..a9b9882a9a77 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -23,6 +23,8 @@ #include <linux/printk.h> #include <linux/slab.h> +#include <linux/mm_types.h> + #include "kfd_priv.h" #include "kfd_mqd_manager.h" #include "vi_structs.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ca5f2aa7232d..84d1ffd1eef9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -23,6 +23,7 @@ #include <linux/mutex.h> #include <linux/log2.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/amd-iommu.h> #include <linux/notifier.h> diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 1bf83ed113b3..16f96563cd2b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -24,6 +24,7 @@ #include <linux/kthread.h> #include <linux/wait.h> #include <linux/sched.h> +#include <uapi/linux/sched/types.h> #include <drm/drmP.h> #include "gpu_scheduler.h" diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h index cc04539c0ff3..1d9c4e75d303 100644 --- a/drivers/gpu/drm/ast/ast_dram_tables.h +++ b/drivers/gpu/drm/ast/ast_dram_tables.h @@ -141,4 +141,66 @@ static const struct ast_dramstruct ast2100_dram_table_data[] = { { 0xffff, 0xffffffff }, }; +/* + * AST2500 DRAM settings modules + */ +#define REGTBL_NUM 17 +#define REGIDX_010 0 +#define REGIDX_014 1 +#define REGIDX_018 2 +#define REGIDX_020 3 +#define REGIDX_024 4 +#define REGIDX_02C 5 +#define 
REGIDX_030 6 +#define REGIDX_214 7 +#define REGIDX_2E0 8 +#define REGIDX_2E4 9 +#define REGIDX_2E8 10 +#define REGIDX_2EC 11 +#define REGIDX_2F0 12 +#define REGIDX_2F4 13 +#define REGIDX_2F8 14 +#define REGIDX_RFC 15 +#define REGIDX_PLL 16 + +static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = { + 0x64604D38, /* 0x010 */ + 0x29690599, /* 0x014 */ + 0x00000300, /* 0x018 */ + 0x00000000, /* 0x020 */ + 0x00000000, /* 0x024 */ + 0x02181E70, /* 0x02C */ + 0x00000040, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x02001300, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001B, /* 0x2E8 */ + 0x35B8C105, /* 0x2EC */ + 0x08090408, /* 0x2F0 */ + 0x9B000800, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x9971452F, /* tRFC */ + 0x000071C1 /* PLL */ +}; + +static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = { + 0x63604E37, /* 0x010 */ + 0xE97AFA99, /* 0x014 */ + 0x00019000, /* 0x018 */ + 0x08000000, /* 0x020 */ + 0x00000400, /* 0x024 */ + 0x00000410, /* 0x02C */ + 0x00000101, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x03002900, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001C, /* 0x2E8 */ + 0x35B8C106, /* 0x2EC */ + 0x08080607, /* 0x2F0 */ + 0x9B000900, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x99714545, /* tRFC */ + 0x000071C1 /* PLL */ +}; + #endif diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 5a8fa1c85229..8880f0b62e9c 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -65,6 +65,7 @@ enum ast_chip { AST2150, AST2300, AST2400, + AST2500, AST1180, }; @@ -81,6 +82,7 @@ enum ast_tx_chip { #define AST_DRAM_1Gx32 3 #define AST_DRAM_2Gx16 6 #define AST_DRAM_4Gx16 7 +#define AST_DRAM_8Gx16 8 struct ast_fbdev; @@ -114,7 +116,11 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; - bool DisableP2A; + enum { + ast_use_p2a, + ast_use_dt, + ast_use_defaults + } config_mode; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; @@ -301,8 +307,8 @@ struct ast_vbios_dclk_info { }; struct ast_vbios_mode_info { - struct ast_vbios_stdtable *std_table; - struct ast_vbios_enhtable *enh_table; + const struct ast_vbios_stdtable *std_table; + const struct ast_vbios_enhtable *enh_table; }; extern int ast_mode_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 993909430736..262c2c0e43b4 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -32,8 +32,6 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> -#include "ast_dram_tables.h" - void ast_set_index_reg_mask(struct ast_private *ast, uint32_t base, uint8_t index, uint8_t mask, uint8_t val) @@ -62,30 +60,99 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, return ret; } +static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) +{ + struct device_node *np = dev->pdev->dev.of_node; + struct ast_private *ast = dev->dev_private; + uint32_t data, jregd0, jregd1; + + /* Defaults */ + ast->config_mode = ast_use_defaults; + *scu_rev = 0xffffffff; + + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", + scu_rev)) { + /* We do, disable P2A access */ + ast->config_mode = ast_use_dt; + DRM_INFO("Using device-tree for configuration\n"); + return; + } + + /* Not all families have a P2A bridge */ + if (dev->pdev->device != PCI_CHIP_AST2000) + return; + + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. 
We force using P2A if VGA only mode bit + * is set D[7] + */ + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + /* Double check it's actually working */ + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) { + /* P2A works, grab silicon revision */ + ast->config_mode = ast_use_p2a; + + DRM_INFO("Using P2A bridge for configuration\n"); + + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + *scu_rev = ast_read32(ast, 0x1207c); + return; + } + } + + /* We have a P2A bridge but it's disabled */ + DRM_INFO("P2A bridge disabled, using default configuration\n"); +} static int ast_detect_chip(struct drm_device *dev, bool *need_post) { struct ast_private *ast = dev->dev_private; - uint32_t data, jreg; + uint32_t jreg, scu_rev; + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. We also inform + * our caller that it needs to POST the chip + * (Assumption: VGA not enabled -> need to POST) + */ + if (!ast_is_vga_enabled(dev)) { + ast_enable_vga(dev); + DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); + *need_post = true; + } else + *need_post = false; + + + /* Enable extended register access */ + ast_enable_mmio(dev); ast_open_key(ast); + /* Find out whether P2A works or whether to use device-tree */ + ast_detect_config_mode(dev, &scu_rev); + + /* Identify chipset */ if (dev->pdev->device == PCI_CHIP_AST1180) { ast->chip = AST1100; DRM_INFO("AST 1180 detected\n"); } else { - if (dev->pdev->revision >= 0x30) { + if (dev->pdev->revision >= 0x40) { + ast->chip = AST2500; + DRM_INFO("AST 2500 detected\n"); + } else if (dev->pdev->revision >= 0x30) { ast->chip = AST2400; DRM_INFO("AST 2400 detected\n"); } else if (dev->pdev->revision >= 0x20) { ast->chip = AST2300; DRM_INFO("AST 2300 detected\n"); } else if (dev->pdev->revision >= 0x10) { - uint32_t data; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - data = ast_read32(ast, 0x1207c); - switch (data & 0x0300) { + switch (scu_rev & 0x0300) { case 0x0200: ast->chip = AST1100; DRM_INFO("AST 1100 detected\n"); @@ -110,26 +177,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } } - /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. 
We also inform - * our caller that it needs to POST the chip - * (Assumption: VGA not enabled -> need to POST) - */ - if (!ast_is_vga_enabled(dev)) { - ast_enable_vga(dev); - ast_enable_mmio(dev); - DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); - *need_post = true; - } else - *need_post = false; - - /* Check P2A Access */ - ast->DisableP2A = true; - data = ast_read32(ast, 0xf004); - if (data != 0xFFFFFFFF) - ast->DisableP2A = false; - /* Check if we support wide screen */ switch (ast->chip) { case AST1180: @@ -146,17 +193,15 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - if (ast->DisableP2A == false) { - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ - ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ - ast->support_wide_screen = true; - } + if (ast->chip == AST2300 && + (scu_rev & 0x300) == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && + (scu_rev & 0x300) == 0x100) /* ast1400 */ + ast->support_wide_screen = true; + if (ast->chip == AST2500 && + scu_rev == 0x100) /* ast2510 */ + ast->support_wide_screen = true; } break; } @@ -220,85 +265,121 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) static int ast_get_dram_info(struct drm_device *dev) { + struct device_node *np = dev->pdev->dev.of_node; struct ast_private *ast = dev->dev_private; - uint32_t data, data2; - uint32_t denum, num, div, ref_pll; + uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; + uint32_t denum, num, div, ref_pll, dsel; - if (ast->DisableP2A) - { - ast->dram_bus_width = 16; - ast->dram_type = AST_DRAM_1Gx16; - ast->mclk = 396; - } - else - { + switch (ast->config_mode) { + case ast_use_dt: + /* + * If some properties are missing, use reasonable + * defaults for AST2400 + */ + if (of_property_read_u32(np, "aspeed,mcr-configuration", + &mcr_cfg)) + mcr_cfg = 0x00000577; + if (of_property_read_u32(np, "aspeed,mcr-scu-mpll", + &mcr_scu_mpll)) + mcr_scu_mpll = 0x000050C0; + if (of_property_read_u32(np, "aspeed,mcr-scu-strap", + &mcr_scu_strap)) + mcr_scu_strap = 0; + break; + case ast_use_p2a: ast_write32(ast, 0xf004, 0x1e6e0000); ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) - ast->dram_bus_width = 16; + mcr_cfg = ast_read32(ast, 0x10004); + mcr_scu_mpll = ast_read32(ast, 0x10120); + mcr_scu_strap = ast_read32(ast, 0x10170); + break; + case ast_use_defaults: + default: + ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + if (ast->chip == AST2500) + ast->mclk = 800; else - ast->dram_bus_width = 32; - - if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { - case 0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; - case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (data & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; - break; - case 8: - if (data & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; - break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; - break; - } - } + ast->mclk = 396; + return 0; + } - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 
0x10170); - if (data2 & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; + if (mcr_cfg & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { - case 3: - div = 0x4; + if (ast->chip == AST2500) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_1Gx16; break; - case 2: + default: case 1: - div = 0x2; + ast->dram_type = AST_DRAM_2Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_4Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_8Gx16; + break; + } + } else if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; break; default: - div = 0x1; + case 1: + ast->dram_type = AST_DRAM_1Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (mcr_cfg & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (mcr_cfg & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; break; } - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); } + + if (mcr_scu_strap & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = mcr_scu_mpll & 0x1f; + num = (mcr_scu_mpll & 0x3fe0) >> 5; + dsel = (mcr_scu_mpll & 0xc000) >> 14; + switch (dsel) { + case 3: + div = 0x4; + break; + case 2: + case 1: + div = 0x2; + break; + default: + div = 0x1; + break; + } + ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000)); return 0; } @@ -437,17 +518,19 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) ast_detect_chip(dev, &need_post); + if (need_post) + ast_post_gpu(dev); + if (ast->chip != AST1180) { ret = ast_get_dram_info(dev); if (ret) goto out_free; ast->vram_size = ast_get_vram_info(dev); - DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size); + DRM_INFO("dram MCLK=%u Mhz type=%d bus_width=%d size=%08x\n", + ast->mclk, ast->dram_type, + ast->dram_bus_width, ast->vram_size); } - if (need_post) - ast_post_gpu(dev); - ret = ast_mm_init(ast); if (ret) goto out_free; @@ -465,6 +548,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) ast->chip == AST2200 || ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500 || ast->chip == AST1180) { dev->mode_config.max_width = 1920; dev->mode_config.max_height = 2048; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 606cb40f6c7c..47b78e52691c 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -81,9 +81,9 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo struct ast_private *ast = crtc->dev->dev_private; const struct drm_framebuffer *fb = crtc->primary->fb; u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate; + const struct ast_vbios_enhtable *best = NULL; u32 hborder, vborder; bool check_sync; - struct ast_vbios_enhtable *best = NULL; switch (fb->format->cpp[0] * 8) { case 8: @@ -147,7 +147,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo refresh_rate = drm_mode_vrefresh(mode); check_sync = vbios_mode->enh_table->flags & WideScreenMode; do { - struct ast_vbios_enhtable *loop = vbios_mode->enh_table; + const struct ast_vbios_enhtable *loop = vbios_mode->enh_table; while (loop->refresh_rate != 0xff) { if 
((check_sync) && @@ -227,7 +227,7 @@ static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode struct ast_vbios_mode_info *vbios_mode) { struct ast_private *ast = crtc->dev->dev_private; - struct ast_vbios_stdtable *stdtable; + const struct ast_vbios_stdtable *stdtable; u32 i; u8 jreg; @@ -273,7 +273,11 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod { struct ast_private *ast = crtc->dev->dev_private; u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0; - u16 temp; + u16 temp, precache = 0; + + if ((ast->chip == AST2500) && + (vbios_mode->enh_table->flags & AST2500PreCatchCRT)) + precache = 40; ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00); @@ -299,12 +303,12 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod jregAD |= 0x01; /* HBE D[5] */ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f)); - temp = (mode->crtc_hsync_start >> 3) - 1; + temp = ((mode->crtc_hsync_start-precache) >> 3) - 1; if (temp & 0x100) jregAC |= 0x40; /* HRS D[5] */ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp); - temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f; + temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f; if (temp & 0x20) jregAD |= 0x04; /* HRE D[5] */ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05)); @@ -365,6 +369,11 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80)); + if (precache) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x80); + else + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x00); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80); } @@ -384,14 +393,18 @@ static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mo struct ast_vbios_mode_info *vbios_mode) { struct ast_private *ast = dev->dev_private; - struct ast_vbios_dclk_info *clk_info; + const struct ast_vbios_dclk_info *clk_info; - clk_info = &dclk_table[vbios_mode->enh_table->dclk_index]; + if (ast->chip == AST2500) + clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index]; + else + clk_info = &dclk_table[vbios_mode->enh_table->dclk_index]; ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f, - (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4)); + (clk_info->param3 & 0xc0) | + ((clk_info->param3 & 0x3) << 4)); } static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, @@ -425,7 +438,8 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8); /* Set Threshold */ - if (ast->chip == AST2300 || ast->chip == AST2400) { + if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) { ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60); } else if (ast->chip == AST2100 || @@ -800,7 +814,9 @@ static int ast_mode_valid(struct drm_connector *connector, if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) return MODE_OK; - if ((ast->chip == AST2100) || (ast->chip == AST2200) || (ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip 
== AST1180)) { + if ((ast->chip == AST2100) || (ast->chip == AST2200) || + (ast->chip == AST2300) || (ast->chip == AST2400) || + (ast->chip == AST2500) || (ast->chip == AST1180)) { if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080)) return MODE_OK; diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 5331ee1df086..f7d421359d56 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -31,7 +31,8 @@ #include "ast_dram_tables.h" -static void ast_init_dram_2300(struct drm_device *dev); +static void ast_post_chip_2300(struct drm_device *dev); +static void ast_post_chip_2500(struct drm_device *dev); void ast_enable_vga(struct drm_device *dev) { @@ -58,13 +59,9 @@ bool ast_is_vga_enabled(struct drm_device *dev) /* TODO 1180 */ } else { ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT); - if (ch) { - ast_open_key(ast); - ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff); - return ch & 0x04; - } + return !!(ch & 0x01); } - return 0; + return false; } static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; @@ -79,10 +76,11 @@ ast_set_def_ext_reg(struct drm_device *dev) const u8 *ext_reg_info; /* reset scratch */ - for (i = 0x81; i <= 0x8f; i++) + for (i = 0x81; i <= 0x9f; i++) ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00); - if (ast->chip == AST2300 || ast->chip == AST2400) { + if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) { if (dev->pdev->revision >= 0x20) ext_reg_info = extreginfo_ast2300; else @@ -106,7 +104,8 @@ ast_set_def_ext_reg(struct drm_device *dev) /* Enable RAMDAC for A1 */ reg = 0x04; - if (ast->chip == AST2300 || ast->chip == AST2400) + if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) reg |= 0x20; ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg); } @@ -375,21 +374,20 @@ void ast_post_gpu(struct drm_device *dev) pci_write_config_dword(ast->dev->pdev, 0x04, reg); ast_enable_vga(dev); - ast_enable_mmio(dev); ast_open_key(ast); + ast_enable_mmio(dev); ast_set_def_ext_reg(dev); - if (ast->DisableP2A == false) - { - if (ast->chip == AST2300 || ast->chip == AST2400) - ast_init_dram_2300(dev); + if (ast->config_mode == ast_use_p2a) { + if (ast->chip == AST2500) + ast_post_chip_2500(dev); + else if (ast->chip == AST2300 || ast->chip == AST2400) + ast_post_chip_2300(dev); else ast_init_dram_reg(dev); ast_init_3rdtx(dev); - } - else - { + } else { if (ast->tx_chip_type != AST_TX_NONE) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ } @@ -448,85 +446,70 @@ static const u32 pattern[8] = { 0x7C61D253 }; -static int mmc_test_burst(struct ast_private *ast, u32 datagen) +static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl) { u32 data, timeout; ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3)); + ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); timeout = 0; do { data = ast_mindwm(ast, 0x1e6e0070) & 0x3000; - if (data & 0x2000) { - return 0; - } + if (data & 0x2000) + return false; if (++timeout > TIMEOUT) { ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return 0; + return false; } } while (!data); - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return 1; + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return true; } -static int mmc_test_burst2(struct ast_private *ast, u32 datagen) +static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl) { u32 data, timeout; ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x00000041 | 
(datagen << 3)); + ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); timeout = 0; do { data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; if (++timeout > TIMEOUT) { ast_moutdwm(ast, 0x1e6e0070, 0x0); - return -1; + return 0xffffffff; } } while (!data); data = ast_mindwm(ast, 0x1e6e0078); data = (data | (data >> 16)) & 0xffff; - ast_moutdwm(ast, 0x1e6e0070, 0x0); + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); return data; } -static int mmc_test_single(struct ast_private *ast, u32 datagen) + +static bool mmc_test_burst(struct ast_private *ast, u32 datagen) { - u32 data, timeout; + return mmc_test(ast, datagen, 0xc1); +} - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3)); - timeout = 0; - do { - data = ast_mindwm(ast, 0x1e6e0070) & 0x3000; - if (data & 0x2000) - return 0; - if (++timeout > TIMEOUT) { - ast_moutdwm(ast, 0x1e6e0070, 0x0); - return 0; - } - } while (!data); - ast_moutdwm(ast, 0x1e6e0070, 0x0); - return 1; +static u32 mmc_test_burst2(struct ast_private *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x41); } -static int mmc_test_single2(struct ast_private *ast, u32 datagen) +static bool mmc_test_single(struct ast_private *ast, u32 datagen) { - u32 data, timeout; + return mmc_test(ast, datagen, 0xc5); +} - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3)); - timeout = 0; - do { - data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; - if (++timeout > TIMEOUT) { - ast_moutdwm(ast, 0x1e6e0070, 0x0); - return -1; - } - } while (!data); - data = ast_mindwm(ast, 0x1e6e0078); - data = (data | (data >> 16)) & 0xffff; - ast_moutdwm(ast, 0x1e6e0070, 0x0); - return data; +static u32 mmc_test_single2(struct ast_private *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x05); +} + +static bool mmc_test_single_2500(struct ast_private *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0x85); } static int cbr_test(struct ast_private *ast) @@ -604,16 +587,16 @@ static u32 cbr_scan2(struct ast_private *ast) return data2; } -static u32 cbr_test3(struct ast_private *ast) +static bool cbr_test3(struct ast_private *ast) { if (!mmc_test_burst(ast, 0)) - return 0; + return false; if (!mmc_test_single(ast, 0)) - return 0; - return 1; + return false; + return true; } -static u32 cbr_scan3(struct ast_private *ast) +static bool cbr_scan3(struct ast_private *ast) { u32 patcnt, loop; @@ -624,9 +607,9 @@ static u32 cbr_scan3(struct ast_private *ast) break; } if (loop == 2) - return 0; + return false; } - return 1; + return true; } static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param) @@ -1612,7 +1595,7 @@ ddr2_init_start: } -static void ast_init_dram_2300(struct drm_device *dev) +static void ast_post_chip_2300(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; struct ast2300_dram_param param; @@ -1638,12 +1621,44 @@ static void ast_init_dram_2300(struct drm_device *dev) temp |= 0x73; ast_write32(ast, 0x12008, temp); + param.dram_freq = 396; param.dram_type = AST_DDR3; + temp = ast_mindwm(ast, 0x1e6e2070); if (temp & 0x01000000) param.dram_type = AST_DDR2; - param.dram_chipid = ast->dram_type; - param.dram_freq = ast->mclk; - param.vram_size = ast->vram_size; + switch (temp & 0x18000000) { + case 0: + param.dram_chipid = AST_DRAM_512Mx16; + break; + default: + case 0x08000000: + param.dram_chipid = AST_DRAM_1Gx16; + break; + case 0x10000000: + param.dram_chipid = AST_DRAM_2Gx16; + break; + case 0x18000000: + param.dram_chipid = AST_DRAM_4Gx16; + break; + 
} + switch (temp & 0x0c) { + default: + case 0x00: + param.vram_size = AST_VIDMEM_SIZE_8M; + break; + + case 0x04: + param.vram_size = AST_VIDMEM_SIZE_16M; + break; + + case 0x08: + param.vram_size = AST_VIDMEM_SIZE_32M; + break; + + case 0x0c: + param.vram_size = AST_VIDMEM_SIZE_64M; + break; + } if (param.dram_type == AST_DDR3) { get_ddr3_info(ast, ¶m); @@ -1663,3 +1678,404 @@ static void ast_init_dram_2300(struct drm_device *dev) } while ((reg & 0x40) == 0); } +static bool cbr_test_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static bool ddr_test_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_burst(ast, 1)) + return false; + if (!mmc_test_burst(ast, 2)) + return false; + if (!mmc_test_burst(ast, 3)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static void ddr_init_common_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + ast_moutdwm(ast, 0x1E6E0008, 0x2003000F); + ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF); + ast_moutdwm(ast, 0x1E6E0040, 0x88448844); + ast_moutdwm(ast, 0x1E6E0044, 0x24422288); + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); + ast_moutdwm(ast, 0x1E6E004C, 0x22222222); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0208, 0x00000000); + ast_moutdwm(ast, 0x1E6E0218, 0x00000000); + ast_moutdwm(ast, 0x1E6E0220, 0x00000000); + ast_moutdwm(ast, 0x1E6E0228, 0x00000000); + ast_moutdwm(ast, 0x1E6E0230, 0x00000000); + ast_moutdwm(ast, 0x1E6E02A8, 0x00000000); + ast_moutdwm(ast, 0x1E6E02B0, 0x00000000); + ast_moutdwm(ast, 0x1E6E0240, 0x86000000); + ast_moutdwm(ast, 0x1E6E0244, 0x00008600); + ast_moutdwm(ast, 0x1E6E0248, 0x80000000); + ast_moutdwm(ast, 0x1E6E024C, 0x80808080); +} + +static void ddr_phy_init_2500(struct ast_private *ast) +{ + u32 data, pass, timecnt; + + pass = 0; + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + while (!pass) { + for (timecnt = 0; timecnt < TIMEOUT; timecnt++) { + data = ast_mindwm(ast, 0x1E6E0060) & 0x1; + if (!data) + break; + } + if (timecnt != TIMEOUT) { + data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000; + if (!data) + pass = 1; + } + if (!pass) { + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + udelay(10); /* delay 10 us */ + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + } + } + + ast_moutdwm(ast, 0x1E6E0060, 0x00000006); +} + +/* + * Check DRAM Size + * 1Gb : 0x80000000 ~ 0x87FFFFFF + * 2Gb : 0x80000000 ~ 0x8FFFFFFF + * 4Gb : 0x80000000 ~ 0x9FFFFFFF + * 8Gb : 0x80000000 ~ 0xBFFFFFFF + */ +static void check_dram_size_2500(struct ast_private *ast, u32 tRFC) +{ + u32 reg_04, reg_14; + + reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc; + reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00; + + ast_moutdwm(ast, 0xA0100000, 0x41424344); + ast_moutdwm(ast, 0x90100000, 0x35363738); + ast_moutdwm(ast, 0x88100000, 0x292A2B2C); + ast_moutdwm(ast, 0x80100000, 0x1D1E1F10); + + /* Check 8Gbit */ + if (ast_mindwm(ast, 0xA0100000) == 0x41424344) { + reg_04 |= 0x03; + reg_14 |= (tRFC >> 24) & 0xFF; + /* Check 4Gbit */ + } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) { + reg_04 |= 0x02; + reg_14 |= (tRFC >> 16) & 0xFF; + /* Check 2Gbit */ + } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) { + reg_04 |= 0x01; + reg_14 |= (tRFC >> 8) & 0xFF; + } 
else { + reg_14 |= tRFC & 0xFF; + } + ast_moutdwm(ast, 0x1E6E0004, reg_04); + ast_moutdwm(ast, 0x1E6E0014, reg_14); +} + +static void enable_cache_2500(struct ast_private *ast) +{ + u32 reg_04, data; + + reg_04 = ast_mindwm(ast, 0x1E6E0004); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000); + + do + data = ast_mindwm(ast, 0x1E6E0004); + while (!(data & 0x80000)); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400); +} + +static void set_mpll_2500(struct ast_private *ast) +{ + u32 addr, data, param; + + /* Reset MMC */ + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + for (addr = 0x1e6e0004; addr < 0x1e6e0090;) { + ast_moutdwm(ast, addr, 0x0); + addr += 4; + } + ast_moutdwm(ast, 0x1E6E0034, 0x00020000); + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000; + if (data) { + /* CLKIN = 25MHz */ + param = 0x930023E0; + ast_moutdwm(ast, 0x1E6E2160, 0x00011320); + } else { + /* CLKIN = 24MHz */ + param = 0x93002400; + } + ast_moutdwm(ast, 0x1E6E2020, param); + udelay(100); +} + +static void reset_mmc_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E78505C, 0x00000004); + ast_moutdwm(ast, 0x1E785044, 0x00000001); + ast_moutdwm(ast, 0x1E785048, 0x00004755); + ast_moutdwm(ast, 0x1E78504C, 0x00000013); + mdelay(100); + ast_moutdwm(ast, 0x1E785054, 0x00000077); + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); +} + +static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table) +{ + + ast_moutdwm(ast, 0x1E6E0004, 0x00000303); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x00001001); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x00020091); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table) +{ + u32 data, data2, pass, retrycnt; + u32 ddr_vref, phy_vref; + u32 min_ddr_vref = 0, min_phy_vref = 0; + u32 max_ddr_vref = 0, max_phy_vref = 0; + + ast_moutdwm(ast, 0x1E6E0004, 0x00000313); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + 
ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x09002000); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C); + ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x0001A991); + + /* Train PHY Vref first */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + max_phy_vref = 0x0; + pass = 0; + ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06); + for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + data = ast_mindwm(ast, 0x1E6E03D0); + data2 = data >> 8; + data = data & 0xff; + if (data > data2) + data = data2; + if (max_phy_vref < data) { + max_phy_vref = data; + min_phy_vref = phy_vref; + } + } else if (pass > 0) + break; + } + } + ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8)); + + /* Train DDR Vref next */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + min_ddr_vref = 0xFF; + max_ddr_vref = 0x0; + pass = 0; + for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + if (min_ddr_vref > ddr_vref) + min_ddr_vref = ddr_vref; + if (max_ddr_vref < ddr_vref) + max_ddr_vref = ddr_vref; + } else if (pass != 0) + break; + } + } + + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1; + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static bool ast_dram_init_2500(struct ast_private *ast) +{ + u32 data; + u32 max_tries = 5; + + do { + if (max_tries-- == 0) + return false; + set_mpll_2500(ast); + reset_mmc_2500(ast); + ddr_init_common_2500(ast); + + data = ast_mindwm(ast, 0x1E6E2070); + if (data & 0x01000000) + 
ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table); + else + ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table); + } while (!ddr_test_2500(ast)); + + ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41); + + /* Patch code */ + data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF; + ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000); + + return true; +} + +void ast_post_chip_2500(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u32 temp; + u8 reg; + + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if ((reg & 0x80) == 0) {/* vga only */ + /* Clear bus lock condition */ + ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); + ast_moutdwm(ast, 0x1e600084, 0x00010000); + ast_moutdwm(ast, 0x1e600088, 0x00000000); + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688a8a8); + while (ast_read32(ast, 0x12000) != 0x1) + ; + + ast_write32(ast, 0x10000, 0xfc600309); + while (ast_read32(ast, 0x10000) != 0x1) + ; + + /* Slow down CPU/AHB CLK in VGA only mode */ + temp = ast_read32(ast, 0x12008); + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + + /* Reset USB port to patch USB unknown device issue */ + ast_moutdwm(ast, 0x1e6e2090, 0x20000000); + temp = ast_mindwm(ast, 0x1e6e2094); + temp |= 0x00004000; + ast_moutdwm(ast, 0x1e6e2094, temp); + temp = ast_mindwm(ast, 0x1e6e2070); + if (temp & 0x00800000) { + ast_moutdwm(ast, 0x1e6e207c, 0x00800000); + mdelay(100); + ast_moutdwm(ast, 0x1e6e2070, 0x00800000); + } + + if (!ast_dram_init_2500(ast)) + DRM_ERROR("DRAM init failed !\n"); + + temp = ast_mindwm(ast, 0x1e6e2040); + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); + } + + /* wait ready */ + do { + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + } while ((reg & 0x40) == 0); +} diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index 3608d5aa7451..5f4c2e833a65 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -47,6 +47,7 @@ #define SyncPN (PVSync | NHSync) #define SyncNP (NVSync | PHSync) #define SyncNN (NVSync | NHSync) +#define AST2500PreCatchCRT 0x00004000 /* DCLK Index */ #define VCLK25_175 0x00 @@ -78,37 +79,67 @@ #define VCLK97_75 0x19 #define VCLK118_25 0x1A -static struct ast_vbios_dclk_info dclk_table[] = { - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ - {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ - {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ - {0x77, 0x58, 0x80}, /* 17: VCLK119 */ - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ - {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */ +static const struct ast_vbios_dclk_info dclk_table[] = { + {0x2C, 0xE7, 0x03}, /* 00: 
VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ + {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ + {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ + {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x77, 0x58, 0x80}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */ }; -static struct ast_vbios_stdtable vbios_stdtable[] = { +static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { + {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ + {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ + {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ + {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x58, 0x01, 0x42}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x44, 0x20, 0x43}, /* 1A: VCLK118_25 */ +}; + +static const struct ast_vbios_stdtable vbios_stdtable[] = { /* MD_2_3_400 */ { 0x67, @@ -181,21 +212,21 @@ static struct ast_vbios_stdtable vbios_stdtable[] = { }, }; -static struct ast_vbios_enhtable res_640x480[] = { +static const struct ast_vbios_enhtable res_640x480[] = { { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E }, { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E }, { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */ (SyncNN | Charx8Dot) , 75, 3, 0x2E }, - { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */ + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */ (SyncNN | Charx8Dot) , 85, 4, 0x2E }, - { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */ + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */ (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E }, }; -static struct ast_vbios_enhtable res_800x600[] = { - {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */ +static const struct ast_vbios_enhtable res_800x600[] = { + {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */ (SyncPP | Charx8Dot), 56, 1, 0x30 }, {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */ (SyncPP | 
Charx8Dot), 60, 2, 0x30 }, @@ -210,7 +241,7 @@ static struct ast_vbios_enhtable res_800x600[] = { }; -static struct ast_vbios_enhtable res_1024x768[] = { +static const struct ast_vbios_enhtable res_1024x768[] = { {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */ (SyncNN | Charx8Dot), 60, 1, 0x31 }, {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */ @@ -223,7 +254,7 @@ static struct ast_vbios_enhtable res_1024x768[] = { (SyncPP | Charx8Dot), 0xFF, 4, 0x31 }, }; -static struct ast_vbios_enhtable res_1280x1024[] = { +static const struct ast_vbios_enhtable res_1280x1024[] = { {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */ (SyncPP | Charx8Dot), 60, 1, 0x32 }, {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */ @@ -234,7 +265,7 @@ static struct ast_vbios_enhtable res_1280x1024[] = { (SyncPP | Charx8Dot), 0xFF, 3, 0x32 }, }; -static struct ast_vbios_enhtable res_1600x1200[] = { +static const struct ast_vbios_enhtable res_1600x1200[] = { {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */ (SyncPP | Charx8Dot), 60, 1, 0x33 }, {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */ @@ -242,34 +273,39 @@ static struct ast_vbios_enhtable res_1600x1200[] = { }; /* 16:9 */ -static struct ast_vbios_enhtable res_1360x768[] = { - {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */ +static const struct ast_vbios_enhtable res_1360x768[] = { + {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 }, - {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* end */ - (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x39 }, + {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* end */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 0xFF, 1, 0x39 }, }; -static struct ast_vbios_enhtable res_1600x900[] = { - {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A }, - {2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ +static const struct ast_vbios_enhtable res_1600x900[] = { + {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x3A }, + {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3A }, - {2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ + {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x3A }, }; -static struct ast_vbios_enhtable res_1920x1080[] = { +static const struct ast_vbios_enhtable res_1920x1080[] = { {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x38 }, + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x38 }, {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x38 }, + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 0xFF, 1, 0x38 }, }; /* 16:10 */ -static struct ast_vbios_enhtable res_1280x800[] = { - {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */ - (SyncNP | Charx8Dot | LineCompareOff | 
WideScreenMode | NewModeInfo), 60, 1, 0x35 }, +static const struct ast_vbios_enhtable res_1280x800[] = { + {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x35 }, {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 }, {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ @@ -277,29 +313,33 @@ static struct ast_vbios_enhtable res_1280x800[] = { }; -static struct ast_vbios_enhtable res_1440x900[] = { +static const struct ast_vbios_enhtable res_1440x900[] = { {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 }, + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x36 }, {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 }, {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x36 }, }; -static struct ast_vbios_enhtable res_1680x1050[] = { - {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 }, +static const struct ast_vbios_enhtable res_1680x1050[] = { + {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x37 }, {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 }, {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x37 }, }; -static struct ast_vbios_enhtable res_1920x1200[] = { - {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 }, - {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 }, +static const struct ast_vbios_enhtable res_1920x1200[] = { + {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x34 }, + {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 0xFF, 1, 0x34 }, }; #endif diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 32d43f86a8f2..96bb6badb818 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -34,6 +34,8 @@ */ #include <linux/export.h> +#include <linux/sched/signal.h> + #include <drm/drmP.h> #include "drm_legacy.h" #include "drm_internal.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index e78f1406885d..fd56f92f3469 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -16,6 +16,8 @@ #include <linux/spinlock.h> #include <linux/shmem_fs.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include "etnaviv_drv.h" #include "etnaviv_gem.h" diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index d037adcda6f2..29bb8011dbc4 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -141,7 +141,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * if (!obj->base.filp) return -ENODEV; - ret = obj->base.filp->f_op->mmap(obj->base.filp, vma); + ret = call_mmap(obj->base.filp, vma); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index f31deeb72703..e7c3c0318ff6 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -24,6 +24,9 @@ #include <linux/prefetch.h> #include <linux/dma-fence-array.h> +#include <linux/sched.h> +#include <linux/sched/clock.h> +#include <linux/sched/signal.h> #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 0115989e324a..22b46398831e 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -31,6 +31,7 @@ #include <linux/mmu_notifier.h> #include <linux/mempolicy.h> #include <linux/swap.h> +#include <linux/sched/mm.h> struct i915_mm_struct { struct mm_struct *mm; diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index fcfa423d08bd..7044e9a6abf7 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -23,6 +23,7 @@ */ #include <linux/kthread.h> +#include <uapi/linux/sched/types.h> #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b9cde116dab3..344f238b283f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -28,6 +28,7 @@ #include <linux/async.h> #include <linux/i2c.h> #include <linux/hdmi.h> +#include <linux/sched/clock.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include <drm/drm_crtc.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index a2bb855a2851..ac5800c72cb4 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -18,7 +18,7 @@ #include <linux/jiffies.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/of_device.h> diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index ad31b3eb408f..0e4eb845cbb0 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -24,6 +24,7 @@ config ROCKCHIP_ANALOGIX_DP config ROCKCHIP_CDN_DP tristate "Rockchip cdn DP" depends on DRM_ROCKCHIP + depends on EXTCON select SND_SOC_HDMI_CODEC if SND_SOC help This selects support for Rockchip SoC specific extensions diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 9ab67a670885..fd79a70b8552 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -111,7 +111,7 @@ static int cdn_dp_clk_enable(struct cdn_dp_device *dp) ret = pm_runtime_get_sync(dp->dev); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret); - goto err_pclk; + goto err_pm_runtime_get; } reset_control_assert(dp->core_rst); @@ -133,6 +133,8 @@ static int cdn_dp_clk_enable(struct cdn_dp_device *dp) return 0; err_set_rate: + pm_runtime_put(dp->dev); 
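The cdn-dp error-path fix in progress here is the kernel's usual cascading-label unwind: each label releases exactly the resources acquired before the failing step, so a failed pm_runtime_get_sync() skips the newly added put while later failures fall through it. A self-contained toy version of the pattern, assuming illustrative acquire_*/release_* helpers rather than the real clk/pm_runtime API:

#include <stdio.h>

/* Illustrative acquire/release pairs; the third acquisition fails on purpose. */
static int  acquire_a(void) { puts("acquire A"); return 0; }
static int  acquire_b(void) { puts("acquire B"); return 0; }
static int  acquire_c(void) { puts("acquire C failed"); return -1; }
static void release_a(void) { puts("release A"); }
static void release_b(void) { puts("release B"); }

static int enable_path(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto err_a;
	ret = acquire_b();
	if (ret)
		goto err_b;
	ret = acquire_c();
	if (ret)
		goto err_c;		/* unwinds B then A, never C */
	return 0;

err_c:
	release_b();
err_b:
	release_a();
err_a:
	return ret;
}

int main(void)
{
	return enable_path() ? 1 : 0;
}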
+err_pm_runtime_get: clk_disable_unprepare(dp->core_clk); err_core_clk: clk_disable_unprepare(dp->pclk); diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index f154fb1929bd..913f4318cdc0 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c @@ -33,7 +33,7 @@ #include <linux/atomic.h> #include <linux/errno.h> #include <linux/wait.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/module.h> #define TTM_WRITE_LOCK_PENDING (1 << 0) diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index ab3016982466..1eef98c3331d 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -26,6 +26,7 @@ #include <linux/pm_runtime.h> #include <linux/device.h> #include <linux/io.h> +#include <linux/sched/signal.h> #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 7ccbb03e98de..a1f42d125e6e 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -288,7 +288,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj, if (!obj->filp) return -ENODEV; - ret = obj->filp->f_op->mmap(obj->filp, vma); + ret = call_mmap(obj->filp, vma); if (ret) return ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 30f989a0cafc..491866865c33 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -176,7 +176,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) #endif ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs, - callbacks, names); + callbacks, names, NULL); if (ret) { DRM_ERROR("failed to find virt queues\n"); goto err_vqs; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 541a5887dd6c..d08f26973d0b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -199,9 +199,14 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(VMW_PRESENT_READBACK, vmw_present_readback_ioctl, DRM_MASTER | DRM_AUTH), + /* + * The permissions of the below ioctl are overridden in + * vmw_generic_ioctl(). We require either + * DRM_MASTER or capable(CAP_SYS_ADMIN). 
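The override described in the comment above is applied by hand in vmw_generic_ioctl(): the ioctl table entry is relaxed to DRM_RENDER_ALLOW and the handler itself then refuses callers that are neither the current DRM master nor CAP_SYS_ADMIN capable. A stand-alone sketch of that either/or gate, with stub predicates in place of the real drm_is_current_master()/capable():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stub predicates; the real checks are drm_is_current_master() and capable(CAP_SYS_ADMIN). */
static bool is_current_master(void) { return false; }
static bool has_sys_admin(void)     { return true; }

/* Allow if master OR admin, otherwise -EACCES, as in the vmw_generic_ioctl() hunk below. */
static int check_update_layout_perm(void)
{
	if (!is_current_master() && !has_sys_admin())
		return -EACCES;
	return 0;
}

int main(void)
{
	printf("VMW_UPDATE_LAYOUT permission check: %d\n", check_update_layout_perm());
	return 0;
}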
+ */ VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_CREATE_SHADER, vmw_shader_define_ioctl, DRM_AUTH | DRM_RENDER_ALLOW), @@ -1123,6 +1128,10 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, return (long) vmw_execbuf_ioctl(dev, arg, file_priv, _IOC_SIZE(cmd)); + } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { + if (!drm_is_current_master(file_priv) && + !capable(CAP_SYS_ADMIN)) + return -EACCES; } if (unlikely(ioctl->cmd != cmd)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 1e59a486bba8..59ff4197173a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -41,9 +41,9 @@ #include <drm/ttm/ttm_module.h> #include "vmwgfx_fence.h" -#define VMWGFX_DRIVER_DATE "20160210" +#define VMWGFX_DRIVER_DATE "20170221" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 11 +#define VMWGFX_DRIVER_MINOR 12 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c index 1d08ba381098..d646ac931663 100644 --- a/drivers/gpu/drm/zte/zx_plane.c +++ b/drivers/gpu/drm/zte/zx_plane.c @@ -159,7 +159,7 @@ static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format, void __iomem *rsz = zplane->rsz; u32 src_chroma_w = src_w; u32 src_chroma_h = src_h; - u32 fmt; + int fmt; /* Set up source and destination resolution */ zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1)); @@ -203,7 +203,7 @@ static void zx_vl_plane_atomic_update(struct drm_plane *plane, u32 src_x, src_y, src_w, src_h; u32 dst_x, dst_y, dst_w, dst_h; uint32_t format; - u32 fmt; + int fmt; int num_planes; int i; diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 0f5b2dd24507..92f1452dad57 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -41,7 +41,7 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/poll.h> diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index acfb522a432a..c6c9c51c806f 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -30,7 +30,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/uaccess.h> diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index 76d06cf87b2a..fb77dec720a4 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -25,7 +25,7 @@ #include <linux/cdev.h> #include <linux/poll.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/hid-roccat.h> #include <linux/module.h> diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index f0e2757cb909..ec530454e6f6 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -33,7 +33,7 @@ #include <linux/slab.h> #include <linux/hid.h> #include <linux/mutex.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/hidraw.h> diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 700145b15088..774bd701dae0 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -27,6 +27,7 @@ #include <linux/poll.h> #include 
<linux/slab.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c index 7175e6bedf21..727f968ac1cb 100644 --- a/drivers/hsi/clients/cmt_speech.c +++ b/drivers/hsi/clients/cmt_speech.c @@ -31,7 +31,7 @@ #include <linux/slab.h> #include <linux/fs.h> #include <linux/poll.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/ioctl.h> #include <linux/uaccess.h> #include <linux/pm_qos.h> diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index f7f6b9144b07..da6b59ba5940 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -34,6 +34,8 @@ #include <linux/kernel_stat.h> #include <linux/clockchips.h> #include <linux/cpu.h> +#include <linux/sched/task_stack.h> + #include <asm/hyperv.h> #include <asm/hypervisor.h> #include <asm/mshyperv.h> diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 412b91d255ad..961c5f42d956 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -37,6 +37,8 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> +#include <linux/sched/signal.h> + #include <asm/irq.h> #include <linux/io.h> #include <linux/i2c.h> diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 565a49a0c564..96caf378b1dc 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index aef00511ca86..74f1b7dc03f7 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> +#include <linux/sched/task_stack.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/seq_file.h> diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 247b9faccce1..4c0007cb74e3 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -19,6 +19,7 @@ #include <linux/delay.h> #include <linux/hdreg.h> #include <linux/ide.h> +#include <linux/nmi.h> #include <linux/scatterlist.h> #include <linux/uaccess.h> diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 7d8ea3d5fda6..5805b041dd0f 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -125,7 +125,7 @@ static struct cpuidle_state *cpuidle_state_table; */ static struct cpuidle_state nehalem_cstates[] = { { - .name = "C1-NHM", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 3, @@ -133,7 +133,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-NHM", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -141,7 +141,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-NHM", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 20, @@ -149,7 +149,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-NHM", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | 
CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, @@ -162,7 +162,7 @@ static struct cpuidle_state nehalem_cstates[] = { static struct cpuidle_state snb_cstates[] = { { - .name = "C1-SNB", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -170,7 +170,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-SNB", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -178,7 +178,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-SNB", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, @@ -186,7 +186,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-SNB", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 104, @@ -194,7 +194,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-SNB", + .name = "C7", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 109, @@ -207,7 +207,7 @@ static struct cpuidle_state snb_cstates[] = { static struct cpuidle_state byt_cstates[] = { { - .name = "C1-BYT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -215,7 +215,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6N-BYT", + .name = "C6N", .desc = "MWAIT 0x58", .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, @@ -223,7 +223,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6S-BYT", + .name = "C6S", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 500, @@ -231,7 +231,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-BYT", + .name = "C7", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, @@ -239,7 +239,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7S-BYT", + .name = "C7S", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -252,7 +252,7 @@ static struct cpuidle_state byt_cstates[] = { static struct cpuidle_state cht_cstates[] = { { - .name = "C1-CHT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -260,7 +260,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6N-CHT", + .name = "C6N", .desc = "MWAIT 0x58", .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, @@ -268,7 +268,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6S-CHT", + .name = "C6S", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, @@ -276,7 +276,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-CHT", + .name = "C7", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, @@ 
-284,7 +284,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7S-CHT", + .name = "C7S", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -297,7 +297,7 @@ static struct cpuidle_state cht_cstates[] = { static struct cpuidle_state ivb_cstates[] = { { - .name = "C1-IVB", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -305,7 +305,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVB", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -313,7 +313,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVB", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -321,7 +321,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVB", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, @@ -329,7 +329,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-IVB", + .name = "C7", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 87, @@ -342,7 +342,7 @@ static struct cpuidle_state ivb_cstates[] = { static struct cpuidle_state ivt_cstates[] = { { - .name = "C1-IVT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -350,7 +350,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVT", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -358,7 +358,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVT", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -366,7 +366,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVT", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 82, @@ -379,7 +379,7 @@ static struct cpuidle_state ivt_cstates[] = { static struct cpuidle_state ivt_cstates_4s[] = { { - .name = "C1-IVT-4S", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -387,7 +387,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVT-4S", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -395,7 +395,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVT-4S", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -403,7 +403,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVT-4S", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 84, @@ -416,7 +416,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { static struct cpuidle_state 
ivt_cstates_8s[] = { { - .name = "C1-IVT-8S", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -424,7 +424,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVT-8S", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -432,7 +432,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVT-8S", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -440,7 +440,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVT-8S", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 88, @@ -453,7 +453,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { static struct cpuidle_state hsw_cstates[] = { { - .name = "C1-HSW", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -461,7 +461,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-HSW", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -469,7 +469,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-HSW", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 33, @@ -477,7 +477,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-HSW", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -485,7 +485,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-HSW", + .name = "C7s", .desc = "MWAIT 0x32", .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 166, @@ -493,7 +493,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-HSW", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, @@ -501,7 +501,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-HSW", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 600, @@ -509,7 +509,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-HSW", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2600, @@ -521,7 +521,7 @@ static struct cpuidle_state hsw_cstates[] = { }; static struct cpuidle_state bdw_cstates[] = { { - .name = "C1-BDW", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -529,7 +529,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-BDW", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -537,7 +537,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-BDW", + .name = "C3", .desc = "MWAIT 
0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 40, @@ -545,7 +545,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-BDW", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -553,7 +553,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-BDW", + .name = "C7s", .desc = "MWAIT 0x32", .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 166, @@ -561,7 +561,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-BDW", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, @@ -569,7 +569,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-BDW", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 600, @@ -577,7 +577,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-BDW", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2600, @@ -590,7 +590,7 @@ static struct cpuidle_state bdw_cstates[] = { static struct cpuidle_state skl_cstates[] = { { - .name = "C1-SKL", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -598,7 +598,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-SKL", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -606,7 +606,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-SKL", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 70, @@ -614,7 +614,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-SKL", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 85, @@ -622,7 +622,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-SKL", + .name = "C7s", .desc = "MWAIT 0x33", .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 124, @@ -630,7 +630,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-SKL", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, @@ -638,7 +638,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-SKL", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 480, @@ -646,7 +646,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-SKL", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 890, @@ -659,7 +659,7 @@ static struct cpuidle_state skl_cstates[] = { static struct cpuidle_state skx_cstates[] = { { - .name = "C1-SKX", + .name = "C1", .desc 
= "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -667,7 +667,7 @@ static struct cpuidle_state skx_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-SKX", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -675,7 +675,7 @@ static struct cpuidle_state skx_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-SKX", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -688,7 +688,7 @@ static struct cpuidle_state skx_cstates[] = { static struct cpuidle_state atom_cstates[] = { { - .name = "C1E-ATM", + .name = "C1E", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 10, @@ -696,7 +696,7 @@ static struct cpuidle_state atom_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C2-ATM", + .name = "C2", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10), .exit_latency = 20, @@ -704,7 +704,7 @@ static struct cpuidle_state atom_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C4-ATM", + .name = "C4", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, @@ -712,7 +712,7 @@ static struct cpuidle_state atom_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-ATM", + .name = "C6", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, @@ -724,7 +724,7 @@ static struct cpuidle_state atom_cstates[] = { }; static struct cpuidle_state tangier_cstates[] = { { - .name = "C1-TNG", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -732,7 +732,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C4-TNG", + .name = "C4", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, @@ -740,7 +740,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-TNG", + .name = "C6", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, @@ -748,7 +748,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-TNG", + .name = "C7", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, @@ -756,7 +756,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-TNG", + .name = "C9", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -768,7 +768,7 @@ static struct cpuidle_state tangier_cstates[] = { }; static struct cpuidle_state avn_cstates[] = { { - .name = "C1-AVN", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -776,7 +776,7 @@ static struct cpuidle_state avn_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-AVN", + .name = "C6", .desc = "MWAIT 0x51", .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 15, @@ -788,7 +788,7 @@ static struct cpuidle_state avn_cstates[] = { }; static struct cpuidle_state knl_cstates[] = { { - .name = "C1-KNL", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -796,7 +796,7 @@ static struct 
cpuidle_state knl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze }, { - .name = "C6-KNL", + .name = "C6", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 120, @@ -809,7 +809,7 @@ static struct cpuidle_state knl_cstates[] = { static struct cpuidle_state bxt_cstates[] = { { - .name = "C1-BXT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -817,7 +817,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-BXT", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -825,7 +825,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-BXT", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -833,7 +833,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-BXT", + .name = "C7s", .desc = "MWAIT 0x31", .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 155, @@ -841,7 +841,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-BXT", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1000, @@ -849,7 +849,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-BXT", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2000, @@ -857,7 +857,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-BXT", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -870,7 +870,7 @@ static struct cpuidle_state bxt_cstates[] = { static struct cpuidle_state dnv_cstates[] = { { - .name = "C1-DNV", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -878,7 +878,7 @@ static struct cpuidle_state dnv_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-DNV", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -886,7 +886,7 @@ static struct cpuidle_state dnv_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-DNV", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 50, @@ -961,9 +961,9 @@ static void auto_demotion_disable(void) { unsigned long long msr_bits; - rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); msr_bits &= ~(icpu->auto_demotion_disable_flags); - wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); + wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); } static void c1e_promotion_disable(void) { @@ -1273,7 +1273,7 @@ static void sklh_idle_state_table_update(void) if ((mwait_substates & (0xF << 28)) == 0) return; - rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr); + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); /* PC10 is not enabled in PKG C-state limit */ if ((msr & 0xF) != 8) diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c index 0c44f72c32a8..018ed360e717 100644 --- a/drivers/iio/adc/rcar-gyroadc.c +++ b/drivers/iio/adc/rcar-gyroadc.c @@ -336,7 
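
The intel_idle hunks above drop the CPU-family suffix from the C-state names (e.g. "C1-NHM" and "C1-SNB" both become plain "C1") and move the driver to the MSR_PKG_CST_CONFIG_CONTROL name for the package C-state control MSR. The names are user-visible through the cpuidle sysfs interface, so the effect can be checked from userspace; the small sketch below (sysfs path assumed, not part of the patch) prints the name of each idle state of CPU 0:

#include <stdio.h>

/* Assumes the usual layout /sys/devices/system/cpu/cpu0/cpuidle/stateN/name. */
int main(void)
{
	char path[128], buf[64];

	for (int i = 0; ; i++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpuidle/state%d/name", i);
		FILE *f = fopen(path, "r");
		if (!f)
			break;				/* no more states */
		if (fgets(buf, sizeof(buf), f))
			printf("state%d: %s", i, buf);	/* e.g. "state1: C1E" */
		fclose(f);
	}
	return 0;
}
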
+336,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) struct device_node *child; struct regulator *vref; unsigned int reg; - unsigned int adcmode, childmode; + unsigned int adcmode = -1, childmode; unsigned int sample_width; unsigned int num_channels; int ret, first = 1; @@ -366,6 +366,8 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) channels = rcar_gyroadc_iio_channels_3; num_channels = ARRAY_SIZE(rcar_gyroadc_iio_channels_3); break; + default: + return -EINVAL; } /* diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 0a6beb3d99cb..56cf5907a5f0 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -1208,7 +1208,7 @@ static int xadc_probe(struct platform_device *pdev) ret = xadc->ops->setup(pdev, indio_dev, irq); if (ret) - goto err_free_samplerate_trigger; + goto err_clk_disable_unprepare; ret = request_irq(irq, xadc->ops->interrupt_handler, 0, dev_name(&pdev->dev), indio_dev); @@ -1268,6 +1268,8 @@ static int xadc_probe(struct platform_device *pdev) err_free_irq: free_irq(irq, indio_dev); +err_clk_disable_unprepare: + clk_disable_unprepare(xadc->clk); err_free_samplerate_trigger: if (xadc->ops->flags & XADC_FLAGS_BUFFERED) iio_trigger_free(xadc->samplerate_trigger); @@ -1277,8 +1279,6 @@ err_free_convst_trigger: err_triggered_buffer_cleanup: if (xadc->ops->flags & XADC_FLAGS_BUFFERED) iio_triggered_buffer_cleanup(indio_dev); -err_clk_disable_unprepare: - clk_disable_unprepare(xadc->clk); err_device_free: kfree(indio_dev->channels); diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c index a5913e97945e..f9b8fc9ae13f 100644 --- a/drivers/iio/counter/104-quad-8.c +++ b/drivers/iio/counter/104-quad-8.c @@ -76,7 +76,7 @@ static int quad8_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; } - flags = inb(base_offset); + flags = inb(base_offset + 1); borrow = flags & BIT(0); carry = !!(flags & BIT(1)); diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 4972986f6455..d2b465140a6b 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -20,7 +20,7 @@ #include <linux/cdev.h> #include <linux/slab.h> #include <linux/poll.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/iio/iio.h> #include "iio_core.h" diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 446b56a5260b..27f155d2df8d 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -34,7 +34,8 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/export.h> #include <linux/hugetlb.h> #include <linux/slab.h> diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index f2fc0431512d..cb2742b548bb 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -32,6 +32,8 @@ #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/pid.h> #include <linux/slab.h> #include <linux/export.h> diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 318ec5267bdf..86ecd3ea6a4b 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -37,7 +37,7 @@ #include <linux/delay.h> #include <linux/errno.h> #include <linux/list.h> 
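
The xilinx-xadc hunk above is a classic error-path ordering fix: once the clock has been prepared, every later failure in probe must unwind through a label that disables it again, and the labels must run in reverse order of acquisition. A minimal userspace sketch of the same goto-unwind pattern (resource names invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the resources a probe function might acquire. */
static int *acquire_clock(void) { return malloc(sizeof(int)); }
static int *acquire_irq(void)   { return NULL; /* simulate a failure */ }

static int probe(void)
{
	int *clk = acquire_clock();
	if (!clk)
		return -1;

	int *irq = acquire_irq();
	if (!irq)
		goto err_release_clock;	/* later failures unwind earlier resources */

	free(irq);
	free(clk);
	return 0;

err_release_clock:
	free(clk);			/* mirrors clk_disable_unprepare() in the fix */
	printf("probe failed, clock released\n");
	return -1;
}

int main(void) { return probe() ? 1 : 0; }
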
-#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index d19662f635b1..5846c47c8d55 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -37,7 +37,7 @@ #include <linux/idr.h> #include <linux/completion.h> #include <linux/netdevice.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/inet.h> diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 7a3d906b3671..e2cd2cd3b28a 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -576,7 +576,7 @@ int hfi1_get_proc_affinity(int node) struct hfi1_affinity_node *entry; cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask; const struct cpumask *node_mask, - *proc_mask = tsk_cpus_allowed(current); + *proc_mask = &current->cpus_allowed; struct hfi1_affinity_node_list *affinity = &node_affinity; struct cpu_mask_set *set = &affinity->proc; diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 3b19c16a9e45..f78c739b330a 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -48,6 +48,7 @@ #include <linux/cdev.h> #include <linux/vmalloc.h> #include <linux/io.h> +#include <linux/sched/mm.h> #include <rdma/ib.h> diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 1d81cac1fa6c..5cde1ecda0fe 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -856,7 +856,7 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, { struct sdma_rht_node *rht_node; struct sdma_engine *sde = NULL; - const struct cpumask *current_mask = tsk_cpus_allowed(current); + const struct cpumask *current_mask = &current->cpus_allowed; unsigned long cpu_id; /* diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c index 20f4ddcac3b0..68295a12b771 100644 --- a/drivers/infiniband/hw/hfi1/user_pages.c +++ b/drivers/infiniband/hw/hfi1/user_pages.c @@ -46,7 +46,7 @@ */ #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/device.h> #include <linux/module.h> diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 88608906ce25..fba94df28cf1 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -39,6 +39,9 @@ #include <linux/inetdevice.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> + #include <net/ipv6.h> #include <net/addrconf.h> #include <net/devlink.h> diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5b3355268725..4dc0a8785fe0 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -41,6 +41,8 @@ #include <asm/pat.h> #endif #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/delay.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 75f08624ac05..ce83ba9a12ef 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -32,6 +32,7 @@ */ #include
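
The hfi1 hunks above (affinity.c and sdma.c) drop the tsk_cpus_allowed() wrapper, which this kernel series removed, and read current->cpus_allowed directly. A brief kernel-style fragment, for illustration only (not taken from the diff), of how such a mask is typically consumed:

const struct cpumask *mask = &current->cpus_allowed;
unsigned int cpu;

for_each_cpu(cpu, mask)		/* walk the CPUs this task may run on */
	pr_debug("allowed cpu %u\n", cpu);

cpu = cpumask_first(mask);	/* e.g. pick the first allowed CPU */
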
<linux/mm.h> +#include <linux/sched/signal.h> #include <linux/device.h> #include "qib.h" diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 1ccee6ea5bc3..c49db7c33979 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -34,7 +34,8 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/hugetlb.h> #include <linux/iommu.h> #include <linux/workqueue.h> diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index a6d6c617b597..0cdf2b7f272f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -38,6 +38,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/moduleparam.h> +#include <linux/sched/signal.h> #include "ipoib.h" diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index deedb6fc1b05..3e10e3dac2e7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -31,6 +31,7 @@ */ #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/init.h> #include <linux/seq_file.h> diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c index 3422464af229..198678613382 100644 --- a/drivers/input/rmi4/rmi_f30.c +++ b/drivers/input/rmi4/rmi_f30.c @@ -258,9 +258,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn, /* * Buttonpad could be also inferred from f30->has_mech_mouse_btns, - * but I am not sure, so use only the pdata info. + * but I am not sure, so use only the pdata info and the number of + * mapped buttons. */ - if (pdata->f30_data.buttonpad) + if (pdata->f30_data.buttonpad || (button - BTN_LEFT == 1)) __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); return 0; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 04cdac7ab3e3..6130278c5d71 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1507,7 +1507,7 @@ static ssize_t amd_iommu_show_cap(struct device *dev, struct device_attribute *attr, char *buf) { - struct amd_iommu *iommu = dev_get_drvdata(dev); + struct amd_iommu *iommu = dev_to_amd_iommu(dev); return sprintf(buf, "%x\n", iommu->cap); } static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); @@ -1516,7 +1516,7 @@ static ssize_t amd_iommu_show_features(struct device *dev, struct device_attribute *attr, char *buf) { - struct amd_iommu *iommu = dev_get_drvdata(dev); + struct amd_iommu *iommu = dev_to_amd_iommu(dev); return sprintf(buf, "%llx\n", iommu->features); } static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index af00f381a7b1..003f3ceb2661 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -569,6 +569,11 @@ struct amd_iommu { volatile u64 __aligned(8) cmd_sem; }; +static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) +{ + return container_of(dev, struct amd_iommu, iommu.dev); +} + #define ACPIHID_UID_LEN 256 #define ACPIHID_HID_LEN 9 diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index f8ed8c95b685..063343909b0d 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -22,6 +22,7 @@ #include <linux/profile.h> #include <linux/module.h> #include <linux/sched.h> +#include 
<linux/sched/mm.h> #include <linux/iommu.h> #include <linux/wait.h> #include <linux/pci.h> diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a8f7ae0eb7a4..238ad3447712 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4730,11 +4730,16 @@ static int intel_iommu_cpu_dead(unsigned int cpu) return 0; } +static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) +{ + return container_of(dev, struct intel_iommu, iommu.dev); +} + static ssize_t intel_iommu_show_version(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); u32 ver = readl(iommu->reg + DMAR_VER_REG); return sprintf(buf, "%d:%d\n", DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver)); @@ -4745,7 +4750,7 @@ static ssize_t intel_iommu_show_address(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%llx\n", iommu->reg_phys); } static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL); @@ -4754,7 +4759,7 @@ static ssize_t intel_iommu_show_cap(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%llx\n", iommu->cap); } static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL); @@ -4763,7 +4768,7 @@ static ssize_t intel_iommu_show_ecap(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%llx\n", iommu->ecap); } static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL); @@ -4772,7 +4777,7 @@ static ssize_t intel_iommu_show_ndoms(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); } static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL); @@ -4781,7 +4786,7 @@ static ssize_t intel_iommu_show_ndoms_used(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))); } diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 51f2b228723f..23c427602c55 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -16,6 +16,7 @@ #include <linux/intel-iommu.h> #include <linux/mmu_notifier.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/intel-svm.h> #include <linux/rculist.h> diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index 05bbf171df37..f96601268f71 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c @@ -198,8 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = { static int __init crossbar_of_init(struct device_node *node) { + u32 max = 0, entry, reg_size; int i, size, reserved = 0; - u32 max = 0, entry; const __be32 *irqsr; int ret = -ENOMEM; @@ -276,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node) if (!cb->register_offsets) goto err_irq_map; - of_property_read_u32(node, "ti,reg-size", 
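
Both IOMMU hunks above replace dev_get_drvdata() in the sysfs show callbacks with a container_of()-based helper (dev_to_amd_iommu() / dev_to_intel_iommu()): because the struct device is embedded in the driver's own structure, the outer object can be recovered from the member pointer instead of relying on drvdata having been set. A self-contained userspace sketch of that pattern (types simplified, not the kernel definitions):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for a struct device embedded in a driver object. */
struct device { const char *name; };
struct my_iommu {
	unsigned long cap;
	struct device dev;	/* embedded, like iommu->iommu.dev */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct my_iommu *dev_to_my_iommu(struct device *dev)
{
	return container_of(dev, struct my_iommu, dev);
}

int main(void)
{
	struct my_iommu iommu = { .cap = 0x1234, .dev = { .name = "iommu0" } };
	struct device *dev = &iommu.dev;	/* what a sysfs callback receives */

	printf("%s cap=%lx\n", dev->name, dev_to_my_iommu(dev)->cap);
	return 0;
}
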
&size); + of_property_read_u32(node, "ti,reg-size", &reg_size); - switch (size) { + switch (reg_size) { case 1: cb->write = crossbar_writeb; break; @@ -304,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node) continue; cb->register_offsets[i] = reserved; - reserved += size; + reserved += reg_size; } of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 23201004fd7a..f77f840d2b5f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1601,6 +1601,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data) its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; } +static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +{ + struct its_node *its = data; + + /* On QDF2400, the size of the ITE is 16Bytes */ + its->ite_size = 16; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -1618,6 +1626,14 @@ static const struct gic_quirk its_quirks[] = { .init = its_enable_quirk_cavium_23144, }, #endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, +#endif { } }; diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 49d0f70c2bae..1dfd1085a04f 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -18,7 +18,7 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/proc_fs.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/seq_file.h> #include <linux/skbuff.h> #include <linux/workqueue.h> diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 63eaa0a9f8a1..1b169559a240 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> #include "isdn_common.h" #include "isdn_tty.h" #ifdef CONFIG_ISDN_AUDIO diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 67c21876c35f..6ceca7db62ad 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -234,6 +234,8 @@ #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include "core.h" #include "l1oip.h" diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index b324474c0c12..8b7faea2ddf8 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -19,6 +19,9 @@ #include <linux/mISDNif.h> #include <linux/kthread.h> #include <linux/sched.h> +#include <linux/sched/cputime.h> +#include <linux/signal.h> + #include "core.h" static u_int *debug; diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 9438d7ec3308..b1e135fc1fb5 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -25,6 +25,8 @@ #include <linux/module.h> #include <linux/mISDNif.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> + #include "core.h" static DEFINE_MUTEX(mISDN_mutex); diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index a9145aa7f36a..8d456dc6c5bf 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -29,7 +29,6 @@ struct led_pwm_data { unsigned int active_low; unsigned int period; int duty; - bool can_sleep; }; struct led_pwm_priv { @@
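
The GICv3 ITS hunk above adds a Qualcomm QDF2400 entry to the driver's quirk table: each entry carries an IIDR value, a mask and an init callback, and the probe path applies the callback when (iidr & mask) matches. The userspace sketch below mimics that table-driven matching (structure and values illustrative only, not the kernel's struct gic_quirk):

#include <stdio.h>
#include <stdint.h>

struct quirk {
	const char *desc;
	uint32_t iidr;		/* value to match */
	uint32_t mask;		/* bits that must match */
	void (*init)(void *data);
};

static void enable_qdf2400_e0065(void *data)
{
	*(int *)data = 16;	/* e.g. force a 16-byte ITE size */
}

static const struct quirk quirks[] = {
	{ "ITS: QDF2400 erratum 0065", 0x00001070, 0xffffffff, enable_qdf2400_e0065 },
	{ 0 }			/* sentinel, like the empty entry in its_quirks[] */
};

static void apply_quirks(uint32_t iidr, void *data)
{
	for (const struct quirk *q = quirks; q->desc; q++)
		if ((iidr & q->mask) == q->iidr) {
			printf("applying %s\n", q->desc);
			q->init(data);
		}
}

int main(void)
{
	int ite_size = 8;
	apply_quirks(0x00001070, &ite_size);	/* pretend the hardware reports this IIDR */
	printf("ite_size=%d\n", ite_size);
	return 0;
}
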
-49,8 +48,8 @@ static void __led_pwm_set(struct led_pwm_data *led_dat) pwm_enable(led_dat->pwm); } -static void led_pwm_set(struct led_classdev *led_cdev, - enum led_brightness brightness) +static int led_pwm_set(struct led_classdev *led_cdev, + enum led_brightness brightness) { struct led_pwm_data *led_dat = container_of(led_cdev, struct led_pwm_data, cdev); @@ -66,12 +65,7 @@ static void led_pwm_set(struct led_classdev *led_cdev, led_dat->duty = duty; __led_pwm_set(led_dat); -} -static int led_pwm_set_blocking(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - led_pwm_set(led_cdev, brightness); return 0; } @@ -112,11 +106,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, return ret; } - led_data->can_sleep = pwm_can_sleep(led_data->pwm); - if (!led_data->can_sleep) - led_data->cdev.brightness_set = led_pwm_set; - else - led_data->cdev.brightness_set_blocking = led_pwm_set_blocking; + led_data->cdev.brightness_set_blocking = led_pwm_set; /* * FIXME: pwm_apply_args() should be removed when switching to the diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c index e6f2f8b9f09a..afa3b4099214 100644 --- a/drivers/leds/trigger/ledtrig-heartbeat.c +++ b/drivers/leds/trigger/ledtrig-heartbeat.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/timer.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/leds.h> #include <linux/reboot.h> #include <linux/suspend.h> diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index ac219045daf7..395ed1961dbf 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -8,6 +8,7 @@ #include <linux/stddef.h> #include <linux/io.h> #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/vmalloc.h> #include <linux/cpu.h> #include <linux/freezer.h> diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 30c60687d277..1a6787bc9386 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -8,6 +8,7 @@ #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/export.h> diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 152414e6378a..fee939efc4fc 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -23,7 +23,7 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 227869159ac0..1ac66421877a 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -39,6 +39,7 @@ #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/memblock.h> +#include <linux/sched/signal.h> #include <asm/byteorder.h> #include <asm/io.h> diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 43b8db2b5445..cce99f72e4ae 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -23,7 +23,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/miscdevice.h> #include <linux/blkdev.h> #include <linux/pci.h> diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 9c79f8019d2a..97fb956bb6e0 100644 --- a/drivers/mailbox/mailbox-test.c +++ 
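
The leds-pwm hunk above removes the can_sleep special case: with pwm_can_sleep() gone, the driver always registers a single blocking brightness callback that returns int. A small userspace mock of that callback shape (the structs are simplified stand-ins, not the real led_classdev/PWM API):

#include <stdio.h>

enum led_brightness { LED_OFF = 0, LED_FULL = 255 };

struct led_classdev {
	const char *name;
	int max_brightness;
	/* one blocking setter that may sleep and can report errors */
	int (*brightness_set_blocking)(struct led_classdev *cdev,
				       enum led_brightness value);
};

static int pwm_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	/* a real driver would compute a duty cycle and program the PWM here */
	printf("%s -> %d/%d\n", cdev->name, value, cdev->max_brightness);
	return 0;
}

int main(void)
{
	struct led_classdev led = {
		.name = "pwm-led", .max_brightness = 255,
		.brightness_set_blocking = pwm_led_set,
	};
	return led.brightness_set_blocking(&led, LED_FULL);
}
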
b/drivers/mailbox/mailbox-test.c @@ -21,6 +21,7 @@ #include <linux/poll.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <linux/sched/signal.h> #define MBOX_MAX_SIG_LEN 8 #define MBOX_MAX_MSG_LEN 128 diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 646fe85261c1..18526d44688d 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -11,6 +11,7 @@ #include "bset.h" #include <linux/console.h> +#include <linux/sched/clock.h> #include <linux/random.h> #include <linux/prefetch.h> diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index a43eedd5804d..450d0e848ae4 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -32,6 +32,9 @@ #include <linux/prefetch.h> #include <linux/random.h> #include <linux/rcupdate.h> +#include <linux/sched/clock.h> +#include <linux/rculist.h> + #include <trace/events/bcache.h> /* diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 9b2fe2d3e3a9..1ec84ca81146 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -3,6 +3,7 @@ #include <linux/llist.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/workqueue.h> /* diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index b3ff57d61dde..f90f13616980 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -13,6 +13,7 @@ #include <linux/blkdev.h> #include <linux/sort.h> +#include <linux/sched/clock.h> static const char * const cache_replacement_policies[] = { "lru", diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index dde6172f3f10..8c3a938f4bf0 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/types.h> +#include <linux/sched/clock.h> #include "util.h" diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index cf2cbc211d83..a126919ed102 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -6,6 +6,7 @@ #include <linux/errno.h> #include <linux/blkdev.h> #include <linux/kernel.h> +#include <linux/sched/clock.h> #include <linux/llist.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 69e1ae59cab8..6ac2e48b9235 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -13,6 +13,7 @@ #include <linux/delay.h> #include <linux/kthread.h> +#include <linux/sched/clock.h> #include <trace/events/bcache.h> /* Rate limiting */ diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d36d427a9efb..df4859f6ac6a 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -11,6 +11,7 @@ #include <linux/device-mapper.h> #include <linux/dm-io.h> #include <linux/slab.h> +#include <linux/sched/mm.h> #include <linux/jiffies.h> #include <linux/vmalloc.h> #include <linux/shrinker.h> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 1cb2ca9dfae3..389a3637ffcc 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1536,7 +1536,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string down_read(&key->sem); - ukp = user_key_payload(key); + ukp = user_key_payload_locked(key); if (!ukp) { up_read(&key->sem); key_put(key); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index a5a9b17f0f7f..4da6fc6b1ffd 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include 
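
The dm-crypt hunk above switches to user_key_payload_locked(), the accessor intended for callers that already hold key->sem, instead of the RCU-only user_key_payload(). The surrounding pattern, roughly as it appears in the function touched by the diff (kernel-style fragment, shortened, not buildable on its own):

const struct user_key_payload *ukp;

down_read(&key->sem);			/* pin the payload while it is copied */
ukp = user_key_payload_locked(key);	/* safe: the semaphore is held */
if (!ukp) {				/* key was revoked or invalidated */
	up_read(&key->sem);
	key_put(key);
	return -EKEYREVOKED;		/* error dm-crypt reports in this branch */
}
/* ... copy ukp->data into the cipher key ... */
up_read(&key->sem);
key_put(key);
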
<linux/vmalloc.h> #include <linux/miscdevice.h> +#include <linux/sched/mm.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/slab.h> diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5c9e95d66f3b..f8564d63982f 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -101,6 +101,8 @@ struct raid_dev { #define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS) #define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV) +#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET) + /* * Definitions of various constructor flags to * be used in checks of valid / invalid flags @@ -3462,9 +3464,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv) else if (!strcasecmp(argv[0], "recover")) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); else { - if (!strcasecmp(argv[0], "check")) + if (!strcasecmp(argv[0], "check")) { set_bit(MD_RECOVERY_CHECK, &mddev->recovery); - else if (!strcasecmp(argv[0], "repair")) { + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + } else if (!strcasecmp(argv[0], "repair")) { set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); } else @@ -3771,7 +3775,15 @@ static void raid_resume(struct dm_target *ti) mddev->ro = 0; mddev->in_sync = 0; - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + /* + * Keep the RAID set frozen if reshape/rebuild flags are set. + * The RAID set is unfrozen once the next table load/resume, + * which clears the reshape/rebuild flags, occurs. + * This ensures that the constructor for the inactive table + * retrieves an up-to-date reshape_position. + */ + if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); if (mddev->suspended) mddev_resume(mddev); @@ -3779,7 +3791,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 10, 0}, + .version = {1, 10, 1}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9f37d7fc2786..f4ffd1eb8f44 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> #include <linux/blkpg.h> #include <linux/bio.h> #include <linux/mempool.h> diff --git a/drivers/md/md.c b/drivers/md/md.c index 985374f20e2e..548d1b8014f8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -44,6 +44,7 @@ */ +#include <linux/sched/signal.h> #include <linux/kthread.h> #include <linux/blkdev.h> #include <linux/badblocks.h> diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 0863905dee02..8589e0a14068 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -13,6 +13,7 @@ #include <linux/rwsem.h> #include <linux/device-mapper.h> #include <linux/stacktrace.h> +#include <linux/sched/task.h> #define DM_MSG_PREFIX "block manager" diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7453d94eeed7..fbc2d7851b49 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -37,7 +37,10 @@ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/ratelimit.h> +#include <linux/sched/signal.h> + #include <trace/events/block.h> + #include "md.h" #include "raid1.h" #include "bitmap.h" diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 
2ce23b01dbb2..4fb09b3fcb41 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -55,6 +55,8 @@ #include <linux/ratelimit.h> #include <linux/nodemask.h> #include <linux/flex_array.h> +#include <linux/sched/signal.h> + #include <trace/events/block.h> #include "md.h" diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index 000d737ad827..8d65028c7a74 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -34,7 +34,7 @@ #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/spinlock.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kthread.h> #include "dvb_ca_en50221.h" diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 4eac71e50c5f..6628f80d184f 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -19,7 +19,7 @@ #define pr_fmt(fmt) "dvb_demux: " fmt -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/vmalloc.h> diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 85ae3669aa66..e3fff8f64d37 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -29,7 +29,7 @@ #include <linux/string.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/poll.h> diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h index fef3c736fcba..7be2088c45fe 100644 --- a/drivers/media/pci/cx18/cx18-driver.h +++ b/drivers/media/pci/cx18/cx18-driver.h @@ -24,7 +24,7 @@ #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/pci.h> #include <linux/interrupt.h> diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index ab2ae53618e8..e73c153285f0 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -59,6 +59,7 @@ #include <media/tveeprom.h> #include <media/i2c/saa7115.h> #include "tuner-xc2028.h" +#include <uapi/linux/sched/types.h> /* If you have already X v4l cards, then set this to X. This way the device numbers stay matched. Example: you have a WinTV card diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h index cde452e30746..d27c5c2c07ea 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.h +++ b/drivers/media/pci/ivtv/ivtv-driver.h @@ -38,37 +38,38 @@ * using information provided by Jiun-Kuei Jung @ AVerMedia. 
*/ -#include <asm/byteorder.h> +#include <linux/module.h> +#include <linux/init.h> #include <linux/delay.h> -#include <linux/device.h> +#include <linux/sched/signal.h> #include <linux/fs.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/ivtv.h> -#include <linux/kernel.h> -#include <linux/kthread.h> #include <linux/list.h> -#include <linux/module.h> -#include <linux/mutex.h> +#include <linux/unistd.h> #include <linux/pagemap.h> -#include <linux/pci.h> #include <linux/scatterlist.h> -#include <linux/sched.h> +#include <linux/kthread.h> +#include <linux/mutex.h> #include <linux/slab.h> -#include <linux/spinlock.h> #include <linux/uaccess.h> -#include <linux/unistd.h> +#include <asm/byteorder.h> -#include <media/drv-intf/cx2341x.h> -#include <media/i2c/ir-kbd-i2c.h> -#include <media/tuner.h> +#include <linux/dvb/video.h> +#include <linux/dvb/audio.h> #include <media/v4l2-common.h> +#include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fh.h> -#include <media/v4l2-ioctl.h> +#include <media/tuner.h> +#include <media/drv-intf/cx2341x.h> +#include <media/i2c/ir-kbd-i2c.h> + +#include <linux/ivtv.h> /* Memory layout */ #define IVTV_ENCODER_OFFSET 0x00000000 diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c index da1eebd2016f..3219d2f3271e 100644 --- a/drivers/media/pci/pt1/pt1.c +++ b/drivers/media/pci/pt1/pt1.c @@ -18,6 +18,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c index 77f4d15f322b..e8b5d0992157 100644 --- a/drivers/media/pci/pt3/pt3.c +++ b/drivers/media/pci/pt3/pt3.c @@ -21,6 +21,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/string.h> +#include <linux/sched/signal.h> #include "dmxdev.h" #include "dvbdev.h" diff --git a/drivers/media/pci/solo6x10/solo6x10-i2c.c b/drivers/media/pci/solo6x10/solo6x10-i2c.c index c908672b2c40..e83bb79f9349 100644 --- a/drivers/media/pci/solo6x10/solo6x10-i2c.c +++ b/drivers/media/pci/solo6x10/solo6x10-i2c.c @@ -27,6 +27,7 @@ * thread context, ACK the interrupt, and move on. 
-- BenC */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include "solo6x10.h" diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c index 671907a6e6b6..40adceebca7e 100644 --- a/drivers/media/pci/zoran/zoran_device.c +++ b/drivers/media/pci/zoran/zoran_device.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/ktime.h> +#include <linux/sched/signal.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c index f99092ca8f5c..47c36c26096b 100644 --- a/drivers/media/platform/vivid/vivid-radio-rx.c +++ b/drivers/media/platform/vivid/vivid-radio-rx.c @@ -22,6 +22,8 @@ #include <linux/delay.h> #include <linux/videodev2.h> #include <linux/v4l2-dv-timings.h> +#include <linux/sched/signal.h> + #include <media/v4l2-common.h> #include <media/v4l2-event.h> #include <media/v4l2-dv-timings.h> diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c index 8c59d4f53200..0e8025b7b4dd 100644 --- a/drivers/media/platform/vivid/vivid-radio-tx.c +++ b/drivers/media/platform/vivid/vivid-radio-tx.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/delay.h> #include <linux/videodev2.h> #include <linux/v4l2-dv-timings.h> diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index a54ca531d8ef..393dccaabdd0 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -19,7 +19,7 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/ioctl.h> #include <linux/fs.h> diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c index 431dd0b4b332..b1d13444ff30 100644 --- a/drivers/media/usb/cpia2/cpia2_core.c +++ b/drivers/media/usb/cpia2/cpia2_core.c @@ -32,6 +32,7 @@ #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/firmware.h> +#include <linux/sched/signal.h> #define FIRMWARE "cpia2/stv0672_vp4.bin" MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c index 23d3285f182a..e91d00762e94 100644 --- a/drivers/media/usb/gspca/cpia1.c +++ b/drivers/media/usb/gspca/cpia1.c @@ -27,6 +27,8 @@ #define MODULE_NAME "cpia1" #include <linux/input.h> +#include <linux/sched/signal.h> + #include "gspca.h" MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 36bd904946bd..0b5c43f7e020 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -21,7 +21,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/interrupt.h> diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 6c722d96b775..79e60ec70bd3 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -418,8 +418,9 @@ struct cxl_afu { struct dentry *debugfs; struct mutex contexts_lock; spinlock_t afu_cntl_lock; - /* Used to block access to AFU config space while deconfigured */ - struct rw_semaphore configured_rwsem; + + /* -1: AFU deconfigured/locked, >= 0: number of readers */ + atomic_t configured_state; /* AFU error buffer fields and bin 
attribute for sysfs */ u64 eb_len, eb_offset; diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 377e650a2a1d..2fa015c05561 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -8,7 +8,8 @@ */ #include <linux/workqueue.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/pid.h> #include <linux/mm.h> #include <linux/moduleparam.h> diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 859959f19f10..e7139c76f961 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -12,7 +12,7 @@ #include <linux/export.h> #include <linux/kernel.h> #include <linux/bitmap.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/poll.h> #include <linux/pid.h> #include <linux/fs.h> diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index 2a6bf1d0a3a4..b0b6ed31918e 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -19,6 +19,8 @@ #include <linux/slab.h> #include <linux/idr.h> #include <linux/pci.h> +#include <linux/sched/task.h> + #include <asm/cputable.h> #include <misc/cxl-base.h> @@ -268,8 +270,7 @@ struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) idr_init(&afu->contexts_idr); mutex_init(&afu->contexts_lock); spin_lock_init(&afu->afu_cntl_lock); - init_rwsem(&afu->configured_rwsem); - down_write(&afu->configured_rwsem); + atomic_set(&afu->configured_state, -1); afu->prefault_mode = CXL_PREFAULT_NONE; afu->irqs_max = afu->adapter->user_irqs; diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 09505f432eda..7ae710585267 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -9,6 +9,7 @@ #include <linux/spinlock.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/mm.h> diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index cca938845ffd..91f645992c94 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1129,7 +1129,7 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc if ((rc = cxl_native_register_psl_irq(afu))) goto err2; - up_write(&afu->configured_rwsem); + atomic_set(&afu->configured_state, 0); return 0; err2: @@ -1142,7 +1142,14 @@ err1: static void pci_deconfigure_afu(struct cxl_afu *afu) { - down_write(&afu->configured_rwsem); + /* + * It's okay to deconfigure when AFU is already locked, otherwise wait + * until there are no readers + */ + if (atomic_read(&afu->configured_state) != -1) { + while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1) + schedule(); + } cxl_native_release_psl_irq(afu); if (afu->adapter->native->sl_ops->release_serr_irq) afu->adapter->native->sl_ops->release_serr_irq(afu); diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index 639a343b7836..512a4897dbf6 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -83,6 +83,16 @@ static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus) return phb ? 
phb->private_data : NULL; } +static void cxl_afu_configured_put(struct cxl_afu *afu) +{ + atomic_dec_if_positive(&afu->configured_state); +} + +static bool cxl_afu_configured_get(struct cxl_afu *afu) +{ + return atomic_inc_unless_negative(&afu->configured_state); +} + static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn, struct cxl_afu *afu, int *_record) { @@ -107,7 +117,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn, afu = pci_bus_to_afu(bus); /* Grab a reader lock on afu. */ - if (afu == NULL || !down_read_trylock(&afu->configured_rwsem)) + if (afu == NULL || !cxl_afu_configured_get(afu)) return PCIBIOS_DEVICE_NOT_FOUND; rc = cxl_pcie_config_info(bus, devfn, afu, &record); @@ -132,7 +142,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn, } out: - up_read(&afu->configured_rwsem); + cxl_afu_configured_put(afu); return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; } @@ -144,7 +154,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, afu = pci_bus_to_afu(bus); /* Grab a reader lock on afu. */ - if (afu == NULL || !down_read_trylock(&afu->configured_rwsem)) + if (afu == NULL || !cxl_afu_configured_get(afu)) return PCIBIOS_DEVICE_NOT_FOUND; rc = cxl_pcie_config_info(bus, devfn, afu, &record); @@ -166,7 +176,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, } out: - up_read(&afu->configured_rwsem); + cxl_afu_configured_put(afu); return rc ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL; } diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 3d1d55157e5f..2fad790db3bf 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/capability.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/mutex.h> diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index cb290b8ca0c8..dd4617764f14 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -29,7 +29,7 @@ #include <linux/pci.h> #include <linux/string.h> #include <linux/fs.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/atomic.h> diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c index 232034f5da48..5c7dd26db716 100644 --- a/drivers/misc/ibmasm/r_heartbeat.c +++ b/drivers/misc/ibmasm/r_heartbeat.c @@ -20,7 +20,7 @@ * */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include "ibmasm.h" #include "dot_command.h" diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 99635dd9dbac..fc7efedbc4be 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -103,6 +103,8 @@ #include <linux/delay.h> #include <linux/kthread.h> #include <linux/module.h> +#include <linux/sched/task.h> + #include <asm/sections.h> #define v1printk(a...) 
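The cxl hunks above (cxl.h, main.c, pci.c and vphb.c) replace configured_rwsem with a single atomic_t that encodes both states: -1 means the AFU is deconfigured (writer-locked) and any value >= 0 counts active config-space readers. Readers enter through atomic_inc_unless_negative() and leave through atomic_dec_if_positive(), while deconfigure keeps retrying a cmpxchg from 0 to -1 until the reader count drains. A standalone C11 model of that state machine, with invented names and sched_yield() standing in for the kernel's schedule():

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

static atomic_int configured_state = -1;	/* -1: deconfigured/locked, >= 0: reader count */

static bool configured_get(void)		/* reader enter, like atomic_inc_unless_negative() */
{
	int v = atomic_load(&configured_state);

	while (v >= 0) {
		if (atomic_compare_exchange_weak(&configured_state, &v, v + 1))
			return true;
	}
	return false;				/* deconfigured: caller must not touch config space */
}

static void configured_put(void)		/* reader exit, like atomic_dec_if_positive() */
{
	int v = atomic_load(&configured_state);

	while (v > 0 &&
	       !atomic_compare_exchange_weak(&configured_state, &v, v - 1))
		;
}

static void deconfigure(void)			/* writer: wait for readers, then lock at -1 */
{
	for (;;) {
		int expected = 0;

		if (atomic_compare_exchange_strong(&configured_state, &expected, -1))
			return;
		if (expected == -1)		/* already locked, nothing to wait for */
			return;
		sched_yield();
	}
}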
do { \ diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index fb8705fc3aca..e389b0b5278d 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/dmi.h> #include <linux/module.h> #include <linux/types.h> diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c index 0f1581664c1c..ffb6aeac07b3 100644 --- a/drivers/misc/lkdtm_heap.c +++ b/drivers/misc/lkdtm_heap.c @@ -4,6 +4,7 @@ */ #include "lkdtm.h" #include <linux/slab.h> +#include <linux/sched.h> /* * This tries to stay within the next largest power-of-2 kmalloc cache diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c index 1dd611423d8b..df6ac985fbb5 100644 --- a/drivers/misc/lkdtm_usercopy.c +++ b/drivers/misc/lkdtm_usercopy.c @@ -5,6 +5,7 @@ #include "lkdtm.h" #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/sched/task_stack.h> #include <linux/mman.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index cb3e9e0ca049..df5f78ae3d25 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -16,7 +16,7 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 68fe37b5bc52..d3e3372424d6 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -14,7 +14,7 @@ * */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/slab.h> diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 9d0b7050c79a..bf816449cd40 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -26,7 +26,7 @@ #include <linux/init.h> #include <linux/ioctl.h> #include <linux/cdev.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uuid.h> #include <linux/compat.h> #include <linux/jiffies.h> diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c index 5696df4326b5..85f7d09cc65f 100644 --- a/drivers/misc/mic/cosm/cosm_scif_server.c +++ b/drivers/misc/mic/cosm/cosm_scif_server.c @@ -19,6 +19,8 @@ * */ #include <linux/kthread.h> +#include <linux/sched/signal.h> + #include "cosm_main.h" /* diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c index 03e98bf1ac15..aa530fcceaa9 100644 --- a/drivers/misc/mic/cosm_client/cosm_scif_client.c +++ b/drivers/misc/mic/cosm_client/cosm_scif_client.c @@ -22,6 +22,8 @@ #include <linux/delay.h> #include <linux/reboot.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> + #include "../cosm/cosm_main.h" #define COSM_SCIF_MAX_RETRIES 10 diff --git a/drivers/misc/mic/scif/scif_main.h b/drivers/misc/mic/scif/scif_main.h index a08f0b600a9e..0e5eff9ad080 100644 --- a/drivers/misc/mic/scif/scif_main.h +++ b/drivers/misc/mic/scif/scif_main.h @@ -18,7 +18,7 @@ #ifndef SCIF_MAIN_H #define SCIF_MAIN_H -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/pci.h> #include <linux/miscdevice.h> #include <linux/dmaengine.h> diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index f806a4471eb9..329727e00e97 100644 
--- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -17,6 +17,9 @@ */ #include <linux/dma_remapping.h> #include <linux/pagemap.h> +#include <linux/sched/mm.h> +#include <linux/sched/signal.h> + #include "scif_main.h" #include "scif_map.h" diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 1a2b67f3183d..c2e29d7f0de8 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -374,7 +374,7 @@ unmap: static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], struct irq_affinity *desc) { struct _vop_vdev *vdev = to_vopvdev(dev); struct vop_device *vpdev = vdev->vpdev; diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c index c344483fa7d6..2cde80c7bb93 100644 --- a/drivers/misc/vexpress-syscfg.c +++ b/drivers/misc/vexpress-syscfg.c @@ -16,7 +16,7 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/platform_device.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/syscore_ops.h> #include <linux/vexpress.h> diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index f35f0c8606b9..21d0fa592145 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/slab.h> #include "vmci_queue_pair.h" diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c index 8449516d6ac6..84258a48029d 100644 --- a/drivers/misc/vmw_vmci/vmci_event.c +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/rculist.h> #include "vmci_driver.h" #include "vmci_event.h" diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index ec090105eb4b..8a16a26e9658 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/init.h> diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c index 9a53a30de445..1ab6e8737a5f 100644 --- a/drivers/misc/vmw_vmci/vmci_resource.c +++ b/drivers/misc/vmw_vmci/vmci_resource.c @@ -17,6 +17,7 @@ #include <linux/hash.h> #include <linux/types.h> #include <linux/rculist.h> +#include <linux/completion.h> #include "vmci_resource.h" #include "vmci_driver.h" diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index d29faf2addfe..6d4b72080d51 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <uapi/linux/sched/types.h> #include <linux/kthread.h> #include <linux/export.h> #include <linux/wait.h> diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c index 82bd00af5cc3..268aae45b514 100644 --- a/drivers/mtd/devices/lart.c +++ b/drivers/mtd/devices/lart.c @@ -75,18 +75,18 @@ static char module_name[] = "lart"; /* blob */ #define NUM_BLOB_BLOCKS FLASH_NUMBLOCKS_16m_PARAM -#define BLOB_START 0x00000000 -#define BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM) +#define PART_BLOB_START 0x00000000 +#define 
PART_BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM) /* kernel */ #define NUM_KERNEL_BLOCKS 7 -#define KERNEL_START (BLOB_START + BLOB_LEN) -#define KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN) +#define PART_KERNEL_START (PART_BLOB_START + PART_BLOB_LEN) +#define PART_KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN) /* initial ramdisk */ #define NUM_INITRD_BLOCKS 24 -#define INITRD_START (KERNEL_START + KERNEL_LEN) -#define INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN) +#define PART_INITRD_START (PART_KERNEL_START + PART_KERNEL_LEN) +#define PART_INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN) /* * See section 4.0 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet @@ -587,20 +587,20 @@ static struct mtd_partition lart_partitions[] = { /* blob */ { .name = "blob", - .offset = BLOB_START, - .size = BLOB_LEN, + .offset = PART_BLOB_START, + .size = PART_BLOB_LEN, }, /* kernel */ { .name = "kernel", - .offset = KERNEL_START, /* MTDPART_OFS_APPEND */ - .size = KERNEL_LEN, + .offset = PART_KERNEL_START, /* MTDPART_OFS_APPEND */ + .size = PART_KERNEL_LEN, }, /* initial ramdisk / file system */ { .name = "file system", - .offset = INITRD_START, /* MTDPART_OFS_APPEND */ - .size = INITRD_LEN, /* MTDPART_SIZ_FULL */ + .offset = PART_INITRD_START, /* MTDPART_OFS_APPEND */ + .size = PART_INITRD_LEN, /* MTDPART_SIZ_FULL */ } }; #define NUM_PARTITIONS ARRAY_SIZE(lart_partitions) diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 6c062b8251d2..d52139635b67 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -20,6 +20,7 @@ */ #include <linux/clk.h> #include <linux/slab.h> +#include <linux/sched/task_stack.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mtd/partitions.h> diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 1492c12906f6..b0524f8accb6 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -36,6 +36,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/nmi.h> #include <linux/types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h index 4b7bee17c924..04afd0e7074f 100644 --- a/drivers/mtd/tests/mtd_test.h +++ b/drivers/mtd/tests/mtd_test.h @@ -1,5 +1,5 @@ #include <linux/mtd/mtd.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> static inline int mtdtest_relax(void) { diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 85d54f37e28f..77513195f50e 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -1159,7 +1159,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) if (err) return ERR_PTR(err); - err = vfs_getattr(&path, &stat); + err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT); path_put(&path); if (err) return ERR_PTR(err); diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 88b1897aeb40..d4b2e8744498 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -314,7 +314,7 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) if (error) return ERR_PTR(error); - error = vfs_getattr(&path, &stat); + error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT); path_put(&path); if (error) return ERR_PTR(error); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6321f12630c8..8a4ba8b88e52 100644 --- 
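The two UBI hunks just above track the statx() rework that also landed in 4.11: vfs_getattr() grew a request mask and query flags, so callers now say which attributes they actually need (UBI only asks for STATX_TYPE) and how eagerly they should be fetched (AT_STATX_SYNC_AS_STAT keeps the old stat() behaviour). A hedged sketch of the updated calling convention, with an invented helper name:

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/stat.h>

/* Illustrative only: resolve a path and fetch nothing beyond the file type. */
static int example_mode_of(const char *pathname, umode_t *mode)
{
	struct path path;
	struct kstat stat;
	int err;

	err = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
	path_put(&path);
	if (err)
		return err;

	*mode = stat.mode;
	return 0;
}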
a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4179,6 +4179,7 @@ void bond_setup(struct net_device *bond_dev) /* Initialize the device entry points */ ether_setup(bond_dev); + bond_dev->max_mtu = ETH_MAX_MTU; bond_dev->netdev_ops = &bond_netdev_ops; bond_dev->ethtool_ops = &bond_ethtool_ops; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 577e57cad1dc..1bcbb8913e17 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -16,6 +16,8 @@ #include <linux/rcupdate.h> #include <linux/ctype.h> #include <linux/inet.h> +#include <linux/sched/signal.h> + #include <net/bonding.h> static int bond_option_active_slave_set(struct bonding *bond, diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index e23c3ed737de..770623a0cc01 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -24,7 +24,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/string.h> diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index b306210b02b7..bc0eb47eccee 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -679,7 +679,8 @@ static int cfv_probe(struct virtio_device *vdev) goto err; /* Get the TX virtio ring. This is a "guest side vring". */ - err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names); + err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, + NULL); if (err) goto err; diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index ea57fed375c6..13f0f219d8aa 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -196,7 +196,7 @@ #define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ -#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disble Memory error detection */ +#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ /* Structure of the message buffer */ diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index 4063215c9b54..aac58ce6e371 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -17,7 +17,7 @@ */ #include <linux/firmware.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <asm/div64.h> #include <asm/io.h> diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 77e3cc06a30c..300349fe8dc0 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -258,7 +258,7 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev) rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_MODE, - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, gsdev->channel, 0, dm, @@ -432,7 +432,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev) rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_BITTIMING, - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_OUT | USB_TYPE_VENDOR | 
USB_RECIP_INTERFACE, dev->channel, 0, dbt, @@ -546,7 +546,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, hf, urb->transfer_dma); - if (rc == -ENODEV) { netif_device_detach(netdev); } else { @@ -804,7 +803,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, rc = usb_control_msg(interface_to_usbdev(intf), usb_rcvctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_BT_CONST, - USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, channel, 0, bt_const, @@ -908,57 +907,72 @@ static int gs_usb_probe(struct usb_interface *intf, struct gs_usb *dev; int rc = -ENOMEM; unsigned int icount, i; - struct gs_host_config hconf = { - .byte_order = 0x0000beef, - }; - struct gs_device_config dconf; + struct gs_host_config *hconf; + struct gs_device_config *dconf; + + hconf = kmalloc(sizeof(*hconf), GFP_KERNEL); + if (!hconf) + return -ENOMEM; + + hconf->byte_order = 0x0000beef; /* send host config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_HOST_FORMAT, - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - &hconf, - sizeof(hconf), + hconf, + sizeof(*hconf), 1000); + kfree(hconf); + if (rc < 0) { dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc); return rc; } + dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); + if (!dconf) + return -ENOMEM; + /* read device config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_rcvctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_DEVICE_CONFIG, - USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - &dconf, - sizeof(dconf), + dconf, + sizeof(*dconf), 1000); if (rc < 0) { dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", rc); + kfree(dconf); return rc; } - icount = dconf.icount + 1; + icount = dconf->icount + 1; dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); if (icount > GS_MAX_INTF) { dev_err(&intf->dev, "Driver cannot handle more that %d CAN interfaces\n", GS_MAX_INTF); + kfree(dconf); return -EINVAL; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) + if (!dev) { + kfree(dconf); return -ENOMEM; + } + init_usb_anchor(&dev->rx_submitted); atomic_set(&dev->active_channels, 0); @@ -967,7 +981,7 @@ static int gs_usb_probe(struct usb_interface *intf, dev->udev = interface_to_usbdev(intf); for (i = 0; i < icount; i++) { - dev->canch[i] = gs_make_candev(i, intf, &dconf); + dev->canch[i] = gs_make_candev(i, intf, dconf); if (IS_ERR_OR_NULL(dev->canch[i])) { /* save error code to return later */ rc = PTR_ERR(dev->canch[i]); @@ -978,12 +992,15 @@ static int gs_usb_probe(struct usb_interface *intf, gs_destroy_candev(dev->canch[i]); usb_kill_anchored_urbs(&dev->rx_submitted); + kfree(dconf); kfree(dev); return rc; } dev->canch[i]->parent = dev; } + kfree(dconf); + return 0; } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 108a30e15097..d000cb62d6ae 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -951,8 +951,8 @@ static int usb_8dev_probe(struct usb_interface *intf, for (i = 0; i < MAX_TX_URBS; i++) priv->tx_contexts[i].echo_index = MAX_TX_URBS; - priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg), - GFP_KERNEL); + priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg), + GFP_KERNEL); if 
(!priv->cmd_msg_buffer) goto cleanup_candev; @@ -966,7 +966,7 @@ static int usb_8dev_probe(struct usb_interface *intf, if (err) { netdev_err(netdev, "couldn't register CAN device: %d\n", err); - goto cleanup_cmd_msg_buffer; + goto cleanup_candev; } err = usb_8dev_cmd_version(priv, &version); @@ -987,9 +987,6 @@ static int usb_8dev_probe(struct usb_interface *intf, cleanup_unregister_candev: unregister_netdev(priv->netdev); -cleanup_cmd_msg_buffer: - kfree(priv->cmd_msg_buffer); - cleanup_candev: free_candev(netdev); diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 76e5fc7adff5..6c98901f1b89 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -1276,18 +1276,6 @@ err_out: return ret; } -static void __exit dec_lance_remove(struct device *bdev) -{ - struct net_device *dev = dev_get_drvdata(bdev); - resource_size_t start, len; - - unregister_netdev(dev); - start = to_tc_dev(bdev)->resource.start; - len = to_tc_dev(bdev)->resource.end - start + 1; - release_mem_region(start, len); - free_netdev(dev); -} - /* Find all the lance cards on the system and initialize them */ static int __init dec_lance_platform_probe(void) { @@ -1320,7 +1308,7 @@ static void __exit dec_lance_platform_remove(void) #ifdef CONFIG_TC static int dec_lance_tc_probe(struct device *dev); -static int __exit dec_lance_tc_remove(struct device *dev); +static int dec_lance_tc_remove(struct device *dev); static const struct tc_device_id dec_lance_tc_table[] = { { "DEC ", "PMAD-AA " }, @@ -1334,7 +1322,7 @@ static struct tc_driver dec_lance_tc_driver = { .name = "declance", .bus = &tc_bus_type, .probe = dec_lance_tc_probe, - .remove = __exit_p(dec_lance_tc_remove), + .remove = dec_lance_tc_remove, }, }; @@ -1346,7 +1334,19 @@ static int dec_lance_tc_probe(struct device *dev) return status; } -static int __exit dec_lance_tc_remove(struct device *dev) +static void dec_lance_remove(struct device *bdev) +{ + struct net_device *dev = dev_get_drvdata(bdev); + resource_size_t start, len; + + unregister_netdev(dev); + start = to_tc_dev(bdev)->resource.start; + len = to_tc_dev(bdev)->resource.end - start + 1; + release_mem_region(start, len); + free_netdev(dev); +} + +static int dec_lance_tc_remove(struct device *dev) { put_device(dev); dec_lance_remove(dev); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index a7d16db5c4b2..937f37a5dcb2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1323,7 +1323,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, enum xgbe_mdio_mode mode) { - unsigned int reg_val = 0; + unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); switch (mode) { case XGBE_MDIO_MODE_CL22: diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 3aa457c8ca21..248f60d171a5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1131,12 +1131,12 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); + phy_if->phy_stop(pdata); + xgbe_free_irqs(pdata); xgbe_napi_disable(pdata, 1); - phy_if->phy_stop(pdata); - hw_if->exit(pdata); channel = pdata->channel; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 9d8c953083b4..e707c49cc55a 
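Two USB CAN drivers above touch the same rule: buffers passed to usb_control_msg() are used for DMA, so they must come from the allocator, never from the stack (on-stack buffers fail outright with CONFIG_VMAP_STACK). gs_usb therefore kmalloc()s its host and device config structures in probe, and usb_8dev switches its command buffer to devm_kzalloc() so the error paths no longer need a manual kfree(). A hedged sketch of the allocate/transfer/free shape; the request number and wire format below are made up:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>

#define EXAMPLE_BREQ_CONFIG	0x01		/* made-up vendor request */

struct example_config {				/* made-up wire format */
	__le32 magic;
};

/* Illustrative only: the transfer buffer is heap-allocated, not on the stack. */
static int example_send_config(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct example_config *cfg;
	int rc;

	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;
	cfg->magic = cpu_to_le32(0xbeef);

	rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			     EXAMPLE_BREQ_CONFIG,
			     USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			     0, 0, cfg, sizeof(*cfg), 1000);

	kfree(cfg);
	return rc < 0 ? rc : 0;
}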
100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -716,6 +716,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.advertising = pdata->phy.supported; + + return; } pdata->phy.advertising &= ~ADVERTISED_Autoneg; @@ -875,6 +877,16 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) !phy_data->sfp_phy_avail) return 0; + /* Set the proper MDIO mode for the PHY */ + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, + phy_data->phydev_mode); + if (ret) { + netdev_err(pdata->netdev, + "mdio port/clause not compatible (%u/%u)\n", + phy_data->mdio_addr, phy_data->phydev_mode); + return ret; + } + /* Create and connect to the PHY device */ phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr, (phy_data->phydev_mode == XGBE_MDIO_MODE_CL45)); @@ -2722,6 +2734,18 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) if (ret) return ret; + /* Set the proper MDIO mode for the re-driver */ + if (phy_data->redrv && !phy_data->redrv_if) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, + XGBE_MDIO_MODE_CL22); + if (ret) { + netdev_err(pdata->netdev, + "redriver mdio port not compatible (%u)\n", + phy_data->redrv_addr); + return ret; + } + } + /* Start in highest supported mode */ xgbe_phy_set_mode(pdata, phy_data->start_mode); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index e536301acfde..b3568c453b14 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1749,6 +1749,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { + /* Abort if the clock is defined but couldn't be retrived. + * Always abort if the clock is missing on DT system as + * the driver can't cope with this case. + */ + if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node) + return PTR_ERR(pdata->clk); /* Firmware may have set up the clock already. */ dev_info(dev, "clocks have been setup already\n"); } diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 7b1af950f312..da1b8b225eb9 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -51,8 +51,7 @@ static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) { - if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & - (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK) + if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN) return false; if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) return false; @@ -61,15 +60,25 @@ static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags) { - bgmac_idm_write(bgmac, BCMA_IOCTL, - (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags)); - bgmac_idm_read(bgmac, BCMA_IOCTL); + u32 val; - bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); - bgmac_idm_read(bgmac, BCMA_RESET_CTL); - udelay(1); + /* The Reset Control register only contains a single bit to show if the + * controller is currently in reset. Do a sanity check here, just in + * case the bootloader happened to leave the device in reset. 
+ */ + val = bgmac_idm_read(bgmac, BCMA_RESET_CTL); + if (val) { + bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); + bgmac_idm_read(bgmac, BCMA_RESET_CTL); + udelay(1); + } - bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags)); + val = bgmac_idm_read(bgmac, BCMA_IOCTL); + /* Some bits of BCMA_IOCTL set by HW/ATF and should not change */ + val |= flags & ~(BGMAC_AWCACHE | BGMAC_ARCACHE | BGMAC_AWUSER | + BGMAC_ARUSER); + val |= BGMAC_CLK_EN; + bgmac_idm_write(bgmac, BCMA_IOCTL, val); bgmac_idm_read(bgmac, BCMA_IOCTL); udelay(1); } diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 415046750bb4..fd66fca00e01 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1223,12 +1223,16 @@ static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb, static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) { struct bgmac *bgmac = netdev_priv(net_dev); + struct sockaddr *sa = addr; int ret; ret = eth_prepare_mac_addr_change(net_dev, addr); if (ret < 0) return ret; - bgmac_write_mac_address(bgmac, (u8 *)addr); + + ether_addr_copy(net_dev->dev_addr, sa->sa_data); + bgmac_write_mac_address(bgmac, net_dev->dev_addr); + eth_commit_mac_addr_change(net_dev, addr); return 0; } diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 248727dc62f2..6d1c6ff1ed96 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -213,6 +213,22 @@ /* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */ #define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */ #define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */ +/* The IOCTL values appear to be different in NS, NSP, and NS2, and do not match + * the values directly above + */ +#define BGMAC_CLK_EN BIT(0) +#define BGMAC_RESERVED_0 BIT(1) +#define BGMAC_SOURCE_SYNC_MODE_EN BIT(2) +#define BGMAC_DEST_SYNC_MODE_EN BIT(3) +#define BGMAC_TX_CLK_OUT_INVERT_EN BIT(4) +#define BGMAC_DIRECT_GMII_MODE BIT(5) +#define BGMAC_CLK_250_SEL BIT(6) +#define BGMAC_AWCACHE (0xf << 7) +#define BGMAC_RESERVED_1 (0x1f << 11) +#define BGMAC_ARCACHE (0xf << 16) +#define BGMAC_AWUSER (0x3f << 20) +#define BGMAC_ARUSER (0x3f << 26) +#define BGMAC_RESERVED BIT(31) /* BCMA GMAC core specific IO status (BCMA_IOST) flags */ #define BGMAC_BCMA_IOST_ATTACHED 0x00000800 diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 89d4feba1a9a..55c8e25b43d9 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2617,7 +2617,7 @@ out_out: return err; } -static int __exit sbmac_remove(struct platform_device *pldev) +static int sbmac_remove(struct platform_device *pldev) { struct net_device *dev = platform_get_drvdata(pldev); struct sbmac_softc *sc = netdev_priv(dev); @@ -2634,7 +2634,7 @@ static int __exit sbmac_remove(struct platform_device *pldev) static struct platform_driver sbmac_driver = { .probe = sbmac_probe, - .remove = __exit_p(sbmac_remove), + .remove = sbmac_remove, .driver = { .name = sbmac_string, }, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a448177990fe..30d1eb9ebec9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -20,6 +20,7 @@ #include <linux/moduleparam.h> #include <linux/stringify.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/compiler.h> 
#include <linux/slab.h> diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index 8cd389148166..aa36e9ae7676 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -23,6 +23,8 @@ #ifndef _OCTEON_MAIN_H_ #define _OCTEON_MAIN_H_ +#include <linux/sched/signal.h> + #if BITS_PER_LONG == 32 #define CVM_CAST64(v) ((long long)(v)) #elif BITS_PER_LONG == 64 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 5043b64805f0..8098c93cd16e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1364,6 +1364,10 @@ struct cpl_tx_data { #define TX_FORCE_S 13 #define TX_FORCE_V(x) ((x) << TX_FORCE_S) +#define T6_TX_FORCE_S 20 +#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S) +#define T6_TX_FORCE_F T6_TX_FORCE_V(1U) + enum { ULP_TX_MEM_READ = 2, ULP_TX_MEM_WRITE = 3, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 5fdaa16426c5..fa376444e57c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -37,7 +37,7 @@ #define T4FW_VERSION_MAJOR 0x01 #define T4FW_VERSION_MINOR 0x10 -#define T4FW_VERSION_MICRO 0x1A +#define T4FW_VERSION_MICRO 0x21 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -46,7 +46,7 @@ #define T5FW_VERSION_MAJOR 0x01 #define T5FW_VERSION_MINOR 0x10 -#define T5FW_VERSION_MICRO 0x1A +#define T5FW_VERSION_MICRO 0x21 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -55,7 +55,7 @@ #define T6FW_VERSION_MAJOR 0x01 #define T6FW_VERSION_MINOR 0x10 -#define T6FW_VERSION_MICRO 0x1A +#define T6FW_VERSION_MICRO 0x21 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h index e995a1a3840a..a91ad766cef0 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h @@ -59,7 +59,7 @@ struct cxgbi_pagepod_hdr { #define PPOD_PAGES_MAX 4 struct cxgbi_pagepod { struct cxgbi_pagepod_hdr hdr; - u64 addr[PPOD_PAGES_MAX + 1]; + __be64 addr[PPOD_PAGES_MAX + 1]; }; /* ddp tag format diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 262587240c86..928b0df2b8e0 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1456,7 +1456,7 @@ err_alloc_etherdev: return err; } -static int __exit ftgmac100_remove(struct platform_device *pdev) +static int ftgmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftgmac100 *priv; @@ -1483,7 +1483,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match); static struct platform_driver ftgmac100_driver = { .probe = ftgmac100_probe, - .remove = __exit_p(ftgmac100_remove), + .remove = ftgmac100_remove, .driver = { .name = DRV_NAME, .of_match_table = ftgmac100_of_match, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index c0ddbbe6c226..6ac336b546e6 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1156,7 +1156,7 @@ err_alloc_etherdev: return err; } -static int __exit ftmac100_remove(struct platform_device *pdev) +static int ftmac100_remove(struct platform_device *pdev) 
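The declance, sb1250-mac, ftgmac100 and ftmac100 hunks above (sgiseeq and meth get the same treatment further down) drop __exit from the drivers' remove callbacks and stop wrapping them in __exit_p(). When such a driver is built into the kernel, .exit.text is discarded and __exit_p() evaluates to NULL, so a bound device could never release its resources on unbind; keeping the remove function in regular text fixes that. The corrected registration pattern, with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

/* Deliberately not __exit: remove must stay available for unbind when built in. */
static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,	/* no __exit_p() wrapper */
	.driver	= {
		.name = "example",
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");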
{ struct net_device *netdev; struct ftmac100 *priv; @@ -1176,7 +1176,7 @@ static int __exit ftmac100_remove(struct platform_device *pdev) static struct platform_driver ftmac100_driver = { .probe = ftmac100_probe, - .remove = __exit_p(ftmac100_remove), + .remove = ftmac100_remove, .driver = { .name = DRV_NAME, }, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index a2cc43d28888..b1ecc2627a5a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -96,7 +96,7 @@ #define IXGBE_MAX_FRAME_BUILD_SKB \ (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD) #else -#define IGB_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K +#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K #endif /* @@ -929,6 +929,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring); u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); +void ixgbe_store_key(struct ixgbe_adapter *adapter); void ixgbe_store_reta(struct ixgbe_adapter *adapter); s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index a7574c7b12af..90fa5bf23d1b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2998,8 +2998,10 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, } /* Fill out the rss hash key */ - if (key) + if (key) { memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); + ixgbe_store_key(adapter); + } ixgbe_store_reta(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 060cdce8058f..a7a430a7be2c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3474,6 +3474,21 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) } /** + * ixgbe_store_key - Write the RSS key to HW + * @adapter: device handle + * + * Write the RSS key stored in adapter.rss_key to HW. 
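The ixgbe changes around this point (the helper body follows just below) fix setting the RSS hash key through ethtool: the set_rxfh handler used to cache the user-supplied key in adapter->rss_key without ever programming it, so the register write loop is factored out into ixgbe_store_key() and called both from the normal RETA setup path and right after the ethtool copy. A standalone sketch of the idea, writing a 40-byte Toeplitz key as ten 32-bit words with the register write stubbed out:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RSS_KEY_WORDS 10			/* 40-byte hash key, as on ixgbe */

/* Stand-in for IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), key[i]). */
static void write_rssrk(unsigned int i, uint32_t val)
{
	printf("RSSRK[%u] <- 0x%08x\n", i, val);
}

/* Shared helper: push the cached key to the "hardware" word by word. */
static void store_key(const uint32_t key[RSS_KEY_WORDS])
{
	for (unsigned int i = 0; i < RSS_KEY_WORDS; i++)
		write_rssrk(i, key[i]);
}

int main(void)
{
	uint32_t rss_key[RSS_KEY_WORDS] = { 0 };
	const uint8_t user_key[RSS_KEY_WORDS * 4] = { 0x6d, 0x5a, 0x56, 0xda };

	/* set_rxfh path: cache the user key, then immediately program it. */
	memcpy(rss_key, user_key, sizeof(rss_key));
	store_key(rss_key);
	return 0;
}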
+ */ +void ixgbe_store_key(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); +} + +/** * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle * @@ -3538,7 +3553,6 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; u32 i, j; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; @@ -3551,8 +3565,7 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) rss_i = 4; /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); + ixgbe_store_key(adapter); /* Fill out redirection table */ memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); @@ -3959,7 +3972,8 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); - if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) + if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame > IXGBE_MAX_FRAME_BUILD_SKB)) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); #endif } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index d7ac22d7f940..bd8de6b9be71 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -441,30 +441,40 @@ static int mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, struct mlxsw_sp_prefix_usage *req_prefix_usage) { - struct mlxsw_sp_lpm_tree *lpm_tree; + struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree; + struct mlxsw_sp_lpm_tree *new_tree; + int err; - if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, - &vr->lpm_tree->prefix_usage)) + if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage)) return 0; - lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, + new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, vr->proto, false); - if (IS_ERR(lpm_tree)) { + if (IS_ERR(new_tree)) { /* We failed to get a tree according to the required * prefix usage. However, the current tree might be still good * for us if our requirement is subset of the prefixes used * in the tree. 
*/ if (mlxsw_sp_prefix_usage_subset(req_prefix_usage, - &vr->lpm_tree->prefix_usage)) + &lpm_tree->prefix_usage)) return 0; - return PTR_ERR(lpm_tree); + return PTR_ERR(new_tree); } - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); - mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); + /* Prevent packet loss by overwriting existing binding */ + vr->lpm_tree = new_tree; + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); + if (err) + goto err_tree_bind; + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); + + return 0; + +err_tree_bind: vr->lpm_tree = lpm_tree; - return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); + mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); + return err; } static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 074259cc8e06..9179a99563af 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1498,7 +1498,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, txbuf->real_len = pkt_len; dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, - pkt_len, DMA_TO_DEVICE); + pkt_len, DMA_BIDIRECTIONAL); /* Build TX descriptor */ txd = &tx_ring->txds[wr_idx]; @@ -1611,7 +1611,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) dma_sync_single_for_cpu(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, - pkt_len, DMA_FROM_DEVICE); + pkt_len, DMA_BIDIRECTIONAL); act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, pkt_len); switch (act) { @@ -2198,7 +2198,8 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) nfp_net_write_mac_addr(nn); nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); - nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); + nn_writel(nn, NFP_NET_CFG_FLBUFSZ, + nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA); /* Enable device */ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index ed34196028b8..70347720fdf9 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -807,7 +807,7 @@ err_out: return err; } -static int __exit sgiseeq_remove(struct platform_device *pdev) +static int sgiseeq_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sgiseeq_private *sp = netdev_priv(dev); @@ -822,7 +822,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev) static struct platform_driver sgiseeq_driver = { .probe = sgiseeq_probe, - .remove = __exit_p(sgiseeq_remove), + .remove = sgiseeq_remove, .driver = { .name = "sgiseeq", } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 92e1c6d8b293..c60c2d4c646a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -828,9 +828,7 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) static int efx_ef10_link_piobufs(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; - _MCDI_DECLARE_BUF(inbuf, - max(MC_CMD_LINK_PIOBUF_IN_LEN, - MC_CMD_UNLINK_PIOBUF_IN_LEN)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned int offset, index; @@ -839,8 +837,6 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); - memset(inbuf, 0, sizeof(inbuf)); - /* Link a buffer to each VI in 
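The mlxsw_sp router hunk above closes a window in which a virtual router had no LPM tree bound at all: the old code unbound the current tree, released it, and only then bound the replacement, so lookups in between could drop packets. The fix binds the new tree first, releases the old one afterwards, and restores the previous binding if the new bind fails. The same acquire-swap-release-with-rollback shape in a standalone sketch (the tree type and stubs are placeholders):

#include <stdio.h>

struct tree { int id; };

static struct tree trees[2] = { { 0 }, { 1 } };

static struct tree *tree_get(int id)	{ return &trees[id]; }		/* like ..._lpm_tree_get() */
static void tree_put(struct tree *t)	{ (void)t; }			/* like ..._lpm_tree_put() */
static int tree_bind(struct tree *t)	{ printf("bound tree %d\n", t->id); return 0; }

/* Bind the replacement before dropping the old tree; roll back if binding fails. */
static int replace_tree(struct tree **bound, int new_id)
{
	struct tree *old = *bound;
	struct tree *new = tree_get(new_id);
	int err;

	*bound = new;
	err = tree_bind(new);
	if (err) {
		*bound = old;		/* rollback: the old binding stays live */
		tree_put(new);
		return err;
	}
	tree_put(old);			/* old tree released only after the new one is in use */
	return 0;
}

int main(void)
{
	struct tree *bound = tree_get(0);

	tree_bind(bound);
	return replace_tree(&bound, 1);
}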
the write-combining mapping */ for (index = 0; index < nic_data->n_piobufs; ++index) { MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, @@ -920,6 +916,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) return 0; fail: + /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same + * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. + */ + BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); while (index--) { MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, nic_data->pio_write_vi_base + index); @@ -2183,7 +2183,7 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, /* Modify IPv4 header if needed. */ ip->tot_len = 0; ip->check = 0; - ipv4_id = ip->id; + ipv4_id = ntohs(ip->id); } else { /* Modify IPv6 header if needed. */ struct ipv6hdr *ipv6 = ipv6_hdr(skb); diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index c6ff0cc5ef18..93c713c1f627 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -16,6 +16,8 @@ #include <linux/i2c.h> #include <linux/mii.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include "net_driver.h" #include "bitfield.h" #include "efx.h" diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 69d2d30e5ef1..ea55abd62ec7 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -854,7 +854,7 @@ static int meth_probe(struct platform_device *pdev) return 0; } -static int __exit meth_remove(struct platform_device *pdev) +static int meth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -866,7 +866,7 @@ static int __exit meth_remove(struct platform_device *pdev) static struct platform_driver meth_driver = { .probe = meth_probe, - .remove = __exit_p(meth_remove), + .remove = meth_remove, .driver = { .name = "meth", } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 45301cb98bc1..7074b40ebd7f 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -881,12 +881,14 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) info = &geneve->info; } + rcu_read_lock(); #if IS_ENABLED(CONFIG_IPV6) if (info->mode & IP_TUNNEL_INFO_IPV6) err = geneve6_xmit_skb(skb, dev, geneve, info); else #endif err = geneve_xmit_skb(skb, dev, geneve, info); + rcu_read_unlock(); if (likely(!err)) return NETDEV_TX_OK; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 2d3cdb026a99..bc05c895d958 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -859,15 +859,22 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (ret) goto out; + memset(&device_info, 0, sizeof(device_info)); + device_info.ring_size = ring_size; + device_info.num_chn = nvdev->num_chn; + device_info.max_num_vrss_chns = nvdev->num_chn; + ndevctx->start_remove = true; rndis_filter_device_remove(hdev, nvdev); + /* 'nvdev' has been freed in rndis_filter_device_remove() -> + * netvsc_device_remove () -> free_netvsc_device(). + * We mustn't access it before it's re-created in + * rndis_filter_device_add() -> netvsc_device_add(). 
+ */ + ndev->mtu = mtu; - memset(&device_info, 0, sizeof(device_info)); - device_info.ring_size = ring_size; - device_info.num_chn = nvdev->num_chn; - device_info.max_num_vrss_chns = nvdev->num_chn; rndis_filter_device_add(hdev, &device_info); out: diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 6e8f616be48e..1dba16bc7f8d 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -24,6 +24,7 @@ #include <linux/dma/pxa-dma.h> #include <linux/gpio.h> #include <linux/slab.h> +#include <linux/sched/clock.h> #include <net/irda/irda.h> #include <net/irda/irmod.h> diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 42da094b68dd..7ee514879531 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -40,6 +40,7 @@ #include <linux/moduleparam.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/ktime.h> #include <linux/types.h> #include <linux/time.h> diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a4bfc10b61dd..da85057680d6 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -9,7 +9,7 @@ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/cache.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/wait.h> diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a411b43a69eb..f9c0e62716ea 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/list.h> diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 08db4d687533..1da31dc47f86 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -66,7 +66,7 @@ #include <linux/uaccess.h> #include <linux/bitops.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 35b55a2fa1a1..4d4173d25dd0 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -8,7 +8,7 @@ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/cache.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/wait.h> diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 30863e378925..dc1b1dd9157c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -44,6 +44,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/poll.h> diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 6e98ede997d3..0dd510604118 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -346,7 +346,7 @@ static int ax88772_reset(struct usbnet *dev) if (ret < 0) goto out; - asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0); + ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0); if (ret < 0) goto out; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index e7b516342678..4f2e8141dbe2 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -52,7 +52,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/init.h> #include 
<linux/delay.h> diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 24d5272cdce5..805674550683 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -11,6 +11,7 @@ */ #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bf95016f442a..ea9890d61967 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -51,7 +51,7 @@ module_param(gso, bool, 0444); * at once, the weight is chosen so that the EWMA will be insensitive to short- * term, transient changes in packet size. */ -DECLARE_EWMA(pkt_len, 1, 64) +DECLARE_EWMA(pkt_len, 0, 64) /* With mergeable buffers we align buffer address and use the low bits to * encode its true size. Buffer size is up to 1 page so we need to align to @@ -2080,7 +2080,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names); + names, NULL); if (ret) goto err_find; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b7911994112a..e375560cc74e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2105,6 +2105,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, vxlan->cfg.port_max, true); + rcu_read_lock(); if (dst->sa.sa_family == AF_INET) { struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); struct rtable *rt; @@ -2127,7 +2128,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port, vni, &rt->dst, rt->rt_flags); if (err) - return; + goto out_unlock; } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { df = htons(IP_DF); } @@ -2166,7 +2167,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port, vni, ndst, rt6i_flags); if (err) - return; + goto out_unlock; } tos = ip_tunnel_ecn_encap(tos, old_iph, skb); @@ -2183,6 +2184,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, label, src_port, dst_port, !udp_sum); #endif } +out_unlock: + rcu_read_unlock(); return; drop: @@ -2191,6 +2194,7 @@ drop: return; tx_error: + rcu_read_unlock(); if (err == -ELOOP) dev->stats.collisions++; else if (err == -ENETUNREACH) diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 087eb266601f..4ca71bca39ac 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -78,7 +78,7 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fs.h> diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index dd902b43f8f7..0a8e29e9a0eb 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -18,6 +18,8 @@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/of.h> +#include <linux/dmi.h> +#include <linux/ctype.h> #include <asm/byteorder.h> #include "core.h" @@ -711,6 +713,72 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) return 0; } +static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data) +{ + struct ath10k *ar = data; + const char *bdf_ext; + const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC; + u8 bdf_enabled; + int i; + + if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE) + return; + + if (hdr->length != 
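The geneve hunk a little earlier and the vxlan hunk just above add rcu_read_lock()/rcu_read_unlock() around the tunnel transmit paths: the underlying UDP sockets are fetched with rcu_dereference(), so the xmit code must stay inside an RCU read-side critical section while it uses them, and the early returns become goto out_unlock so every exit drops the lock exactly once. A kernel-style sketch of the pattern with invented structure and field names:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

struct example_sock;

struct example_tunnel {
	struct example_sock __rcu *sock;	/* replaced elsewhere; readers use RCU */
};

/* Illustrative only: one unlock site, reached from every path out of the section. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct example_tunnel *t)
{
	struct example_sock *sock;
	int err = 0;

	rcu_read_lock();
	sock = rcu_dereference(t->sock);
	if (!sock) {
		err = -ENOTCONN;
		goto out_unlock;
	}
	/* ... build the outer headers and hand the skb to the tunnel socket ... */
out_unlock:
	rcu_read_unlock();
	if (err)
		kfree_skb(skb);
	return NETDEV_TX_OK;
}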
ATH10K_SMBIOS_BDF_EXT_LENGTH) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "wrong smbios bdf ext type length (%d).\n", + hdr->length); + return; + } + + bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET); + if (!bdf_enabled) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n"); + return; + } + + /* Only one string exists (per spec) */ + bdf_ext = (char *)hdr + hdr->length; + + if (memcmp(bdf_ext, magic, strlen(magic)) != 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant magic does not match.\n"); + return; + } + + for (i = 0; i < strlen(bdf_ext); i++) { + if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant name contains non ascii chars.\n"); + return; + } + } + + /* Copy extension name without magic suffix */ + if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic), + sizeof(ar->id.bdf_ext)) < 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", + bdf_ext); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found and validated bdf variant smbios_type 0x%x bdf %s\n", + ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext); +} + +static int ath10k_core_check_smbios(struct ath10k *ar) +{ + ar->id.bdf_ext[0] = '\0'; + dmi_walk(ath10k_core_check_bdfext, ar); + + if (ar->id.bdf_ext[0] == '\0') + return -ENODATA; + + return 0; +} + static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; @@ -1020,6 +1088,23 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, case ATH10K_BD_IE_BOARD: ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, boardname); + if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') { + /* try default bdf if variant was not found */ + char *s, *v = ",variant="; + char boardname2[100]; + + strlcpy(boardname2, boardname, + sizeof(boardname2)); + + s = strstr(boardname2, v); + if (s) + *s = '\0'; /* strip ",variant=%s" */ + + ret = ath10k_core_parse_bd_ie_board(ar, data, + ie_len, + boardname2); + } + if (ret == -ENOENT) /* no match found, continue */ break; @@ -1057,6 +1142,9 @@ err: static int ath10k_core_create_board_name(struct ath10k *ar, char *name, size_t name_len) { + /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ + char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; + if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, "bus=%s,bmi-chip-id=%d,bmi-board-id=%d", @@ -1066,12 +1154,15 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name, goto out; } + if (ar->id.bdf_ext[0] != '\0') + scnprintf(variant, sizeof(variant), ",variant=%s", + ar->id.bdf_ext); + scnprintf(name, name_len, - "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x", + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", ath10k_bus_str(ar->hif.bus), ar->id.vendor, ar->id.device, - ar->id.subsystem_vendor, ar->id.subsystem_device); - + ar->id.subsystem_vendor, ar->id.subsystem_device, variant); out: ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); @@ -2128,6 +2219,10 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } + ret = ath10k_core_check_smbios(ar); + if (ret) + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n"); + ret = ath10k_core_fetch_board_file(ar); if (ret) { ath10k_err(ar, "failed to fetch board file: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 757242ef52ac..88d14be7fcce 100644 --- 
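The ath10k core.c hunk above reads a vendor-specific SMBIOS entry to choose a board-data file variant: dmi_walk() invokes a callback for every SMBIOS structure, and the callback filters on the OEM-defined type, sanity-checks the formatted-area length, then reads the string set that follows the formatted area. A hedged sketch of that callback shape; the type, length and context structure below are placeholders rather than the QCA values:

#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/string.h>

#define EXAMPLE_SMBIOS_TYPE	0xF0	/* placeholder vendor-defined structure type */
#define EXAMPLE_STRUCT_LENGTH	0x08	/* placeholder formatted-area length */

struct example_ctx {
	char variant[32];
};

static void example_dmi_cb(const struct dmi_header *hdr, void *data)
{
	struct example_ctx *ctx = data;
	const char *str;

	if (hdr->type != EXAMPLE_SMBIOS_TYPE || hdr->length != EXAMPLE_STRUCT_LENGTH)
		return;

	/* The string set starts right after the formatted area. */
	str = (const char *)hdr + hdr->length;
	strscpy(ctx->variant, str, sizeof(ctx->variant));
}

static int example_read_variant(struct example_ctx *ctx)
{
	ctx->variant[0] = '\0';
	dmi_walk(example_dmi_cb, ctx);
	return ctx->variant[0] ? 0 : -ENODATA;
}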
a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -69,6 +69,23 @@ #define ATH10K_NAPI_BUDGET 64 #define ATH10K_NAPI_QUOTA_LIMIT 60 +/* SMBIOS type containing Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8 + +/* SMBIOS type structure length (excluding strings-set) */ +#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9 + +/* Offset pointing to Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8 + +/* Board Data File Name Extension string length. + * String format: BDF_<Customer ID>_<Extension>\0 + */ +#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20 + +/* The magic used by QCA spec */ +#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_" + struct ath10k; enum ath10k_bus { @@ -798,6 +815,8 @@ struct ath10k { bool bmi_ids_valid; u8 bmi_board_id; u8 bmi_chip_id; + + char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; } id; int fw_api; diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index 67fedb61fcc0..979800c6f57f 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -1252,7 +1252,7 @@ struct ath5k_statistics { #define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */ #define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */ -DECLARE_EWMA(beacon_rssi, 1024, 8) +DECLARE_EWMA(beacon_rssi, 10, 8) /* Driver state associated with an instance of a device */ struct ath5k_hw { diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index b7fe0af4cb24..363b30a549c2 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -20,6 +20,7 @@ #include <linux/moduleparam.h> #include <linux/inetdevice.h> #include <linux/export.h> +#include <linux/sched/signal.h> #include "core.h" #include "cfg80211.h" diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index e97ab2b91663..cdafebb9c936 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -36,7 +36,7 @@ #include <linux/etherdevice.h> #include <linux/firmware.h> #include <linux/workqueue.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/skbuff.h> #include <linux/dma-mapping.h> #include <linux/slab.h> diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index c5744b45ec8f..65689469c5a1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -22,7 +22,7 @@ #include <linux/pci_ids.h> #include <linux/netdevice.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_func.h> diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 544ef7adde7d..04dfd040a650 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -43,7 +43,7 @@ #include <linux/delay.h> #include <linux/random.h> #include <linux/wait.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/rtnetlink.h> #include <linux/wireless.h> #include <net/iw_handler.h> diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index 
a5656bc0e6aa..b2c6b065b542 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c @@ -2,7 +2,7 @@ #include <linux/slab.h> #include <linux/types.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/module.h> diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0889fc81ce9e..50c219fb1a52 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3056,6 +3056,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2, static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; + const char *hwname = NULL; param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; @@ -3069,8 +3070,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; - if (info->attrs[HWSIM_ATTR_RADIO_NAME]) - param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); + if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { + hwname = kasprintf(GFP_KERNEL, "%.*s", + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + if (!hwname) + return -ENOMEM; + param.hwname = hwname; + } if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) param.use_chanctx = true; @@ -3098,11 +3105,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) s64 idx = -1; const char *hwname = NULL; - if (info->attrs[HWSIM_ATTR_RADIO_ID]) + if (info->attrs[HWSIM_ATTR_RADIO_ID]) { idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); - else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) - hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); - else + } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { + hwname = kasprintf(GFP_KERNEL, "%.*s", + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + if (!hwname) + return -ENOMEM; + } else return -EINVAL; spin_lock_bh(&hwsim_radio_lock); @@ -3111,7 +3122,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) if (data->idx != idx) continue; } else { - if (strcmp(hwname, wiphy_name(data->hw->wiphy))) + if (!hwname || + strcmp(hwname, wiphy_name(data->hw->wiphy))) continue; } @@ -3122,10 +3134,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) spin_unlock_bh(&hwsim_radio_lock); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), info); + kfree(hwname); return 0; } spin_unlock_bh(&hwsim_radio_lock); + kfree(hwname); return -ENODEV; } diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c index abdd0cf710bf..fac28bd8fbee 100644 --- a/drivers/net/wireless/marvell/mwifiex/txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/txrx.c @@ -346,9 +346,7 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv, return; spin_lock_irqsave(&priv->ack_status_lock, flags); - ack_skb = idr_find(&priv->ack_status_frames, tx_status->tx_token_id); - if (ack_skb) - idr_remove(&priv->ack_status_frames, tx_status->tx_token_id); + ack_skb = idr_remove(&priv->ack_status_frames, tx_status->tx_token_id); spin_unlock_irqrestore(&priv->ack_status_lock, flags); if (ack_skb) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 
26869b3bef45..340787894c69 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -257,7 +257,7 @@ struct link_qual { int tx_failed; }; -DECLARE_EWMA(rssi, 1024, 8) +DECLARE_EWMA(rssi, 10, 8) /* * Antenna settings about the currently active link. diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index e8c5dddc54ba..3c4c58b9fe76 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -39,7 +39,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag, unsigned long flags; bool found; - new = kmalloc(sizeof(*entry), GFP_KERNEL); + new = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!new) return; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index a2d326760a72..829b26cd4549 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -31,6 +31,7 @@ #include "common.h" #include <linux/kthread.h> +#include <linux/sched/task.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index bb854f92f5a5..d2d7cd9145b1 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -492,24 +492,31 @@ static int backend_create_xenvif(struct backend_info *be) static void backend_disconnect(struct backend_info *be) { - if (be->vif) { + struct xenvif *vif = be->vif; + + if (vif) { unsigned int queue_index; + struct xenvif_queue *queues; - xen_unregister_watchers(be->vif); + xen_unregister_watchers(vif); #ifdef CONFIG_DEBUG_FS - xenvif_debugfs_delif(be->vif); + xenvif_debugfs_delif(vif); #endif /* CONFIG_DEBUG_FS */ - xenvif_disconnect_data(be->vif); - for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) - xenvif_deinit_queue(&be->vif->queues[queue_index]); + xenvif_disconnect_data(vif); + for (queue_index = 0; + queue_index < vif->num_queues; + ++queue_index) + xenvif_deinit_queue(&vif->queues[queue_index]); + + spin_lock(&vif->lock); + queues = vif->queues; + vif->num_queues = 0; + vif->queues = NULL; + spin_unlock(&vif->lock); - spin_lock(&be->vif->lock); - vfree(be->vif->queues); - be->vif->num_queues = 0; - be->vif->queues = NULL; - spin_unlock(&be->vif->lock); + vfree(queues); - xenvif_disconnect_ctrl(be->vif); + xenvif_disconnect_ctrl(vif); } } diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index ce3e8dfa10ad..1b481a5fb966 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1700,6 +1700,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) struct device *create_namespace_pmem(struct nd_region *nd_region, struct nd_namespace_label *nd_label) { + u64 altcookie = nd_region_interleave_set_altcookie(nd_region); u64 cookie = nd_region_interleave_set_cookie(nd_region); struct nd_label_ent *label_ent; struct nd_namespace_pmem *nspm; @@ -1718,7 +1719,11 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, if (__le64_to_cpu(nd_label->isetcookie) != cookie) { dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", nd_label->uuid); - return ERR_PTR(-EAGAIN); + if (__le64_to_cpu(nd_label->isetcookie) != altcookie) + return ERR_PTR(-EAGAIN); + + dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", + nd_label->uuid); } nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); @@ -1733,9 +1738,14 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, res->name = 
dev_name(&nd_region->dev); res->flags = IORESOURCE_MEM; - for (i = 0; i < nd_region->ndr_mappings; i++) - if (!has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i)) - break; + for (i = 0; i < nd_region->ndr_mappings; i++) { + if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i)) + continue; + if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i)) + continue; + break; + } + if (i < nd_region->ndr_mappings) { struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 35dd75057e16..2a99c83aa19f 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -328,6 +328,7 @@ struct nd_region *to_nd_region(struct device *dev); int nd_region_to_nstype(struct nd_region *nd_region); int nd_region_register_namespaces(struct nd_region *nd_region, int *err); u64 nd_region_interleave_set_cookie(struct nd_region *nd_region); +u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region); void nvdimm_bus_lock(struct device *dev); void nvdimm_bus_unlock(struct device *dev); bool is_nvdimm_bus_locked(struct device *dev); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 7cd705f3247c..b7cb5066d961 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -505,6 +505,15 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region) return 0; } +u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) +{ + struct nd_interleave_set *nd_set = nd_region->nd_set; + + if (nd_set) + return nd_set->altcookie; + return 0; +} + void nd_mapping_free_labels(struct nd_mapping *nd_mapping) { struct nd_label_ent *label_ent, *e; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 25ec4e585220..9b3b57fef446 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2344,6 +2344,53 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_kill_queues); +void nvme_unfreeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_unfreeze_queue(ns->queue); + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_unfreeze); + +void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) { + timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); + if (timeout <= 0) + break; + } + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); + +void nvme_wait_freeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_freeze_queue_wait(ns->queue); + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_wait_freeze); + +void nvme_start_freeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_freeze_queue_start(ns->queue); + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_start_freeze); + void nvme_stop_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index a3da1e90b99d..2aa20e3e5675 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -294,6 +294,10 @@ void nvme_queue_async_events(struct nvme_ctrl *ctrl); void nvme_stop_queues(struct nvme_ctrl *ctrl); void 
nvme_start_queues(struct nvme_ctrl *ctrl); void nvme_kill_queues(struct nvme_ctrl *ctrl); +void nvme_unfreeze(struct nvme_ctrl *ctrl); +void nvme_wait_freeze(struct nvme_ctrl *ctrl); +void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); +void nvme_start_freeze(struct nvme_ctrl *ctrl); #define NVME_QID_ANY -1 struct request *nvme_alloc_request(struct request_queue *q, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 57a1af52b06e..26a5fd05fe88 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1038,9 +1038,10 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, - int depth) + int depth, int node) { - struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); + struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL, + node); if (!nvmeq) return NULL; @@ -1217,7 +1218,8 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) nvmeq = dev->queues[0]; if (!nvmeq) { - nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); + nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, + dev_to_node(dev->dev)); if (!nvmeq) return -ENOMEM; } @@ -1309,7 +1311,9 @@ static int nvme_create_io_queues(struct nvme_dev *dev) int ret = 0; for (i = dev->queue_count; i <= dev->max_qid; i++) { - if (!nvme_alloc_queue(dev, i, dev->q_depth)) { + /* vector == qid - 1, match nvme_create_queue */ + if (!nvme_alloc_queue(dev, i, dev->q_depth, + pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) { ret = -ENOMEM; break; } @@ -1671,21 +1675,34 @@ static void nvme_pci_disable(struct nvme_dev *dev) static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) { int i, queues; - u32 csts = -1; + bool dead = true; + struct pci_dev *pdev = to_pci_dev(dev->dev); del_timer_sync(&dev->watchdog_timer); mutex_lock(&dev->shutdown_lock); - if (pci_is_enabled(to_pci_dev(dev->dev))) { - nvme_stop_queues(&dev->ctrl); - csts = readl(dev->bar + NVME_REG_CSTS); + if (pci_is_enabled(pdev)) { + u32 csts = readl(dev->bar + NVME_REG_CSTS); + + if (dev->ctrl.state == NVME_CTRL_LIVE) + nvme_start_freeze(&dev->ctrl); + dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || + pdev->error_state != pci_channel_io_normal); } + /* + * Give the controller a chance to complete all entered requests if + * doing a safe shutdown. + */ + if (!dead && shutdown) + nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); + nvme_stop_queues(&dev->ctrl); + queues = dev->online_queues - 1; for (i = dev->queue_count - 1; i > 0; i--) nvme_suspend_queue(dev->queues[i]); - if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { + if (dead) { /* A device might become IO incapable very soon during * probe, before the admin queue is configured. Thus, * queue_count can be 0 here. @@ -1700,6 +1717,14 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); + + /* + * The driver will not be starting up queues again if shutting down so + * must flush all entered requests to their failed completion to avoid + * deadlocking blk-mq hot-cpu notifier. 
+ */ + if (shutdown) + nvme_start_queues(&dev->ctrl); mutex_unlock(&dev->shutdown_lock); } @@ -1822,7 +1847,9 @@ static void nvme_reset_work(struct work_struct *work) nvme_remove_namespaces(&dev->ctrl); } else { nvme_start_queues(&dev->ctrl); + nvme_wait_freeze(&dev->ctrl); nvme_dev_add(dev); + nvme_unfreeze(&dev->ctrl); } if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 94e524fea568..a7bcff45f437 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -13,6 +13,8 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> +#include <linux/rculist.h> + #include <generated/utsrelease.h> #include <asm/unaligned.h> #include "nvmet.h" diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 5267ce20c12d..11b0a0a5f661 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -14,6 +14,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/random.h> +#include <linux/rculist.h> + #include "nvmet.h" static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 642478d35e99..ac27f3d3fbb4 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -31,6 +31,8 @@ #include <linux/fs.h> #include <linux/oprofile.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/gfp.h> #include "oprofile_stats.h" diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 0581461c3a67..eda2633a393d 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -23,6 +23,8 @@ #include <linux/oprofile.h> #include <linux/errno.h> +#include <asm/ptrace.h> + #include "event_buffer.h" #include "cpu_buffer.h" #include "buffer_sync.h" diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index 67935fbbbcab..32888f2bd1a9 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c @@ -14,7 +14,7 @@ #include <linux/vmalloc.h> #include <linux/oprofile.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/capability.h> #include <linux/dcookies.h> #include <linux/fs.h> diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index aeb073b5fe16..e32ca2ef9e54 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1539,7 +1539,7 @@ static int __init ccio_probe(struct parisc_device *dev) ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); if (ioc == NULL) { printk(KERN_ERR MODULE_NAME ": memory allocation failure\n"); - return 1; + return -ENOMEM; } ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn"; @@ -1554,6 +1554,10 @@ static int __init ccio_probe(struct parisc_device *dev) ioc->hw_path = dev->hw_path; ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096); + if (!ioc->ioc_regs) { + kfree(ioc); + return -ENOMEM; + } ccio_ioc_init(ioc); ccio_init_resources(ioc); hppa_dma_ops = &ccio_ops; diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index 103095bbe8c0..7e2f6d5a6aaf 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c @@ -14,16 +14,16 @@ * Wax ASIC also includes a PS/2 and RS-232 controller, but those are * dealt with elsewhere; this file is concerned only with the EISA portions * of Wax. 
- * - * + * + * * HINT: * ----- * To allow an ISA card to work properly in the EISA slot you need to - * set an edge trigger level. This may be done on the palo command line - * by adding the kernel parameter "eisa_irq_edge=n,n2,[...]]", with + * set an edge trigger level. This may be done on the palo command line + * by adding the kernel parameter "eisa_irq_edge=n,n2,[...]]", with * n and n2 as the irq levels you want to use. - * - * Example: "eisa_irq_edge=10,11" allows ISA cards to operate at + * + * Example: "eisa_irq_edge=10,11" allows ISA cards to operate at * irq levels 10 and 11. */ @@ -46,9 +46,9 @@ #include <asm/eisa_eeprom.h> #if 0 -#define EISA_DBG(msg, arg... ) printk(KERN_DEBUG "eisa: " msg , ## arg ) +#define EISA_DBG(msg, arg...) printk(KERN_DEBUG "eisa: " msg, ## arg) #else -#define EISA_DBG(msg, arg... ) +#define EISA_DBG(msg, arg...) #endif #define SNAKES_EEPROM_BASE_ADDR 0xF0810400 @@ -108,7 +108,7 @@ void eisa_out8(unsigned char data, unsigned short port) void eisa_out16(unsigned short data, unsigned short port) { - if (EISA_bus) + if (EISA_bus) gsc_writew(cpu_to_le16(data), eisa_permute(port)); } @@ -135,9 +135,9 @@ static int master_mask; static int slave_mask; /* the trig level can be set with the - * eisa_irq_edge=n,n,n commandline parameter - * We should really read this from the EEPROM - * in the furure. + * eisa_irq_edge=n,n,n commandline parameter + * We should really read this from the EEPROM + * in the furure. */ /* irq 13,8,2,1,0 must be edge */ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */ @@ -170,7 +170,7 @@ static void eisa_unmask_irq(struct irq_data *d) unsigned int irq = d->irq; unsigned long flags; EISA_DBG("enable irq %d\n", irq); - + spin_lock_irqsave(&eisa_irq_lock, flags); if (irq & 8) { slave_mask &= ~(1 << (irq&7)); @@ -194,7 +194,7 @@ static irqreturn_t eisa_irq(int wax_irq, void *intr_dev) { int irq = gsc_readb(0xfc01f000); /* EISA supports 16 irqs */ unsigned long flags; - + spin_lock_irqsave(&eisa_irq_lock, flags); /* read IRR command */ eisa_out8(0x0a, 0x20); @@ -202,31 +202,31 @@ static irqreturn_t eisa_irq(int wax_irq, void *intr_dev) EISA_DBG("irq IAR %02x 8259-1 irr %02x 8259-2 irr %02x\n", irq, eisa_in8(0x20), eisa_in8(0xa0)); - + /* read ISR command */ eisa_out8(0x0a, 0x20); eisa_out8(0x0a, 0xa0); EISA_DBG("irq 8259-1 isr %02x imr %02x 8259-2 isr %02x imr %02x\n", eisa_in8(0x20), eisa_in8(0x21), eisa_in8(0xa0), eisa_in8(0xa1)); - + irq &= 0xf; - + /* mask irq and write eoi */ if (irq & 8) { slave_mask |= (1 << (irq&7)); eisa_out8(slave_mask, 0xa1); eisa_out8(0x60 | (irq&7),0xa0);/* 'Specific EOI' to slave */ - eisa_out8(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */ - + eisa_out8(0x62, 0x20); /* 'Specific EOI' to master-IRQ2 */ + } else { master_mask |= (1 << (irq&7)); eisa_out8(master_mask, 0x21); - eisa_out8(0x60|irq,0x20); /* 'Specific EOI' to master */ + eisa_out8(0x60|irq, 0x20); /* 'Specific EOI' to master */ } spin_unlock_irqrestore(&eisa_irq_lock, flags); generic_handle_irq(irq); - + spin_lock_irqsave(&eisa_irq_lock, flags); /* unmask */ if (irq & 8) { @@ -254,44 +254,44 @@ static struct irqaction irq2_action = { static void init_eisa_pic(void) { unsigned long flags; - + spin_lock_irqsave(&eisa_irq_lock, flags); eisa_out8(0xff, 0x21); /* mask during init */ eisa_out8(0xff, 0xa1); /* mask during init */ - + /* master pic */ - eisa_out8(0x11,0x20); /* ICW1 */ - eisa_out8(0x00,0x21); /* ICW2 */ - eisa_out8(0x04,0x21); /* ICW3 */ - eisa_out8(0x01,0x21); /* ICW4 */ - eisa_out8(0x40,0x20); /* OCW2 
*/ - + eisa_out8(0x11, 0x20); /* ICW1 */ + eisa_out8(0x00, 0x21); /* ICW2 */ + eisa_out8(0x04, 0x21); /* ICW3 */ + eisa_out8(0x01, 0x21); /* ICW4 */ + eisa_out8(0x40, 0x20); /* OCW2 */ + /* slave pic */ - eisa_out8(0x11,0xa0); /* ICW1 */ - eisa_out8(0x08,0xa1); /* ICW2 */ - eisa_out8(0x02,0xa1); /* ICW3 */ - eisa_out8(0x01,0xa1); /* ICW4 */ - eisa_out8(0x40,0xa0); /* OCW2 */ - + eisa_out8(0x11, 0xa0); /* ICW1 */ + eisa_out8(0x08, 0xa1); /* ICW2 */ + eisa_out8(0x02, 0xa1); /* ICW3 */ + eisa_out8(0x01, 0xa1); /* ICW4 */ + eisa_out8(0x40, 0xa0); /* OCW2 */ + udelay(100); - - slave_mask = 0xff; - master_mask = 0xfb; + + slave_mask = 0xff; + master_mask = 0xfb; eisa_out8(slave_mask, 0xa1); /* OCW1 */ eisa_out8(master_mask, 0x21); /* OCW1 */ - + /* setup trig level */ EISA_DBG("EISA edge/level %04x\n", eisa_irq_level); - + eisa_out8(eisa_irq_level&0xff, 0x4d0); /* Set all irq's to edge */ - eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1); - + eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1); + EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); EISA_DBG("pic0 edge/level %02x\n", eisa_in8(0x4d0)); EISA_DBG("pic1 edge/level %02x\n", eisa_in8(0x4d1)); - + spin_unlock_irqrestore(&eisa_irq_lock, flags); } @@ -305,7 +305,7 @@ static int __init eisa_probe(struct parisc_device *dev) char *name = is_mongoose(dev) ? "Mongoose" : "Wax"; - printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n", + printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n", name, (unsigned long)dev->hpa.start); eisa_dev.hba.dev = dev; @@ -334,16 +334,16 @@ static int __init eisa_probe(struct parisc_device *dev) result = request_irq(dev->irq, eisa_irq, IRQF_SHARED, "EISA", &eisa_dev); if (result) { printk(KERN_ERR "EISA: request_irq failed!\n"); - return result; + goto error_release; } - + /* Reserve IRQ2 */ setup_irq(2, &irq2_action); for (i = 0; i < 16; i++) { irq_set_chip_and_handler(i, &eisa_interrupt_type, handle_simple_irq); } - + EISA_bus = 1; if (dev->num_addrs) { @@ -358,6 +358,11 @@ static int __init eisa_probe(struct parisc_device *dev) } } eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH); + if (!eisa_eeprom_addr) { + result = -ENOMEM; + printk(KERN_ERR "EISA: ioremap_nocache failed!\n"); + goto error_free_irq; + } result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space, &eisa_dev.hba.lmmio_space); init_eisa_pic(); @@ -372,11 +377,20 @@ static int __init eisa_probe(struct parisc_device *dev) eisa_dev.root.dma_mask = 0xffffffff; /* wild guess */ if (eisa_root_register (&eisa_dev.root)) { printk(KERN_ERR "EISA: Failed to register EISA root\n"); - return -1; + result = -ENOMEM; + goto error_iounmap; } } - + return 0; + +error_iounmap: + iounmap(eisa_eeprom_addr); +error_free_irq: + free_irq(dev->irq, &eisa_dev); +error_release: + release_resource(&eisa_dev.hba.io_space); + return result; } static const struct parisc_device_id eisa_tbl[] = { @@ -404,7 +418,7 @@ void eisa_make_irq_level(int num) { if (eisa_irq_configured& (1<<num)) { printk(KERN_WARNING - "IRQ %d polarity configured twice (last to level)\n", + "IRQ %d polarity configured twice (last to level)\n", num); } eisa_irq_level |= (1<<num); /* set the corresponding bit */ @@ -414,7 +428,7 @@ void eisa_make_irq_level(int num) void eisa_make_irq_edge(int num) { if (eisa_irq_configured& (1<<num)) { - printk(KERN_WARNING + printk(KERN_WARNING "IRQ %d polarity configured twice (last to edge)\n", num); } @@ -430,18 +444,18 @@ static int __init eisa_irq_setup(char *str) EISA_DBG("IRQ setup\n"); while 
(cur != NULL) { char *pe; - + val = (int) simple_strtoul(cur, &pe, 0); if (val > 15 || val < 0) { printk(KERN_ERR "eisa: EISA irq value are 0-15\n"); continue; } - if (val == 2) { + if (val == 2) { val = 9; } eisa_make_irq_edge(val); /* clear the corresponding bit */ EISA_DBG("setting IRQ %d to edge-triggered mode\n", val); - + if ((cur = strchr(cur, ','))) { cur++; } else { diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c index ef31b77404ef..e2a3112f1c98 100644 --- a/drivers/parisc/power.c +++ b/drivers/parisc/power.c @@ -39,7 +39,7 @@ #include <linux/kernel.h> #include <linux/notifier.h> #include <linux/reboot.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kthread.h> #include <linux/pm.h> diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index d998d0ed2bec..46eb15fb57ff 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -23,7 +23,7 @@ #include <linux/parport.h> #include <linux/delay.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <asm/current.h> #include <linux/uaccess.h> diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index f9fd4b33a546..74cc6dd982d2 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c @@ -23,7 +23,7 @@ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/timer.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #undef DEBUG /* undef me for production */ diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index 75071605d22f..a959224d011b 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c @@ -17,7 +17,7 @@ #include <linux/module.h> #include <linux/parport.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #undef DEBUG /* undef me for production */ diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c index 30e981be14c2..dcbeeb220dda 100644 --- a/drivers/parport/parport_ip32.c +++ b/drivers/parport/parport_ip32.c @@ -102,7 +102,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/parport.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 3e56e7deab8e..9d42dfe65d44 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -44,7 +44,7 @@ #include <linux/module.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/interrupt.h> diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 3308427ed9f7..bc090daa850a 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -27,7 +27,7 @@ #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kmod.h> #include <linux/device.h> diff --git a/drivers/pci/access.c b/drivers/pci/access.c index b9dd37c8c9ce..8b7382705bf2 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -1,7 +1,7 @@ #include <linux/delay.h> #include <linux/pci.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/wait.h> diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 
001c91a945aa..993b650ef275 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c @@ -668,6 +668,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + ep->pci = pci; ep->ops = (const struct exynos_pcie_ops *) of_device_get_match_data(dev); diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c index 3ab6761db9e8..801e46cd266d 100644 --- a/drivers/pci/dwc/pci-imx6.c +++ b/drivers/pci/dwc/pci-imx6.c @@ -605,6 +605,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + imx6_pcie->pci = pci; imx6_pcie->variant = (enum imx6_pcie_variants)of_device_get_match_data(dev); diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c index 8dc66409182d..fcc9723bad6e 100644 --- a/drivers/pci/dwc/pci-keystone.c +++ b/drivers/pci/dwc/pci-keystone.c @@ -401,6 +401,8 @@ static int __init ks_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + ks_pcie->pci = pci; + /* initialize SerDes Phy if present */ phy = devm_phy_get(dev, "pcie-phy"); if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER) diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c index 175c09e3a932..c32e392a0ae6 100644 --- a/drivers/pci/dwc/pci-layerscape.c +++ b/drivers/pci/dwc/pci-layerscape.c @@ -280,6 +280,8 @@ static int __init ls_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = pcie->drvdata->dw_pcie_ops; + pcie->pci = pci; + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); pci->dbi_base = devm_ioremap_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c index 66bac6fbfa9f..f110e3b24a26 100644 --- a/drivers/pci/dwc/pcie-armada8k.c +++ b/drivers/pci/dwc/pcie-armada8k.c @@ -220,6 +220,8 @@ static int armada8k_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + pcie->pci = pci; + pcie->clk = devm_clk_get(dev, NULL); if (IS_ERR(pcie->clk)) return PTR_ERR(pcie->clk); diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c index 59ecc9e66436..fcd3ef845883 100644 --- a/drivers/pci/dwc/pcie-artpec6.c +++ b/drivers/pci/dwc/pcie-artpec6.c @@ -253,6 +253,8 @@ static int artpec6_pcie_probe(struct platform_device *pdev) pci->dev = dev; + artpec6_pcie->pci = pci; + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_ioremap_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c index 65250f63515c..b6c832ba39dd 100644 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ b/drivers/pci/dwc/pcie-designware-plat.c @@ -104,6 +104,8 @@ static int dw_plat_pcie_probe(struct platform_device *pdev) pci->dev = dev; + dw_plat_pcie->pci = pci; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pci->dbi_base = devm_ioremap_resource(dev, res); if (IS_ERR(pci->dbi_base)) diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c index e3e4fedd9f68..fd66a3199db7 100644 --- a/drivers/pci/dwc/pcie-hisi.c +++ b/drivers/pci/dwc/pcie-hisi.c @@ -284,6 +284,8 @@ static int hisi_pcie_probe(struct platform_device *pdev) driver = dev->driver; + hisi_pcie->pci = pci; + hisi_pcie->soc_ops = of_device_get_match_data(dev); hisi_pcie->subctrl = diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index e36abe0d9d6f..67eb7f5926dd 100644 --- 
a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c @@ -686,6 +686,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) pci->ops = &dw_pcie_ops; pp = &pci->pp; + pcie->pci = pci; + pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev); pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c index 348f9c5e0433..eaa4ea8e2ea4 100644 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ b/drivers/pci/dwc/pcie-spear13xx.c @@ -247,6 +247,8 @@ static int spear13xx_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + spear13xx_pcie->pci = pci; + spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); if (IS_ERR(spear13xx_pcie->phy)) { ret = PTR_ERR(spear13xx_pcie->phy); diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index 5043b5f00ed8..75ec5cea26f6 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -57,10 +57,14 @@ #define TLP_WRITE_TAG 0x10 #define RP_DEVFN 0 #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) -#define TLP_CFG_DW0(pcie, bus) \ +#define TLP_CFGRD_DW0(pcie, bus) \ ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \ : TLP_FMTTYPE_CFGRD1) << 24) | \ TLP_PAYLOAD_SIZE) +#define TLP_CFGWR_DW0(pcie, bus) \ + ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGWR0 \ + : TLP_FMTTYPE_CFGWR1) << 24) | \ + TLP_PAYLOAD_SIZE) #define TLP_CFG_DW1(pcie, tag, be) \ (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) #define TLP_CFG_DW2(bus, devfn, offset) \ @@ -222,7 +226,7 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, { u32 headers[TLP_HDR_SIZE]; - headers[0] = TLP_CFG_DW0(pcie, bus); + headers[0] = TLP_CFGRD_DW0(pcie, bus); headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); headers[2] = TLP_CFG_DW2(bus, devfn, where); @@ -237,7 +241,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, u32 headers[TLP_HDR_SIZE]; int ret; - headers[0] = TLP_CFG_DW0(pcie, bus); + headers[0] = TLP_CFGWR_DW0(pcie, bus); headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); headers[2] = TLP_CFG_DW2(bus, devfn, where); diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index 7ec8a8f72c69..95f689f53920 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h index 9103a7b9f3b9..48c8a066a6b7 100644 --- a/drivers/pci/hotplug/cpqphp.h +++ b/drivers/pci/hotplug/cpqphp.h @@ -32,7 +32,7 @@ #include <asm/io.h> /* for read? and write? 
functions */ #include <linux/delay.h> /* for delays */ #include <linux/mutex.h> -#include <linux/sched.h> /* for signal_pending() */ +#include <linux/sched/signal.h> /* for signal_pending() */ #define MY_NAME "cpqphp" diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 37d70b5ad22f..06109d40c4ac 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -33,7 +33,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/delay.h> -#include <linux/sched.h> /* signal_pending() */ +#include <linux/sched/signal.h> /* signal_pending() */ #include <linux/pcieport_if.h> #include <linux/mutex.h> #include <linux/workqueue.h> diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index d2961ef39a3a..7c203198b582 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -35,9 +35,11 @@ static void pnv_php_register(struct device_node *dn); static void pnv_php_unregister_one(struct device_node *dn); static void pnv_php_unregister(struct device_node *dn); -static void pnv_php_disable_irq(struct pnv_php_slot *php_slot) +static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, + bool disable_device) { struct pci_dev *pdev = php_slot->pdev; + int irq = php_slot->irq; u16 ctrl; if (php_slot->irq > 0) { @@ -56,10 +58,14 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot) php_slot->wq = NULL; } - if (pdev->msix_enabled) - pci_disable_msix(pdev); - else if (pdev->msi_enabled) - pci_disable_msi(pdev); + if (disable_device || irq > 0) { + if (pdev->msix_enabled) + pci_disable_msix(pdev); + else if (pdev->msi_enabled) + pci_disable_msi(pdev); + + pci_disable_device(pdev); + } } static void pnv_php_free_slot(struct kref *kref) @@ -68,7 +74,7 @@ static void pnv_php_free_slot(struct kref *kref) struct pnv_php_slot, kref); WARN_ON(!list_empty(&php_slot->children)); - pnv_php_disable_irq(php_slot); + pnv_php_disable_irq(php_slot, false); kfree(php_slot->name); kfree(php_slot); } @@ -76,7 +82,7 @@ static void pnv_php_free_slot(struct kref *kref) static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot) { - if (WARN_ON(!php_slot)) + if (!php_slot) return; kref_put(&php_slot->kref, pnv_php_free_slot); @@ -430,9 +436,21 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) if (ret) return ret; - /* Proceed if there have nothing behind the slot */ - if (presence == OPAL_PCI_SLOT_EMPTY) + /* + * Proceed if there have nothing behind the slot. However, + * we should leave the slot in registered state at the + * beginning. Otherwise, the PCI devices inserted afterwards + * won't be probed and populated. 
+ */ + if (presence == OPAL_PCI_SLOT_EMPTY) { + if (!php_slot->power_state_check) { + php_slot->power_state_check = true; + + return 0; + } + goto scan; + } /* * If the power supply to the slot is off, we can't detect @@ -705,10 +723,15 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) if (sts & PCI_EXP_SLTSTA_DLLSC) { pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts); added = !!(lsts & PCI_EXP_LNKSTA_DLLLA); - } else if (sts & PCI_EXP_SLTSTA_PDC) { + } else if (!(php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) && + (sts & PCI_EXP_SLTSTA_PDC)) { ret = pnv_pci_get_presence_state(php_slot->id, &presence); - if (!ret) + if (ret) { + dev_warn(&pdev->dev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n", + php_slot->name, ret, sts); return IRQ_HANDLED; + } + added = !!(presence == OPAL_PCI_SLOT_PRESENT); } else { return IRQ_NONE; @@ -752,6 +775,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) { struct pci_dev *pdev = php_slot->pdev; + u32 broken_pdc = 0; u16 sts, ctrl; int ret; @@ -759,29 +783,44 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); if (!php_slot->wq) { dev_warn(&pdev->dev, "Cannot alloc workqueue\n"); - pnv_php_disable_irq(php_slot); + pnv_php_disable_irq(php_slot, true); return; } + /* Check PDC (Presence Detection Change) is broken or not */ + ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc", + &broken_pdc); + if (!ret && broken_pdc) + php_slot->flags |= PNV_PHP_FLAG_BROKEN_PDC; + /* Clear pending interrupts */ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts); - sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); + if (php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) + sts |= PCI_EXP_SLTSTA_DLLSC; + else + sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts); /* Request the interrupt */ ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED, php_slot->name, php_slot); if (ret) { - pnv_php_disable_irq(php_slot); + pnv_php_disable_irq(php_slot, true); dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq); return; } /* Enable the interrupts */ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl); - ctrl |= (PCI_EXP_SLTCTL_HPIE | - PCI_EXP_SLTCTL_PDCE | - PCI_EXP_SLTCTL_DLLSCE); + if (php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) { + ctrl &= ~PCI_EXP_SLTCTL_PDCE; + ctrl |= (PCI_EXP_SLTCTL_HPIE | + PCI_EXP_SLTCTL_DLLSCE); + } else { + ctrl |= (PCI_EXP_SLTCTL_HPIE | + PCI_EXP_SLTCTL_PDCE | + PCI_EXP_SLTCTL_DLLSCE); + } pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl); /* The interrupt is initialized successfully when @irq is valid */ @@ -793,6 +832,14 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot) struct pci_dev *pdev = php_slot->pdev; int irq, ret; + /* + * The MSI/MSIx interrupt might have been occupied by other + * drivers. Don't populate the surprise hotplug capability + * in that case. 
+ */ + if (pci_dev_msi_enabled(pdev)) + return; + ret = pci_enable_device(pdev); if (ret) { dev_warn(&pdev->dev, "Error %d enabling device\n", ret); diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 4da8fc601467..70c7ea6af034 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -33,7 +33,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/delay.h> -#include <linux/sched.h> /* signal_pending(), struct timer_list */ +#include <linux/sched/signal.h> /* signal_pending(), struct timer_list */ #include <linux/mutex.h> #include <linux/workqueue.h> diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 980eaf588281..d571bc330686 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1298,6 +1298,22 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) } EXPORT_SYMBOL(pci_irq_get_affinity); +/** + * pci_irq_get_node - return the numa node of a particular msi vector + * @pdev: PCI device to operate on + * @vec: device-relative interrupt vector index (0-based). + */ +int pci_irq_get_node(struct pci_dev *pdev, int vec) +{ + const struct cpumask *mask; + + mask = pci_irq_get_affinity(pdev, vec); + if (mask) + return local_memory_node(cpu_to_node(cpumask_first(mask))); + return dev_to_node(&pdev->dev); +} +EXPORT_SYMBOL(pci_irq_get_node); + struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) { return to_pci_dev(desc->dev); diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 6d9335865880..9612b84bc3e0 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -20,6 +20,7 @@ #include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/sched/clock.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <linux/irqdesc.h> diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 55663b3d7282..58dcee562d64 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -68,6 +68,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/tick.h> diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c index f2ab435954f6..73e496a72113 100644 --- a/drivers/ps3/ps3-sys-manager.c +++ b/drivers/ps3/ps3-sys-manager.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/workqueue.h> #include <linux/reboot.h> +#include <linux/sched/signal.h> #include <asm/firmware.h> #include <asm/lv1call.h> diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 2d0cfaa6d84c..42e37c20b361 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -76,7 +76,9 @@ config PWM_ATMEL_TCB config PWM_BCM_IPROC tristate "iProc PWM support" - depends on ARCH_BCM_IPROC + depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on COMMON_CLK + default ARCH_BCM_IPROC help Generic PWM framework driver for Broadcom iProc PWM block. This block is used in Broadcom iProc SoC's. 
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 172ef8245811..a0860b30bd93 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -137,9 +137,14 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args) { struct pwm_device *pwm; + /* check, whether the driver supports a third cell for flags */ if (pc->of_pwm_n_cells < 3) return ERR_PTR(-EINVAL); + /* flags in the third cell are optional */ + if (args->args_count < 2) + return ERR_PTR(-EINVAL); + if (args->args[0] >= pc->npwm) return ERR_PTR(-EINVAL); @@ -148,11 +153,10 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args) return pwm; pwm->args.period = args->args[1]; + pwm->args.polarity = PWM_POLARITY_NORMAL; - if (args->args[2] & PWM_POLARITY_INVERTED) + if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED) pwm->args.polarity = PWM_POLARITY_INVERSED; - else - pwm->args.polarity = PWM_POLARITY_NORMAL; return pwm; } @@ -163,9 +167,14 @@ of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args) { struct pwm_device *pwm; + /* sanity check driver support */ if (pc->of_pwm_n_cells < 2) return ERR_PTR(-EINVAL); + /* all cells are required */ + if (args->args_count != pc->of_pwm_n_cells) + return ERR_PTR(-EINVAL); + if (args->args[0] >= pc->npwm) return ERR_PTR(-EINVAL); @@ -663,24 +672,17 @@ struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id) err = of_parse_phandle_with_args(np, "pwms", "#pwm-cells", index, &args); if (err) { - pr_debug("%s(): can't parse \"pwms\" property\n", __func__); + pr_err("%s(): can't parse \"pwms\" property\n", __func__); return ERR_PTR(err); } pc = of_node_to_pwmchip(args.np); if (IS_ERR(pc)) { - pr_debug("%s(): PWM chip not found\n", __func__); + pr_err("%s(): PWM chip not found\n", __func__); pwm = ERR_CAST(pc); goto put; } - if (args.args_count != pc->of_pwm_n_cells) { - pr_debug("%s: wrong #pwm-cells for %s\n", np->full_name, - args.np->full_name); - pwm = ERR_PTR(-EINVAL); - goto put; - } - pwm = pc->of_xlate(pc, &args); if (IS_ERR(pwm)) goto put; @@ -757,12 +759,13 @@ void pwm_remove_table(struct pwm_lookup *table, size_t num) */ struct pwm_device *pwm_get(struct device *dev, const char *con_id) { - struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); const char *dev_id = dev ? dev_name(dev) : NULL; - struct pwm_chip *chip = NULL; + struct pwm_device *pwm; + struct pwm_chip *chip; unsigned int best = 0; struct pwm_lookup *p, *chosen = NULL; unsigned int match; + int err; /* look up via DT first */ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) @@ -817,24 +820,35 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) } } - if (!chosen) { - pwm = ERR_PTR(-ENODEV); - goto out; - } + mutex_unlock(&pwm_lookup_lock); + + if (!chosen) + return ERR_PTR(-ENODEV); chip = pwmchip_find_by_name(chosen->provider); + + /* + * If the lookup entry specifies a module, load the module and retry + * the PWM chip lookup. This can be used to work around driver load + * ordering issues if driver's can't be made to properly support the + * deferred probe mechanism. 
+ */ + if (!chip && chosen->module) { + err = request_module(chosen->module); + if (err == 0) + chip = pwmchip_find_by_name(chosen->provider); + } + if (!chip) - goto out; + return ERR_PTR(-EPROBE_DEFER); pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id); if (IS_ERR(pwm)) - goto out; + return pwm; pwm->args.period = chosen->period; pwm->args.polarity = chosen->polarity; -out: - mutex_unlock(&pwm_lookup_lock); return pwm; } EXPORT_SYMBOL_GPL(pwm_get); @@ -960,18 +974,6 @@ void devm_pwm_put(struct device *dev, struct pwm_device *pwm) } EXPORT_SYMBOL_GPL(devm_pwm_put); -/** - * pwm_can_sleep() - report whether PWM access will sleep - * @pwm: PWM device - * - * Returns: True if accessing the PWM can sleep, false otherwise. - */ -bool pwm_can_sleep(struct pwm_device *pwm) -{ - return true; -} -EXPORT_SYMBOL_GPL(pwm_can_sleep); - #ifdef CONFIG_DEBUG_FS static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) { diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c index 14fc011faa32..999187277ea5 100644 --- a/drivers/pwm/pwm-atmel-hlcdc.c +++ b/drivers/pwm/pwm-atmel-hlcdc.c @@ -270,7 +270,6 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev) chip->chip.npwm = 1; chip->chip.of_xlate = of_pwm_xlate_with_flags; chip->chip.of_pwm_n_cells = 3; - chip->chip.can_sleep = 1; ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED); if (ret) { diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index e6b8b1b7e6ba..67a7023be5c2 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -385,7 +385,6 @@ static int atmel_pwm_probe(struct platform_device *pdev) atmel_pwm->chip.base = -1; atmel_pwm->chip.npwm = 4; - atmel_pwm->chip.can_sleep = true; atmel_pwm->config = data->config; atmel_pwm->updated_pwms = 0; mutex_init(&atmel_pwm->isr_lock); diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c index c63418322023..09a95aeb3a70 100644 --- a/drivers/pwm/pwm-bcm-kona.c +++ b/drivers/pwm/pwm-bcm-kona.c @@ -276,7 +276,6 @@ static int kona_pwmc_probe(struct platform_device *pdev) kp->chip.npwm = 6; kp->chip.of_xlate = of_pwm_xlate_with_flags; kp->chip.of_pwm_n_cells = 3; - kp->chip.can_sleep = true; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); kp->base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index 01339c152ab0..771859aca4be 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -206,7 +206,6 @@ static int berlin_pwm_probe(struct platform_device *pdev) pwm->chip.ops = &berlin_pwm_ops; pwm->chip.base = -1; pwm->chip.npwm = 4; - pwm->chip.can_sleep = true; pwm->chip.of_xlate = of_pwm_xlate_with_flags; pwm->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-bfin.c b/drivers/pwm/pwm-bfin.c index 7631ef194de7..d2ed0a2a18e8 100644 --- a/drivers/pwm/pwm-bfin.c +++ b/drivers/pwm/pwm-bfin.c @@ -103,7 +103,7 @@ static void bfin_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) disable_gptimer(priv->pin); } -static struct pwm_ops bfin_pwm_ops = { +static const struct pwm_ops bfin_pwm_ops = { .request = bfin_pwm_request, .free = bfin_pwm_free, .config = bfin_pwm_config, diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c index 5d5adee16886..8063cffa1c96 100644 --- a/drivers/pwm/pwm-brcmstb.c +++ b/drivers/pwm/pwm-brcmstb.c @@ -270,7 +270,6 @@ static int brcmstb_pwm_probe(struct platform_device *pdev) p->chip.ops = &brcmstb_pwm_ops; p->chip.base = -1; p->chip.npwm = 2; - p->chip.can_sleep = true; res 
= platform_get_resource(pdev, IORESOURCE_MEM, 0); p->base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c index fad968eb75f6..557b4ea16796 100644 --- a/drivers/pwm/pwm-fsl-ftm.c +++ b/drivers/pwm/pwm-fsl-ftm.c @@ -446,7 +446,6 @@ static int fsl_pwm_probe(struct platform_device *pdev) fpc->chip.of_pwm_n_cells = 3; fpc->chip.base = -1; fpc->chip.npwm = 8; - fpc->chip.can_sleep = true; ret = pwmchip_add(&fpc->chip); if (ret < 0) { diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c index d600fd5cd4ba..2ba5c3a398ff 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx.c @@ -38,6 +38,7 @@ #define MX3_PWMCR_DOZEEN (1 << 24) #define MX3_PWMCR_WAITEN (1 << 23) #define MX3_PWMCR_DBGEN (1 << 22) +#define MX3_PWMCR_POUTC (1 << 18) #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16) #define MX3_PWMCR_CLKSRC_IPG (1 << 16) #define MX3_PWMCR_SWR (1 << 3) @@ -49,15 +50,10 @@ struct imx_chip { struct clk *clk_per; - struct clk *clk_ipg; void __iomem *mmio_base; struct pwm_chip chip; - - int (*config)(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns, int period_ns); - void (*set_enable)(struct pwm_chip *chip, bool enable); }; #define to_imx_chip(chip) container_of(chip, struct imx_chip, chip) @@ -91,176 +87,170 @@ static int imx_pwm_config_v1(struct pwm_chip *chip, return 0; } -static void imx_pwm_set_enable_v1(struct pwm_chip *chip, bool enable) +static int imx_pwm_enable_v1(struct pwm_chip *chip, struct pwm_device *pwm) { struct imx_chip *imx = to_imx_chip(chip); u32 val; + int ret; - val = readl(imx->mmio_base + MX1_PWMC); - - if (enable) - val |= MX1_PWMC_EN; - else - val &= ~MX1_PWMC_EN; + ret = clk_prepare_enable(imx->clk_per); + if (ret < 0) + return ret; + val = readl(imx->mmio_base + MX1_PWMC); + val |= MX1_PWMC_EN; writel(val, imx->mmio_base + MX1_PWMC); -} - -static int imx_pwm_config_v2(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns, int period_ns) -{ - struct imx_chip *imx = to_imx_chip(chip); - struct device *dev = chip->dev; - unsigned long long c; - unsigned long period_cycles, duty_cycles, prescale; - unsigned int period_ms; - bool enable = pwm_is_enabled(pwm); - int wait_count = 0, fifoav; - u32 cr, sr; - - /* - * i.MX PWMv2 has a 4-word sample FIFO. - * In order to avoid FIFO overflow issue, we do software reset - * to clear all sample FIFO if the controller is disabled or - * wait for a full PWM cycle to get a relinquished FIFO slot - * when the controller is enabled and the FIFO is fully loaded. 
- */ - if (enable) { - sr = readl(imx->mmio_base + MX3_PWMSR); - fifoav = sr & MX3_PWMSR_FIFOAV_MASK; - if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) { - period_ms = DIV_ROUND_UP(pwm_get_period(pwm), - NSEC_PER_MSEC); - msleep(period_ms); - - sr = readl(imx->mmio_base + MX3_PWMSR); - if (fifoav == (sr & MX3_PWMSR_FIFOAV_MASK)) - dev_warn(dev, "there is no free FIFO slot\n"); - } - } else { - writel(MX3_PWMCR_SWR, imx->mmio_base + MX3_PWMCR); - do { - usleep_range(200, 1000); - cr = readl(imx->mmio_base + MX3_PWMCR); - } while ((cr & MX3_PWMCR_SWR) && - (wait_count++ < MX3_PWM_SWR_LOOP)); - - if (cr & MX3_PWMCR_SWR) - dev_warn(dev, "software reset timeout\n"); - } - - c = clk_get_rate(imx->clk_per); - c = c * period_ns; - do_div(c, 1000000000); - period_cycles = c; - - prescale = period_cycles / 0x10000 + 1; - - period_cycles /= prescale; - c = (unsigned long long)period_cycles * duty_ns; - do_div(c, period_ns); - duty_cycles = c; - - /* - * according to imx pwm RM, the real period value should be - * PERIOD value in PWMPR plus 2. - */ - if (period_cycles > 2) - period_cycles -= 2; - else - period_cycles = 0; - - writel(duty_cycles, imx->mmio_base + MX3_PWMSAR); - writel(period_cycles, imx->mmio_base + MX3_PWMPR); - - cr = MX3_PWMCR_PRESCALER(prescale) | - MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN | - MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH; - - if (enable) - cr |= MX3_PWMCR_EN; - - writel(cr, imx->mmio_base + MX3_PWMCR); return 0; } -static void imx_pwm_set_enable_v2(struct pwm_chip *chip, bool enable) +static void imx_pwm_disable_v1(struct pwm_chip *chip, struct pwm_device *pwm) { struct imx_chip *imx = to_imx_chip(chip); u32 val; - val = readl(imx->mmio_base + MX3_PWMCR); - - if (enable) - val |= MX3_PWMCR_EN; - else - val &= ~MX3_PWMCR_EN; + val = readl(imx->mmio_base + MX1_PWMC); + val &= ~MX1_PWMC_EN; + writel(val, imx->mmio_base + MX1_PWMC); - writel(val, imx->mmio_base + MX3_PWMCR); + clk_disable_unprepare(imx->clk_per); } -static int imx_pwm_config(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns, int period_ns) +static void imx_pwm_sw_reset(struct pwm_chip *chip) { struct imx_chip *imx = to_imx_chip(chip); - int ret; - - ret = clk_prepare_enable(imx->clk_ipg); - if (ret) - return ret; + struct device *dev = chip->dev; + int wait_count = 0; + u32 cr; + + writel(MX3_PWMCR_SWR, imx->mmio_base + MX3_PWMCR); + do { + usleep_range(200, 1000); + cr = readl(imx->mmio_base + MX3_PWMCR); + } while ((cr & MX3_PWMCR_SWR) && + (wait_count++ < MX3_PWM_SWR_LOOP)); + + if (cr & MX3_PWMCR_SWR) + dev_warn(dev, "software reset timeout\n"); +} - ret = imx->config(chip, pwm, duty_ns, period_ns); +static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip, + struct pwm_device *pwm) +{ + struct imx_chip *imx = to_imx_chip(chip); + struct device *dev = chip->dev; + unsigned int period_ms; + int fifoav; + u32 sr; - clk_disable_unprepare(imx->clk_ipg); + sr = readl(imx->mmio_base + MX3_PWMSR); + fifoav = sr & MX3_PWMSR_FIFOAV_MASK; + if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) { + period_ms = DIV_ROUND_UP(pwm_get_period(pwm), + NSEC_PER_MSEC); + msleep(period_ms); - return ret; + sr = readl(imx->mmio_base + MX3_PWMSR); + if (fifoav == (sr & MX3_PWMSR_FIFOAV_MASK)) + dev_warn(dev, "there is no free FIFO slot\n"); + } } -static int imx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) { + unsigned long period_cycles, duty_cycles, prescale; struct imx_chip *imx = to_imx_chip(chip); + struct pwm_state 
cstate; + unsigned long long c; int ret; + u32 cr; + + pwm_get_state(pwm, &cstate); + + if (state->enabled) { + c = clk_get_rate(imx->clk_per); + c *= state->period; + + do_div(c, 1000000000); + period_cycles = c; + + prescale = period_cycles / 0x10000 + 1; + + period_cycles /= prescale; + c = (unsigned long long)period_cycles * state->duty_cycle; + do_div(c, state->period); + duty_cycles = c; + + /* + * according to imx pwm RM, the real period value should be + * PERIOD value in PWMPR plus 2. + */ + if (period_cycles > 2) + period_cycles -= 2; + else + period_cycles = 0; + + /* + * Wait for a free FIFO slot if the PWM is already enabled, and + * flush the FIFO if the PWM was disabled and is about to be + * enabled. + */ + if (cstate.enabled) { + imx_pwm_wait_fifo_slot(chip, pwm); + } else { + ret = clk_prepare_enable(imx->clk_per); + if (ret) + return ret; + + imx_pwm_sw_reset(chip); + } - ret = clk_prepare_enable(imx->clk_per); - if (ret) - return ret; + writel(duty_cycles, imx->mmio_base + MX3_PWMSAR); + writel(period_cycles, imx->mmio_base + MX3_PWMPR); - imx->set_enable(chip, true); + cr = MX3_PWMCR_PRESCALER(prescale) | + MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN | + MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH | + MX3_PWMCR_EN; - return 0; -} + if (state->polarity == PWM_POLARITY_INVERSED) + cr |= MX3_PWMCR_POUTC; -static void imx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct imx_chip *imx = to_imx_chip(chip); + writel(cr, imx->mmio_base + MX3_PWMCR); + } else if (cstate.enabled) { + writel(0, imx->mmio_base + MX3_PWMCR); - imx->set_enable(chip, false); + clk_disable_unprepare(imx->clk_per); + } - clk_disable_unprepare(imx->clk_per); + return 0; } -static struct pwm_ops imx_pwm_ops = { - .enable = imx_pwm_enable, - .disable = imx_pwm_disable, - .config = imx_pwm_config, +static const struct pwm_ops imx_pwm_ops_v1 = { + .enable = imx_pwm_enable_v1, + .disable = imx_pwm_disable_v1, + .config = imx_pwm_config_v1, + .owner = THIS_MODULE, +}; + +static const struct pwm_ops imx_pwm_ops_v2 = { + .apply = imx_pwm_apply_v2, .owner = THIS_MODULE, }; struct imx_pwm_data { - int (*config)(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns, int period_ns); - void (*set_enable)(struct pwm_chip *chip, bool enable); + bool polarity_supported; + const struct pwm_ops *ops; }; static struct imx_pwm_data imx_pwm_data_v1 = { - .config = imx_pwm_config_v1, - .set_enable = imx_pwm_set_enable_v1, + .ops = &imx_pwm_ops_v1, }; static struct imx_pwm_data imx_pwm_data_v2 = { - .config = imx_pwm_config_v2, - .set_enable = imx_pwm_set_enable_v2, + .polarity_supported = true, + .ops = &imx_pwm_ops_v2, }; static const struct of_device_id imx_pwm_dt_ids[] = { @@ -282,6 +272,8 @@ static int imx_pwm_probe(struct platform_device *pdev) if (!of_id) return -ENODEV; + data = of_id->data; + imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL); if (imx == NULL) return -ENOMEM; @@ -293,28 +285,22 @@ static int imx_pwm_probe(struct platform_device *pdev) return PTR_ERR(imx->clk_per); } - imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); - if (IS_ERR(imx->clk_ipg)) { - dev_err(&pdev->dev, "getting ipg clock failed with %ld\n", - PTR_ERR(imx->clk_ipg)); - return PTR_ERR(imx->clk_ipg); - } - - imx->chip.ops = &imx_pwm_ops; + imx->chip.ops = data->ops; imx->chip.dev = &pdev->dev; imx->chip.base = -1; imx->chip.npwm = 1; - imx->chip.can_sleep = true; + + if (data->polarity_supported) { + dev_dbg(&pdev->dev, "PWM supports output inversion\n"); + imx->chip.of_xlate = of_pwm_xlate_with_flags; + 
imx->chip.of_pwm_n_cells = 3; + } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(imx->mmio_base)) return PTR_ERR(imx->mmio_base); - data = of_id->data; - imx->config = data->config; - imx->set_enable = data->set_enable; - ret = pwmchip_add(&imx->chip); if (ret < 0) return ret; diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c index 872ea76a4f19..52584e9962ed 100644 --- a/drivers/pwm/pwm-lp3943.c +++ b/drivers/pwm/pwm-lp3943.c @@ -278,7 +278,6 @@ static int lp3943_pwm_probe(struct platform_device *pdev) lp3943_pwm->chip.dev = &pdev->dev; lp3943_pwm->chip.ops = &lp3943_pwm_ops; lp3943_pwm->chip.npwm = LP3943_NUM_PWMS; - lp3943_pwm->chip.can_sleep = true; platform_set_drvdata(pdev, lp3943_pwm); diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c index 3622f093490e..053088b9b66e 100644 --- a/drivers/pwm/pwm-lpss-pci.c +++ b/drivers/pwm/pwm-lpss-pci.c @@ -17,6 +17,27 @@ #include "pwm-lpss.h" +/* BayTrail */ +static const struct pwm_lpss_boardinfo pwm_lpss_byt_info = { + .clk_rate = 25000000, + .npwm = 1, + .base_unit_bits = 16, +}; + +/* Braswell */ +static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = { + .clk_rate = 19200000, + .npwm = 1, + .base_unit_bits = 16, +}; + +/* Broxton */ +static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = { + .clk_rate = 19200000, + .npwm = 4, + .base_unit_bits = 22, +}; + static int pwm_lpss_probe_pci(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -80,6 +101,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info}, { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, + { PCI_VDEVICE(INTEL, 0x31c8), (unsigned long)&pwm_lpss_bxt_info}, { PCI_VDEVICE(INTEL, 0x5ac8), (unsigned long)&pwm_lpss_bxt_info}, { }, }; diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index 54433fc6d1a4..b22b6fdadb9a 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -18,6 +18,27 @@ #include "pwm-lpss.h" +/* BayTrail */ +static const struct pwm_lpss_boardinfo pwm_lpss_byt_info = { + .clk_rate = 25000000, + .npwm = 1, + .base_unit_bits = 16, +}; + +/* Braswell */ +static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = { + .clk_rate = 19200000, + .npwm = 1, + .base_unit_bits = 16, +}; + +/* Broxton */ +static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = { + .clk_rate = 19200000, + .npwm = 4, + .base_unit_bits = 22, +}; + static int pwm_lpss_probe_platform(struct platform_device *pdev) { const struct pwm_lpss_boardinfo *info; diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 72c0bce5a75c..689d2c1cbead 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -15,6 +15,7 @@ #include <linux/delay.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pm_runtime.h> @@ -37,30 +38,6 @@ struct pwm_lpss_chip { const struct pwm_lpss_boardinfo *info; }; -/* BayTrail */ -const struct pwm_lpss_boardinfo pwm_lpss_byt_info = { - .clk_rate = 25000000, - .npwm = 1, - .base_unit_bits = 16, -}; -EXPORT_SYMBOL_GPL(pwm_lpss_byt_info); - -/* Braswell */ -const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = { - .clk_rate = 19200000, - .npwm = 1, - .base_unit_bits = 16, -}; -EXPORT_SYMBOL_GPL(pwm_lpss_bsw_info); - -/* Broxton */ -const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = 
{ - .clk_rate = 19200000, - .npwm = 4, - .base_unit_bits = 22, -}; -EXPORT_SYMBOL_GPL(pwm_lpss_bxt_info); - static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip) { return container_of(chip, struct pwm_lpss_chip, chip); @@ -80,17 +57,42 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value) writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM); } -static void pwm_lpss_update(struct pwm_device *pwm) +static int pwm_lpss_update(struct pwm_device *pwm) { + struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip); + const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM; + const unsigned int ms = 500 * USEC_PER_MSEC; + u32 val; + int err; + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); - /* Give it some time to propagate */ - usleep_range(10, 50); + + /* + * PWM Configuration register has SW_UPDATE bit that is set when a new + * configuration is written to the register. The bit is automatically + * cleared at the start of the next output cycle by the IP block. + * + * If one writes a new configuration to the register while it still has + * the bit enabled, PWM may freeze. That is, while one can still write + * to the register, it won't have an effect. Thus, we try to sleep long + * enough that the bit gets cleared and make sure the bit is not + * enabled while we update the configuration. + */ + err = readl_poll_timeout(addr, val, !(val & PWM_SW_UPDATE), 40, ms); + if (err) + dev_err(pwm->chip->dev, "PWM_SW_UPDATE was not cleared\n"); + + return err; } -static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static inline int pwm_lpss_is_updating(struct pwm_device *pwm) +{ + return (pwm_lpss_read(pwm) & PWM_SW_UPDATE) ? -EBUSY : 0; +} + +static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, + int duty_ns, int period_ns) { - struct pwm_lpss_chip *lpwm = to_lpwm(chip); unsigned long long on_time_div; unsigned long c = lpwm->info->clk_rate, base_unit_range; unsigned long long base_unit, freq = NSEC_PER_SEC; @@ -102,62 +104,62 @@ static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm, * The equation is: * base_unit = round(base_unit_range * freq / c) */ - base_unit_range = BIT(lpwm->info->base_unit_bits); + base_unit_range = BIT(lpwm->info->base_unit_bits) - 1; freq *= base_unit_range; base_unit = DIV_ROUND_CLOSEST_ULL(freq, c); - if (duty_ns <= 0) - duty_ns = 1; on_time_div = 255ULL * duty_ns; do_div(on_time_div, period_ns); on_time_div = 255ULL - on_time_div; - pm_runtime_get_sync(chip->dev); - ctrl = pwm_lpss_read(pwm); ctrl &= ~PWM_ON_TIME_DIV_MASK; - ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT); - base_unit &= (base_unit_range - 1); + ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); + base_unit &= base_unit_range; ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; ctrl |= on_time_div; pwm_lpss_write(pwm, ctrl); - - /* - * If the PWM is already enabled we need to notify the hardware - * about the change by setting PWM_SW_UPDATE. - */ - if (pwm_is_enabled(pwm)) - pwm_lpss_update(pwm); - - pm_runtime_put(chip->dev); - - return 0; } -static int pwm_lpss_enable(struct pwm_chip *chip, struct pwm_device *pwm) +static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) { - pm_runtime_get_sync(chip->dev); + struct pwm_lpss_chip *lpwm = to_lpwm(chip); + int ret; - /* - * Hardware must first see PWM_SW_UPDATE before the PWM can be - * enabled. 
- */ - pwm_lpss_update(pwm); - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); - return 0; -} + if (state->enabled) { + if (!pwm_is_enabled(pwm)) { + pm_runtime_get_sync(chip->dev); + ret = pwm_lpss_is_updating(pwm); + if (ret) { + pm_runtime_put(chip->dev); + return ret; + } + pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); + ret = pwm_lpss_update(pwm); + if (ret) { + pm_runtime_put(chip->dev); + return ret; + } + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); + } else { + ret = pwm_lpss_is_updating(pwm); + if (ret) + return ret; + pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); + return pwm_lpss_update(pwm); + } + } else if (pwm_is_enabled(pwm)) { + pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); + pm_runtime_put(chip->dev); + } -static void pwm_lpss_disable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); - pm_runtime_put(chip->dev); + return 0; } static const struct pwm_ops pwm_lpss_ops = { - .config = pwm_lpss_config, - .enable = pwm_lpss_enable, - .disable = pwm_lpss_disable, + .apply = pwm_lpss_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h index 04766e0d41aa..c94cd7c2695d 100644 --- a/drivers/pwm/pwm-lpss.h +++ b/drivers/pwm/pwm-lpss.h @@ -24,10 +24,6 @@ struct pwm_lpss_boardinfo { unsigned long base_unit_bits; }; -extern const struct pwm_lpss_boardinfo pwm_lpss_byt_info; -extern const struct pwm_lpss_boardinfo pwm_lpss_bsw_info; -extern const struct pwm_lpss_boardinfo pwm_lpss_bxt_info; - struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, const struct pwm_lpss_boardinfo *info); int pwm_lpss_remove(struct pwm_lpss_chip *lpwm); diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c index 9a596324ebef..a6017ad9926c 100644 --- a/drivers/pwm/pwm-mxs.c +++ b/drivers/pwm/pwm-mxs.c @@ -151,7 +151,7 @@ static int mxs_pwm_probe(struct platform_device *pdev) mxs->chip.dev = &pdev->dev; mxs->chip.ops = &mxs_pwm_ops; mxs->chip.base = -1; - mxs->chip.can_sleep = true; + ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm); if (ret < 0) { dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret); diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c index 117fccf7934a..0cfb3571a732 100644 --- a/drivers/pwm/pwm-pca9685.c +++ b/drivers/pwm/pwm-pca9685.c @@ -20,8 +20,10 @@ */ #include <linux/acpi.h> +#include <linux/gpio/driver.h> #include <linux/i2c.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/pwm.h> @@ -65,7 +67,6 @@ #define PCA9685_MAXCHAN 0x10 #define LED_FULL (1 << 4) -#define MODE1_RESTART (1 << 7) #define MODE1_SLEEP (1 << 4) #define MODE2_INVRT (1 << 4) #define MODE2_OUTDRV (1 << 2) @@ -81,6 +82,10 @@ struct pca9685 { int active_cnt; int duty_ns; int period_ns; +#if IS_ENABLED(CONFIG_GPIOLIB) + struct mutex lock; + struct gpio_chip gpio; +#endif }; static inline struct pca9685 *to_pca(struct pwm_chip *chip) @@ -88,6 +93,151 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip) return container_of(chip, struct pca9685, chip); } +#if IS_ENABLED(CONFIG_GPIOLIB) +static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); + struct pwm_device *pwm; + + mutex_lock(&pca->lock); + + pwm = &pca->chip.pwms[offset]; + + if (pwm->flags & (PWMF_REQUESTED | PWMF_EXPORTED)) { + mutex_unlock(&pca->lock); + return -EBUSY; + } + + 
pwm_set_chip_data(pwm, (void *)1); + + mutex_unlock(&pca->lock); + return 0; +} + +static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); + struct pwm_device *pwm; + + mutex_lock(&pca->lock); + pwm = &pca->chip.pwms[offset]; + pwm_set_chip_data(pwm, NULL); + mutex_unlock(&pca->lock); +} + +static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm) +{ + bool is_gpio = false; + + mutex_lock(&pca->lock); + + if (pwm->hwpwm >= PCA9685_MAXCHAN) { + unsigned int i; + + /* + * Check if any of the GPIOs are requested and in that case + * prevent using the "all LEDs" channel. + */ + for (i = 0; i < pca->gpio.ngpio; i++) + if (gpiochip_is_requested(&pca->gpio, i)) { + is_gpio = true; + break; + } + } else if (pwm_get_chip_data(pwm)) { + is_gpio = true; + } + + mutex_unlock(&pca->lock); + return is_gpio; +} + +static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); + struct pwm_device *pwm = &pca->chip.pwms[offset]; + unsigned int value; + + regmap_read(pca->regmap, LED_N_ON_H(pwm->hwpwm), &value); + + return value & LED_FULL; +} + +static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset, + int value) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); + struct pwm_device *pwm = &pca->chip.pwms[offset]; + unsigned int on = value ? LED_FULL : 0; + + /* Clear both OFF registers */ + regmap_write(pca->regmap, LED_N_OFF_L(pwm->hwpwm), 0); + regmap_write(pca->regmap, LED_N_OFF_H(pwm->hwpwm), 0); + + /* Set the full ON bit */ + regmap_write(pca->regmap, LED_N_ON_H(pwm->hwpwm), on); +} + +static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + /* Always out */ + return 0; +} + +static int pca9685_pwm_gpio_direction_input(struct gpio_chip *gpio, + unsigned int offset) +{ + return -EINVAL; +} + +static int pca9685_pwm_gpio_direction_output(struct gpio_chip *gpio, + unsigned int offset, int value) +{ + pca9685_pwm_gpio_set(gpio, offset, value); + + return 0; +} + +/* + * The PCA9685 has a bit for turning the PWM output full off or on. Some + * boards like Intel Galileo actually uses these as normal GPIOs so we + * expose a GPIO chip here which can exclusively take over the underlying + * PWM channel. 
+ */ +static int pca9685_pwm_gpio_probe(struct pca9685 *pca) +{ + struct device *dev = pca->chip.dev; + + mutex_init(&pca->lock); + + pca->gpio.label = dev_name(dev); + pca->gpio.parent = dev; + pca->gpio.request = pca9685_pwm_gpio_request; + pca->gpio.free = pca9685_pwm_gpio_free; + pca->gpio.get_direction = pca9685_pwm_gpio_get_direction; + pca->gpio.direction_input = pca9685_pwm_gpio_direction_input; + pca->gpio.direction_output = pca9685_pwm_gpio_direction_output; + pca->gpio.get = pca9685_pwm_gpio_get; + pca->gpio.set = pca9685_pwm_gpio_set; + pca->gpio.base = -1; + pca->gpio.ngpio = PCA9685_MAXCHAN; + pca->gpio.can_sleep = true; + + return devm_gpiochip_add_data(dev, &pca->gpio, pca); +} +#else +static inline bool pca9685_pwm_is_gpio(struct pca9685 *pca, + struct pwm_device *pwm) +{ + return false; +} + +static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca) +{ + return 0; +} +#endif + static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { @@ -117,16 +267,6 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, udelay(500); pca->period_ns = period_ns; - - /* - * If the duty cycle did not change, restart PWM with - * the same duty cycle to period ratio and return. - */ - if (duty_ns == pca->duty_ns) { - regmap_update_bits(pca->regmap, PCA9685_MODE1, - MODE1_RESTART, 0x1); - return 0; - } } else { dev_err(chip->dev, "prescaler not set: period out of bounds!\n"); @@ -264,6 +404,9 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct pca9685 *pca = to_pca(chip); + if (pca9685_pwm_is_gpio(pca, pwm)) + return -EBUSY; + if (pca->active_cnt++ == 0) return regmap_update_bits(pca->regmap, PCA9685_MODE1, MODE1_SLEEP, 0x0); @@ -343,9 +486,16 @@ static int pca9685_pwm_probe(struct i2c_client *client, pca->chip.dev = &client->dev; pca->chip.base = -1; - pca->chip.can_sleep = true; - return pwmchip_add(&pca->chip); + ret = pwmchip_add(&pca->chip); + if (ret < 0) + return ret; + + ret = pca9685_pwm_gpio_probe(pca); + if (ret < 0) + pwmchip_remove(&pca->chip); + + return ret; } static int pca9685_pwm_remove(struct i2c_client *client) diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c index 58b709f29130..4143a46684d2 100644 --- a/drivers/pwm/pwm-pxa.c +++ b/drivers/pwm/pwm-pxa.c @@ -118,7 +118,7 @@ static void pxa_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) clk_disable_unprepare(pc->clk); } -static struct pwm_ops pxa_pwm_ops = { +static const struct pwm_ops pxa_pwm_ops = { .config = pxa_pwm_config, .enable = pxa_pwm_enable, .disable = pxa_pwm_disable, diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index dd82dc840af9..2b7c31c9d1ab 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -635,7 +635,6 @@ skip_cpt: pc->chip.ops = &sti_pwm_ops; pc->chip.base = -1; pc->chip.npwm = pc->cdata->pwm_num_devs; - pc->chip.can_sleep = true; ret = pwmchip_add(&pc->chip); if (ret < 0) { diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index b0803f6c64d9..1284ffa05921 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -340,7 +340,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev) pwm->chip.ops = &sun4i_pwm_ops; pwm->chip.base = -1; pwm->chip.npwm = pwm->data->npwm; - pwm->chip.can_sleep = true; pwm->chip.of_xlate = of_pwm_xlate_with_flags; pwm->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c index b964470025c5..21eff991d0e3 100644 --- a/drivers/pwm/pwm-twl-led.c +++ 
b/drivers/pwm/pwm-twl-led.c @@ -303,7 +303,6 @@ static int twl_pwmled_probe(struct platform_device *pdev) twl->chip.dev = &pdev->dev; twl->chip.base = -1; - twl->chip.can_sleep = true; mutex_init(&twl->mutex); diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c index 7a993b056638..9de617b76680 100644 --- a/drivers/pwm/pwm-twl.c +++ b/drivers/pwm/pwm-twl.c @@ -323,7 +323,6 @@ static int twl_pwm_probe(struct platform_device *pdev) twl->chip.dev = &pdev->dev; twl->chip.base = -1; twl->chip.npwm = 2; - twl->chip.can_sleep = true; mutex_init(&twl->mutex); diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c index cdb58fd4619d..8141a4984126 100644 --- a/drivers/pwm/pwm-vt8500.c +++ b/drivers/pwm/pwm-vt8500.c @@ -184,7 +184,7 @@ static int vt8500_pwm_set_polarity(struct pwm_chip *chip, return 0; } -static struct pwm_ops vt8500_pwm_ops = { +static const struct pwm_ops vt8500_pwm_ops = { .enable = vt8500_pwm_enable, .disable = vt8500_pwm_disable, .config = vt8500_pwm_config, diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 364411fb7734..0142cc3f0c91 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -137,7 +137,8 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev) static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { int i, ret; diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 3090b0d3072f..5e66e081027e 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -869,7 +869,7 @@ static int rpmsg_probe(struct virtio_device *vdev) init_waitqueue_head(&vrp->sendq); /* We expect two virtqueues, rx and tx (and in this order) */ - err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names); + err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names, NULL); if (err) goto free_vrp; diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index a6d9434addf6..6dc8f29697ab 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -15,7 +15,7 @@ #include <linux/module.h> #include <linux/rtc.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include "rtc-core.h" static dev_t rtc_devt; diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 85eca1cef063..c4518168fd02 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/compat.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/list.h> #include <linux/slab.h> diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 82c913318b73..ba0e4f93503d 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -7,7 +7,7 @@ */ #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/sysrq.h> diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index de6fccc13124..1b350665c823 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -29,7 +29,7 @@ #include <asm/chpid.h> #include <asm/airq.h> #include <asm/isc.h> -#include <linux/cputime.h> +#include <linux/sched/cputime.h> #include <asm/fcx.h> #include <asm/nmi.h> #include <asm/crw.h> diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 
79823ee9c100..b8006ea9099c 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -24,6 +24,7 @@ #include <linux/delay.h> #include <linux/timer.h> #include <linux/kernel_stat.h> +#include <linux/sched/signal.h> #include <asm/ccwdev.h> #include <asm/cio.h> diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 8ad98a902a91..c61164f4528e 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -8,6 +8,8 @@ #include <linux/slab.h> #include <linux/kernel_stat.h> #include <linux/atomic.h> +#include <linux/rculist.h> + #include <asm/debug.h> #include <asm/qdio.h> #include <asm/airq.h> diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 5e5c11f37b24..2ce0b3eb2efe 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c @@ -255,7 +255,8 @@ static void kvm_del_vqs(struct virtio_device *vdev) static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { struct kvm_device *kdev = to_kvmdev(vdev); int i; diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 648373cde4a1..0ed209f3d8b0 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -628,7 +628,8 @@ out: static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); unsigned long *indicatorp = NULL; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index d4023bf1e739..230043c1c90f 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1235,11 +1235,13 @@ config SCSI_QLOGICPTI source "drivers/scsi/qla2xxx/Kconfig" source "drivers/scsi/qla4xxx/Kconfig" source "drivers/scsi/qedi/Kconfig" +source "drivers/scsi/qedf/Kconfig" config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI depends on SCSI_FC_ATTRS + depends on NVME_FC && NVME_TARGET_FC select CRC_T10DIF help This lpfc driver supports the Emulex LightPulse @@ -1478,7 +1480,7 @@ config ATARI_SCSI config MAC_SCSI tristate "Macintosh NCR5380 SCSI" - depends on MAC && SCSI=y + depends on MAC && SCSI select SCSI_SPI_ATTRS help This is the NCR 5380 SCSI controller included on most of the 68030 diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 736b77414a4b..fc2855565a51 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_FCOE) += fcoe/ obj-$(CONFIG_FCOE_FNIC) += fnic/ obj-$(CONFIG_SCSI_SNIC) += snic/ obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/ +obj-$(CONFIG_QEDF) += qedf/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 907f1e80665b..e3e93def722b 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -294,6 +294,10 @@ MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for" "deregistering them. 
This is typically adjusted for heavily burdened" " systems."); +int aac_fib_dump; +module_param(aac_fib_dump, int, 0644); +MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on"); + int numacb = -1; module_param(numacb, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control" @@ -311,7 +315,7 @@ module_param(update_interval, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync" " updates issued to adapter."); -int check_interval = 24 * 60 * 60; +int check_interval = 60; module_param(check_interval, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health" " checks."); @@ -483,7 +487,7 @@ int aac_get_containers(struct aac_dev *dev) if (status >= 0) { dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); - if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + if (fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_SUPPORTED_240_VOLUMES) { maximum_num_containers = le32_to_cpu(dresp->MaxSimpleVolumes); @@ -639,13 +643,16 @@ static void _aac_probe_container2(void * context, struct fib * fibptr) fsa_dev_ptr = fibptr->dev->fsa_dev; if (fsa_dev_ptr) { struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr); + __le32 sup_options2; + fsa_dev_ptr += scmd_id(scsicmd); + sup_options2 = + fibptr->dev->supplement_adapter_info.supported_options2; if ((le32_to_cpu(dresp->status) == ST_OK) && (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { - if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 & - AAC_OPTION_VARIABLE_BLOCK_SIZE)) { + if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) { dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200; fsa_dev_ptr->block_size = 0x200; } else { @@ -688,7 +695,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) int status; dresp = (struct aac_mount *) fib_data(fibptr); - if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 & + if (!(fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) dresp->mnt[0].capacityhigh = 0; if ((le32_to_cpu(dresp->status) != ST_OK) || @@ -705,7 +712,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) dinfo = (struct aac_query_mount *)fib_data(fibptr); - if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + if (fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE) dinfo->command = cpu_to_le32(VM_NameServeAllBlk); else @@ -745,7 +752,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru dinfo = (struct aac_query_mount *)fib_data(fibptr); - if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + if (fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE) dinfo->command = cpu_to_le32(VM_NameServeAllBlk); else @@ -896,12 +903,14 @@ char * get_container_type(unsigned tindex) static void setinqstr(struct aac_dev *dev, void *data, int tindex) { struct scsi_inq *str; + struct aac_supplement_adapter_info *sup_adap_info; + sup_adap_info = &dev->supplement_adapter_info; str = (struct scsi_inq *)(data); /* cast data to scsi inq block */ memset(str, ' ', sizeof(*str)); - if (dev->supplement_adapter_info.AdapterTypeText[0]) { - char * cp = dev->supplement_adapter_info.AdapterTypeText; + if (sup_adap_info->adapter_type_text[0]) 
{ + char *cp = sup_adap_info->adapter_type_text; int c; if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C')) inqstrcpy("SMC", str->vid); @@ -911,8 +920,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) ++cp; c = *cp; *cp = '\0'; - inqstrcpy (dev->supplement_adapter_info.AdapterTypeText, - str->vid); + inqstrcpy(sup_adap_info->adapter_type_text, str->vid); *cp = c; while (*cp && *cp != ' ') ++cp; @@ -1675,8 +1683,8 @@ int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target) if (!identify_resp) goto fib_free_ptr; - vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus); - vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget); + vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus); + vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target); aac_fib_init(fibptr); @@ -1815,9 +1823,9 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan) } vbus = (u32) le16_to_cpu( - dev->supplement_adapter_info.VirtDeviceBus); + dev->supplement_adapter_info.virt_device_bus); vid = (u32) le16_to_cpu( - dev->supplement_adapter_info.VirtDeviceTarget); + dev->supplement_adapter_info.virt_device_target); aac_fib_init(fibptr); @@ -1893,7 +1901,7 @@ int aac_get_adapter_info(struct aac_dev* dev) } memcpy(&dev->adapter_info, info, sizeof(*info)); - dev->supplement_adapter_info.VirtDeviceBus = 0xffff; + dev->supplement_adapter_info.virt_device_bus = 0xffff; if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { struct aac_supplement_adapter_info * sinfo; @@ -1961,7 +1969,7 @@ int aac_get_adapter_info(struct aac_dev* dev) } if (!dev->sync_mode && dev->sa_firmware && - dev->supplement_adapter_info.VirtDeviceBus != 0xffff) { + dev->supplement_adapter_info.virt_device_bus != 0xffff) { /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */ rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT); } @@ -1976,8 +1984,8 @@ int aac_get_adapter_info(struct aac_dev* dev) (tmp>>16)&0xff, tmp&0xff, le32_to_cpu(dev->adapter_info.kernelbuild), - (int)sizeof(dev->supplement_adapter_info.BuildDate), - dev->supplement_adapter_info.BuildDate); + (int)sizeof(dev->supplement_adapter_info.build_date), + dev->supplement_adapter_info.build_date); tmp = le32_to_cpu(dev->adapter_info.monitorrev); printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", dev->name, dev->id, @@ -1993,14 +2001,15 @@ int aac_get_adapter_info(struct aac_dev* dev) shost_to_class(dev->scsi_host_ptr), buffer)) printk(KERN_INFO "%s%d: serial %s", dev->name, dev->id, buffer); - if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) { + if (dev->supplement_adapter_info.vpd_info.tsid[0]) { printk(KERN_INFO "%s%d: TSID %.*s\n", dev->name, dev->id, - (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), - dev->supplement_adapter_info.VpdInfo.Tsid); + (int)sizeof(dev->supplement_adapter_info + .vpd_info.tsid), + dev->supplement_adapter_info.vpd_info.tsid); } if (!aac_check_reset || ((aac_check_reset == 1) && - (dev->supplement_adapter_info.SupportedOptions2 & + (dev->supplement_adapter_info.supported_options2 & AAC_OPTION_IGNORE_RESET))) { printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", dev->name, dev->id); @@ -2008,7 +2017,7 @@ int aac_get_adapter_info(struct aac_dev* dev) } dev->cache_protected = 0; - dev->jbod = ((dev->supplement_adapter_info.FeatureBits & + dev->jbod = ((dev->supplement_adapter_info.feature_bits & AAC_FEATURE_JBOD) != 0); dev->nondasd_support = 0; dev->raid_scsi_mode = 0; @@ -2631,7 +2640,7 @@ static int aac_start_stop(struct scsi_cmnd 
*scsicmd) struct scsi_device *sdev = scsicmd->device; struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; - if (!(aac->supplement_adapter_info.SupportedOptions2 & + if (!(aac->supplement_adapter_info.supported_options2 & AAC_OPTION_POWER_MANAGEMENT)) { scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index f2344971e3cb..d036a806f31c 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -97,7 +97,7 @@ enum { #define PMC_GLOBAL_INT_BIT0 0x00000001 #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 50740 +# define AAC_DRIVER_BUILD 50792 # define AAC_DRIVER_BRANCH "-custom" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -1380,57 +1380,57 @@ struct aac_adapter_info struct aac_supplement_adapter_info { - u8 AdapterTypeText[17+1]; - u8 Pad[2]; - __le32 FlashMemoryByteSize; - __le32 FlashImageId; - __le32 MaxNumberPorts; - __le32 Version; - __le32 FeatureBits; - u8 SlotNumber; - u8 ReservedPad0[3]; - u8 BuildDate[12]; - __le32 CurrentNumberPorts; + u8 adapter_type_text[17+1]; + u8 pad[2]; + __le32 flash_memory_byte_size; + __le32 flash_image_id; + __le32 max_number_ports; + __le32 version; + __le32 feature_bits; + u8 slot_number; + u8 reserved_pad0[3]; + u8 build_date[12]; + __le32 current_number_ports; struct { - u8 AssemblyPn[8]; - u8 FruPn[8]; - u8 BatteryFruPn[8]; - u8 EcVersionString[8]; - u8 Tsid[12]; - } VpdInfo; - __le32 FlashFirmwareRevision; - __le32 FlashFirmwareBuild; - __le32 RaidTypeMorphOptions; - __le32 FlashFirmwareBootRevision; - __le32 FlashFirmwareBootBuild; - u8 MfgPcbaSerialNo[12]; - u8 MfgWWNName[8]; - __le32 SupportedOptions2; - __le32 StructExpansion; + u8 assembly_pn[8]; + u8 fru_pn[8]; + u8 battery_fru_pn[8]; + u8 ec_version_string[8]; + u8 tsid[12]; + } vpd_info; + __le32 flash_firmware_revision; + __le32 flash_firmware_build; + __le32 raid_type_morph_options; + __le32 flash_firmware_boot_revision; + __le32 flash_firmware_boot_build; + u8 mfg_pcba_serial_no[12]; + u8 mfg_wwn_name[8]; + __le32 supported_options2; + __le32 struct_expansion; /* StructExpansion == 1 */ - __le32 FeatureBits3; - __le32 SupportedPerformanceModes; - u8 HostBusType; /* uses HOST_BUS_TYPE_xxx defines */ - u8 HostBusWidth; /* actual width in bits or links */ - u16 HostBusSpeed; /* actual bus speed/link rate in MHz */ - u8 MaxRRCDrives; /* max. number of ITP-RRC drives/pool */ - u8 MaxDiskXtasks; /* max. possible num of DiskX Tasks */ - - u8 CpldVerLoaded; - u8 CpldVerInFlash; - - __le64 MaxRRCCapacity; - __le32 CompiledMaxHistLogLevel; - u8 CustomBoardName[12]; - u16 SupportedCntlrMode; /* identify supported controller mode */ - u16 ReservedForFuture16; - __le32 SupportedOptions3; /* reserved for future options */ - - __le16 VirtDeviceBus; /* virt. SCSI device for Thor */ - __le16 VirtDeviceTarget; - __le16 VirtDeviceLUN; - __le16 Unused; - __le32 ReservedForFutureGrowth[68]; + __le32 feature_bits3; + __le32 supported_performance_modes; + u8 host_bus_type; /* uses HOST_BUS_TYPE_xxx defines */ + u8 host_bus_width; /* actual width in bits or links */ + u16 host_bus_speed; /* actual bus speed/link rate in MHz */ + u8 max_rrc_drives; /* max. number of ITP-RRC drives/pool */ + u8 max_disk_xtasks; /* max. 
possible num of DiskX Tasks */ + + u8 cpld_ver_loaded; + u8 cpld_ver_in_flash; + + __le64 max_rrc_capacity; + __le32 compiled_max_hist_log_level; + u8 custom_board_name[12]; + u16 supported_cntlr_mode; /* identify supported controller mode */ + u16 reserved_for_future16; + __le32 supported_options3; /* reserved for future options */ + + __le16 virt_device_bus; /* virt. SCSI device for Thor */ + __le16 virt_device_target; + __le16 virt_device_lun; + __le16 unused; + __le32 reserved_for_future_growth[68]; }; #define AAC_FEATURE_FALCON cpu_to_le32(0x00000010) @@ -1444,6 +1444,10 @@ struct aac_supplement_adapter_info #define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000) /* 240 simple volume support */ #define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000) +/* + * Supports FIB dump sync command send prior to IOP_RESET + */ +#define AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP cpu_to_le32(0x00004000) #define AAC_SIS_VERSION_V3 3 #define AAC_SIS_SLOT_UNKNOWN 0xFF @@ -2483,6 +2487,7 @@ struct aac_hba_info { #define GET_DRIVER_BUFFER_PROPERTIES 0x00000023 #define RCV_TEMP_READINGS 0x00000025 #define GET_COMM_PREFERRED_SETTINGS 0x00000026 +#define IOP_RESET_FW_FIB_DUMP 0x00000034 #define IOP_RESET 0x00001000 #define IOP_RESET_ALWAYS 0x00001001 #define RE_INIT_ADAPTER 0x000000ee @@ -2639,6 +2644,7 @@ void aac_hba_callback(void *context, struct fib *fibptr); #define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data) struct aac_dev *aac_init_adapter(struct aac_dev *dev); void aac_src_access_devreg(struct aac_dev *dev, int mode); +void aac_set_intx_mode(struct aac_dev *dev); int aac_get_config_status(struct aac_dev *dev, int commit_flag); int aac_get_containers(struct aac_dev *dev); int aac_scsi_cmd(struct scsi_cmnd *cmd); @@ -2685,4 +2691,5 @@ extern int aac_commit; extern int update_interval; extern int check_interval; extern int aac_check_reset; +extern int aac_fib_dump; #endif diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 614842a9eb07..f6afd50579c0 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -580,7 +580,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - chn = aac_logical_to_phys(user_srbcmd->channel); + chn = user_srbcmd->channel; if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS && dev->hba_map[chn][user_srbcmd->id].devtype == AAC_DEVTYPE_NATIVE_RAW) { diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 40bfc57b6849..35607005f7e1 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -330,7 +330,7 @@ int aac_send_shutdown(struct aac_dev * dev) dev->pdev->device == PMC_DEVICE_S8 || dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) - aac_src_access_devreg(dev, AAC_ENABLE_INTX); + aac_set_intx_mode(dev); return status; } diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 969727b67cdd..a3ad04293487 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -95,12 +95,20 @@ static int fib_map_alloc(struct aac_dev *dev) void aac_fib_map_free(struct aac_dev *dev) { - if (dev->hw_fib_va && dev->max_cmd_size) { - pci_free_consistent(dev->pdev, - (dev->max_cmd_size * - (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)), - dev->hw_fib_va, dev->hw_fib_pa); - } + size_t alloc_size; + size_t fib_size; + int num_fibs; + + if(!dev->hw_fib_va || !dev->max_cmd_size) + return; + + num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; + 
fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr); + alloc_size = fib_size * num_fibs + ALIGN32 - 1; + + pci_free_consistent(dev->pdev, alloc_size, dev->hw_fib_va, + dev->hw_fib_pa); + dev->hw_fib_va = NULL; dev->hw_fib_pa = 0; } @@ -153,22 +161,20 @@ int aac_fib_setup(struct aac_dev * dev) if (i<0) return -ENOMEM; - /* 32 byte alignment for PMC */ - hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); - dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + - (hw_fib_pa - dev->hw_fib_pa)); - dev->hw_fib_pa = hw_fib_pa; memset(dev->hw_fib_va, 0, (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); + /* 32 byte alignment for PMC */ + hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); + hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + + (hw_fib_pa - dev->hw_fib_pa)); + /* add Xport header */ - dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + + hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + sizeof(struct aac_fib_xporthdr)); - dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr); + hw_fib_pa += sizeof(struct aac_fib_xporthdr); - hw_fib = dev->hw_fib_va; - hw_fib_pa = dev->hw_fib_pa; /* * Initialise the fibs */ @@ -461,6 +467,35 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw return 0; } +#ifdef CONFIG_EEH +static inline int aac_check_eeh_failure(struct aac_dev *dev) +{ + /* Check for an EEH failure for the given + * device node. Function eeh_dev_check_failure() + * returns 0 if there has not been an EEH error + * otherwise returns a non-zero value. + * + * Need to be called before any PCI operation, + * i.e.,before aac_adapter_check_health() + */ + struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev); + + if (eeh_dev_check_failure(edev)) { + /* The EEH mechanisms will handle this + * error and reset the device if + * necessary. + */ + return 1; + } + return 0; +} +#else +static inline int aac_check_eeh_failure(struct aac_dev *dev) +{ + return 0; +} +#endif + /* * Define the highest level of host to adapter communication routines. * These routines will support host to adapter FS commuication. These @@ -496,9 +531,12 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, unsigned long mflags = 0; unsigned long sflags = 0; - if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) return -EBUSY; + + if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)) + return -EINVAL; + /* * There are 5 cases with the wait and response requested flags. 
* The only invalid cases are if the caller requests to wait and @@ -662,6 +700,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, } return -ETIMEDOUT; } + + if (aac_check_eeh_failure(dev)) + return -EFAULT; + if ((blink = aac_adapter_check_health(dev)) > 0) { if (wait == -1) { printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n" @@ -755,7 +797,12 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, FIB_COUNTER_INCREMENT(aac_config.NativeSent); if (wait) { + spin_unlock_irqrestore(&fibptr->event_lock, flags); + + if (aac_check_eeh_failure(dev)) + return -EFAULT; + /* Only set for first known interruptable command */ if (down_interruptible(&fibptr->event_wait)) { fibptr->done = 2; @@ -1590,11 +1637,29 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) command->SCp.phase = AAC_OWNER_ERROR_HANDLER; command->scsi_done(command); } + /* + * Any Device that was already marked offline needs to be cleaned up + */ + __shost_for_each_device(dev, host) { + if (!scsi_device_online(dev)) { + sdev_printk(KERN_INFO, dev, "Removing offline device\n"); + scsi_remove_device(dev); + scsi_device_put(dev); + } + } retval = 0; out: aac->in_reset = 0; scsi_unblock_requests(host); + /* + * Issue bus rescan to catch any configuration that might have + * occurred + */ + if (!retval) { + dev_info(&aac->pdev->dev, "Issuing bus rescan\n"); + scsi_scan_host(host); + } if (jafo) { spin_lock_irq(host->host_lock); } @@ -1815,7 +1880,7 @@ int aac_check_health(struct aac_dev * aac) printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); if (!aac_check_reset || ((aac_check_reset == 1) && - (aac->supplement_adapter_info.SupportedOptions2 & + (aac->supplement_adapter_info.supported_options2 & AAC_OPTION_IGNORE_RESET))) goto out; host = aac->scsi_host_ptr; @@ -1843,9 +1908,6 @@ static void aac_resolve_luns(struct aac_dev *dev) for (bus = 0; bus < AAC_MAX_BUSES; bus++) { for (target = 0; target < AAC_MAX_TARGETS; target++) { - if (aac_phys_to_logical(bus) == ENCLOSURE_CHANNEL) - continue; - if (bus == CONTAINER_CHANNEL) channel = CONTAINER_CHANNEL; else @@ -1857,7 +1919,7 @@ static void aac_resolve_luns(struct aac_dev *dev) sdev = scsi_device_lookup(dev->scsi_host_ptr, channel, target, 0); - if (!sdev && devtype) + if (!sdev && new_devtype) scsi_add_device(dev->scsi_host_ptr, channel, target, 0); else if (sdev && new_devtype != devtype) @@ -2150,7 +2212,7 @@ static void aac_process_events(struct aac_dev *dev) /* Thor AIF */ aac_handle_sa_aif(dev, fib); aac_fib_adapter_complete(fib, (u16)sizeof(u32)); - continue; + goto free_fib; } /* * We will process the FIB here or pass it to a @@ -2264,8 +2326,8 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, aac_fib_init(fibptr); - vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus); - vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget); + vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus); + vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target); srbcmd = (struct aac_srb *)fib_data(fibptr); @@ -2434,7 +2496,7 @@ int aac_command_thread(void *data) /* Don't even try to talk to adapter if its sick */ ret = aac_check_health(dev); - if (!dev->queues) + if (ret || !dev->queues) break; next_check_jiffies = jiffies + ((long)(unsigned)check_interval) @@ -2446,8 +2508,7 @@ int aac_command_thread(void *data) && (now.tv_usec > (1000000 / HZ))) difference = (((1000000 - now.tv_usec) * HZ) + 
500000) / 1000000; - else if (ret == 0) { - + else { if (now.tv_usec > 500000) ++now.tv_sec; @@ -2458,9 +2519,6 @@ int aac_command_thread(void *data) ret = aac_send_hosttime(dev, &now); difference = (long)(unsigned)update_interval*HZ; - } else { - /* retry shortly */ - difference = 10 * HZ; } next_jiffies = jiffies + difference; if (time_before(next_check_jiffies,next_jiffies)) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 838347c44f32..520ada8266af 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -891,13 +891,13 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) * Adapters that support a register, instead of a commanded, * reset. */ - if (((aac->supplement_adapter_info.SupportedOptions2 & + if (((aac->supplement_adapter_info.supported_options2 & AAC_OPTION_MU_RESET) || - (aac->supplement_adapter_info.SupportedOptions2 & + (aac->supplement_adapter_info.supported_options2 & AAC_OPTION_DOORBELL_RESET)) && aac_check_reset && ((aac_check_reset != 1) || - !(aac->supplement_adapter_info.SupportedOptions2 & + !(aac->supplement_adapter_info.supported_options2 & AAC_OPTION_IGNORE_RESET))) { /* Bypass wait for command quiesce */ aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET); @@ -1029,8 +1029,8 @@ static ssize_t aac_show_model(struct device *device, struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; int len; - if (dev->supplement_adapter_info.AdapterTypeText[0]) { - char * cp = dev->supplement_adapter_info.AdapterTypeText; + if (dev->supplement_adapter_info.adapter_type_text[0]) { + char *cp = dev->supplement_adapter_info.adapter_type_text; while (*cp && *cp != ' ') ++cp; while (*cp == ' ') @@ -1046,18 +1046,20 @@ static ssize_t aac_show_vendor(struct device *device, struct device_attribute *attr, char *buf) { struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; + struct aac_supplement_adapter_info *sup_adap_info; int len; - if (dev->supplement_adapter_info.AdapterTypeText[0]) { - char * cp = dev->supplement_adapter_info.AdapterTypeText; + sup_adap_info = &dev->supplement_adapter_info; + if (sup_adap_info->adapter_type_text[0]) { + char *cp = sup_adap_info->adapter_type_text; while (*cp && *cp != ' ') ++cp; len = snprintf(buf, PAGE_SIZE, "%.*s\n", - (int)(cp - (char *)dev->supplement_adapter_info.AdapterTypeText), - dev->supplement_adapter_info.AdapterTypeText); + (int)(cp - (char *)sup_adap_info->adapter_type_text), + sup_adap_info->adapter_type_text); } else len = snprintf(buf, PAGE_SIZE, "%s\n", - aac_drivers[dev->cardtype].vname); + aac_drivers[dev->cardtype].vname); return len; } @@ -1078,7 +1080,7 @@ static ssize_t aac_show_flags(struct device *cdev, "SAI_READ_CAPACITY_16\n"); if (dev->jbod) len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n"); - if (dev->supplement_adapter_info.SupportedOptions2 & + if (dev->supplement_adapter_info.supported_options2 & AAC_OPTION_POWER_MANAGEMENT) len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_POWER_MANAGEMENT\n"); @@ -1129,6 +1131,13 @@ static ssize_t aac_show_bios_version(struct device *device, return len; } +static ssize_t aac_show_driver_version(struct device *device, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version); +} + static ssize_t aac_show_serial_number(struct device *device, struct device_attribute *attr, char *buf) { @@ -1139,12 +1148,12 @@ static ssize_t aac_show_serial_number(struct device *device, len = snprintf(buf, 16, "%06X\n", 
le32_to_cpu(dev->adapter_info.serial[0])); if (len && - !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ - sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len], + !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[ + sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len], buf, len-1)) len = snprintf(buf, 16, "%.*s\n", - (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), - dev->supplement_adapter_info.MfgPcbaSerialNo); + (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no), + dev->supplement_adapter_info.mfg_pcba_serial_no); return min(len, 16); } @@ -1239,6 +1248,13 @@ static struct device_attribute aac_bios_version = { }, .show = aac_show_bios_version, }; +static struct device_attribute aac_lld_version = { + .attr = { + .name = "driver_version", + .mode = 0444, + }, + .show = aac_show_driver_version, +}; static struct device_attribute aac_serial_number = { .attr = { .name = "serial_number", @@ -1276,6 +1292,7 @@ static struct device_attribute *aac_attrs[] = { &aac_kernel_version, &aac_monitor_version, &aac_bios_version, + &aac_lld_version, &aac_serial_number, &aac_max_channel, &aac_max_id, diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 0e69a80c3275..5d19c31e3bba 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -475,7 +475,7 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) { u32 var = 0; - if (!(dev->supplement_adapter_info.SupportedOptions2 & + if (!(dev->supplement_adapter_info.supported_options2 & AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) { if (bled) printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 8e4e2ddbafd7..2e5338dec621 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -437,16 +437,23 @@ static int aac_src_check_health(struct aac_dev *dev) u32 status = src_readl(dev, MUnit.OMR); /* + * Check to see if the board panic'd. + */ + if (unlikely(status & KERNEL_PANIC)) + goto err_blink; + + /* * Check to see if the board failed any self tests. */ if (unlikely(status & SELF_TEST_FAILED)) - return -1; + goto err_out; /* - * Check to see if the board panic'd. + * Check to see if the board monitor panic'd. */ - if (unlikely(status & KERNEL_PANIC)) - return (status >> 16) & 0xFF; + if (unlikely(status & MONITOR_PANIC)) + goto err_out; + /* * Wait for the adapter to be up and running.
*/ @@ -456,6 +463,12 @@ static int aac_src_check_health(struct aac_dev *dev) * Everything is OK */ return 0; + +err_out: + return -1; + +err_blink: + return (status >> 16) & 0xFF; } static inline u32 aac_get_vector(struct aac_dev *dev) @@ -657,7 +670,7 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size) return 0; } -static void aac_set_intx_mode(struct aac_dev *dev) +void aac_set_intx_mode(struct aac_dev *dev) { if (dev->msi_enabled) { aac_src_access_devreg(dev, AAC_ENABLE_INTX); @@ -666,10 +679,27 @@ static void aac_set_intx_mode(struct aac_dev *dev) } } +static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev) +{ + __le32 supported_options3; + + if (!aac_fib_dump) + return; + + supported_options3 = dev->supplement_adapter_info.supported_options3; + if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP)) + return; + + aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP, + 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); +} + static void aac_send_iop_reset(struct aac_dev *dev, int bled) { u32 var, reset_mask; + aac_dump_fw_fib_iop_reset(dev); + bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); @@ -684,7 +714,7 @@ static void aac_send_iop_reset(struct aac_dev *dev, int bled) aac_set_intx_mode(dev); - if (!bled && (dev->supplement_adapter_info.SupportedOptions2 & + if (!bled && (dev->supplement_adapter_info.supported_options2 & AAC_OPTION_DOORBELL_RESET)) { src_writel(dev, MUnit.IDR, reset_mask); } else { @@ -714,6 +744,12 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) pr_err("%s%d: adapter kernel panic'd %x.\n", dev->name, dev->id, bled); + /* + * When there is a BlinkLED, IOP_RESET has no effect + */ + if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET) + reset_type &= ~HW_IOP_RESET; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; switch (reset_type) { diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index fdd4eb4e41b2..4fc8ed5fe067 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -39,7 +39,7 @@ #include <linux/bitops.h> #include <linux/log2.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/io.h> #include <scsi/scsi.h> diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index ed7f3228e234..89ef1a1678d1 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -25,7 +25,7 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/in.h> #include <linux/kfifo.h> #include <linux/netdevice.h> diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 7069639e92bc..3061d8045382 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -2259,6 +2259,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, 0ULL }; static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, CXLFLASH_NOTIFY_SHUTDOWN }; +static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, + CXLFLASH_NOTIFY_SHUTDOWN }; /* * PCI device binding table @@ -2268,6 +2270,8 @@ static struct pci_device_id cxlflash_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, + {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, +
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, {} }; diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index e43545c86bcf..0be2261e6312 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -25,6 +25,7 @@ #define PCI_DEVICE_ID_IBM_CORSA 0x04F0 #define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600 +#define PCI_DEVICE_ID_IBM_BRIARD 0x0624 /* Since there is only one target, make it 0 */ #define CXLFLASH_TARGET 0 diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index ef5bf55f08a4..b46fd2f45628 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -305,6 +305,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct glun_info *gli = lli->parent; + struct scsi_sense_hdr sshdr; u8 *cmd_buf = NULL; u8 *scsi_cmd = NULL; u8 *sense_buf = NULL; @@ -332,7 +333,8 @@ retry: /* Drop the ioctl read semahpore across lengthy call */ up_read(&cfg->ioctl_rwsem); result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf, - CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL); + CMD_BUFSIZE, sense_buf, &sshdr, to, CMD_RETRIES, + 0, 0, NULL); down_read(&cfg->ioctl_rwsem); rc = check_state(cfg); if (rc) { @@ -345,10 +347,6 @@ retry: if (driver_byte(result) == DRIVER_SENSE) { result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */ if (result & SAM_STAT_CHECK_CONDITION) { - struct scsi_sense_hdr sshdr; - - scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE, - &sshdr); switch (sshdr.sense_key) { case NO_SENSE: case RECOVERED_ERROR: diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c index 8fcc804dbef9..7aa06ef229fd 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ b/drivers/scsi/cxlflash/vlun.c @@ -453,8 +453,8 @@ static int write_same16(struct scsi_device *sdev, /* Drop the ioctl read semahpore across lengthy call */ up_read(&cfg->ioctl_rwsem); result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf, - CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, - 0, NULL); + CMD_BUFSIZE, sense_buf, NULL, to, + CMD_RETRIES, 0, 0, NULL); down_read(&cfg->ioctl_rwsem); rc = check_state(cfg); if (rc) { diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index d704752b6332..48e200102221 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -151,11 +151,9 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff, cdb[1] = MI_REPORT_TARGET_PGS; put_unaligned_be32(bufflen, &cdb[6]); - return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE, - buff, bufflen, sshdr, - ALUA_FAILOVER_TIMEOUT * HZ, - ALUA_FAILOVER_RETRIES, NULL, - req_flags, 0); + return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, NULL, + sshdr, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, req_flags, 0, NULL); } /* @@ -185,11 +183,9 @@ static int submit_stpg(struct scsi_device *sdev, int group_id, cdb[1] = MO_SET_TARGET_PGS; put_unaligned_be32(stpg_len, &cdb[6]); - return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, - stpg_data, stpg_len, - sshdr, ALUA_FAILOVER_TIMEOUT * HZ, - ALUA_FAILOVER_RETRIES, NULL, - req_flags, 0); + return scsi_execute(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, NULL, + sshdr, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, req_flags, 0, NULL); } static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, diff --git 
a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 4a7679f6c73d..8654e940e1a8 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -276,10 +276,9 @@ static int send_trespass_cmd(struct scsi_device *sdev, BUG_ON((len > CLARIION_BUFFER_SIZE)); memcpy(csdev->buffer, page22, len); - err = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, - csdev->buffer, len, &sshdr, - CLARIION_TIMEOUT * HZ, CLARIION_RETRIES, - NULL, req_flags, 0); + err = scsi_execute(sdev, cdb, DMA_TO_DEVICE, csdev->buffer, len, NULL, + &sshdr, CLARIION_TIMEOUT * HZ, CLARIION_RETRIES, + req_flags, 0, NULL); if (err) { if (scsi_sense_valid(&sshdr)) res = trespass_endio(sdev, &sshdr); @@ -358,7 +357,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req) static int clariion_std_inquiry(struct scsi_device *sdev, struct clariion_dh_data *csdev) { - int err; + int err = SCSI_DH_OK; char *sp_model; sp_model = parse_sp_model(sdev, sdev->inquiry); diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index be43c940636d..62d314e07d11 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -100,9 +100,8 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) REQ_FAILFAST_DRIVER; retry: - res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, - HP_SW_TIMEOUT, HP_SW_RETRIES, - NULL, req_flags, 0); + res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL); if (res) { if (scsi_sense_valid(&sshdr)) ret = tur_done(sdev, h, &sshdr); @@ -139,9 +138,8 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h) REQ_FAILFAST_DRIVER; retry: - res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, - HP_SW_TIMEOUT, HP_SW_RETRIES, - NULL, req_flags, 0); + res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL); if (res) { if (!scsi_sense_valid(&sshdr)) { sdev_printk(KERN_WARNING, sdev, diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index b64eaae8533d..3cbab8710e58 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -555,10 +555,9 @@ static void send_mode_select(struct work_struct *work) (char *) h->ctlr->array_name, h->ctlr->index, (retry_cnt == RDAC_RETRY_COUNT) ? 
"queueing" : "retrying"); - if (scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, - &h->ctlr->mode_select, data_size, &sshdr, - RDAC_TIMEOUT * HZ, - RDAC_RETRIES, NULL, req_flags, 0)) { + if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select, + data_size, NULL, &sshdr, RDAC_TIMEOUT * HZ, + RDAC_RETRIES, req_flags, 0, NULL)) { err = mode_select_handle_sense(sdev, &sshdr); if (err == SCSI_DH_RETRY && retry_cnt--) goto retry; diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 6103231104da..fd501f8dbb11 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -36,6 +36,8 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> +#include <linux/rculist.h> + #include <asm/unaligned.h> #include <scsi/fc/fc_gs.h> diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index c991f3b822f8..b44c3136eb51 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -65,6 +65,8 @@ #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/export.h> +#include <linux/rculist.h> + #include <asm/unaligned.h> #include <scsi/libfc.h> diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 834d1212b6d5..07c08ce68d70 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -26,6 +26,7 @@ #include <linux/delay.h> #include <linux/log2.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <asm/unaligned.h> #include <net/tcp.h> diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index e2516ba8ebfa..cb6aa802c48e 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -1,9 +1,11 @@ #/******************************************************************* # * This file is part of the Emulex Linux Device Driver for * # * Fibre Channel Host Bus Adapters. * +# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * +# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * # * Copyright (C) 2004-2012 Emulex. All rights reserved. * # * EMULEX and SLI are trademarks of Emulex. * -# * www.emulex.com * +# * www.broadcom.com * # * * # * This program is free software; you can redistribute it and/or * # * modify it under the terms of version 2 of the GNU General * @@ -28,6 +30,7 @@ endif obj-$(CONFIG_SCSI_LPFC) := lpfc.o -lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ - lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \ - lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o +lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \ + lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \ + lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \ + lpfc_nvme.o lpfc_nvmet.o diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 6593b073c524..0bba2e30b4f0 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -20,6 +22,7 @@ *******************************************************************/ #include <scsi/scsi_host.h> +#include <linux/ktime.h> #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) #define CONFIG_SCSI_LPFC_DEBUG_FS @@ -53,6 +56,7 @@ struct lpfc_sli2_slim; #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ #define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ #define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ +#define LPFC_MIN_NVME_SEG_CNT 254 #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ @@ -114,6 +118,20 @@ enum lpfc_polling_flags { DISABLE_FCP_RING_INT = 0x2 }; +struct perf_prof { + uint16_t cmd_cpu[40]; + uint16_t rsp_cpu[40]; + uint16_t qh_cpu[40]; + uint16_t wqidx[40]; +}; + +/* + * Provide for FC4 TYPE x28 - NVME. The + * bit mask for FCP and NVME is 0x8 identically + * because they are 32 bit positions distance. + */ +#define LPFC_FC4_TYPE_BITMASK 0x00000100 + /* Provide DMA memory definitions the driver uses per port instance. */ struct lpfc_dmabuf { struct list_head list; @@ -131,10 +149,24 @@ struct lpfc_dma_pool { struct hbq_dmabuf { struct lpfc_dmabuf hbuf; struct lpfc_dmabuf dbuf; - uint32_t size; + uint16_t total_size; + uint16_t bytes_recv; uint32_t tag; struct lpfc_cq_event cq_event; unsigned long time_stamp; + void *context; +}; + +struct rqb_dmabuf { + struct lpfc_dmabuf hbuf; + struct lpfc_dmabuf dbuf; + uint16_t total_size; + uint16_t bytes_recv; + void *context; + struct lpfc_iocbq *iocbq; + struct lpfc_sglq *sglq; + struct lpfc_queue *hrq; /* ptr to associated Header RQ */ + struct lpfc_queue *drq; /* ptr to associated Data RQ */ }; /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ @@ -367,7 +399,8 @@ struct lpfc_vport { int32_t stopped; /* HBA has not been restarted since last ERATT */ uint8_t fc_linkspeed; /* Link speed after last READ_LA */ - uint32_t num_disc_nodes; /*in addition to hba_state */ + uint32_t num_disc_nodes; /* in addition to hba_state */ + uint32_t gidft_inp; /* cnt of outstanding GID_FTs */ uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ @@ -420,7 +453,6 @@ struct lpfc_vport { uint32_t cfg_max_scsicmpl_time; uint32_t cfg_tgt_queue_depth; uint32_t cfg_first_burst_size; - uint32_t dev_loss_tmo_changed; struct fc_vport *fc_vport; @@ -428,6 +460,9 @@ struct lpfc_vport { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct dentry *debug_disc_trc; struct dentry *debug_nodelist; + struct dentry *debug_nvmestat; + struct dentry *debug_nvmektime; + struct dentry *debug_cpucheck; struct dentry *vport_debugfs_root; struct lpfc_debugfs_trc *disc_trc; atomic_t disc_trc_cnt; @@ -442,6 +477,11 @@ struct lpfc_vport { uint16_t fdmi_num_disc; uint32_t fdmi_hba_mask; uint32_t fdmi_port_mask; + + /* There is a single nvme instance per vport. 
*/ + struct nvme_fc_local_port *localport; + uint8_t nvmei_support; /* driver supports NVME Initiator */ + uint32_t last_fcp_wqidx; }; struct hbq_s { @@ -459,10 +499,9 @@ struct hbq_s { struct hbq_dmabuf *); }; -#define LPFC_MAX_HBQS 4 /* this matches the position in the lpfc_hbq_defs array */ #define LPFC_ELS_HBQ 0 -#define LPFC_EXTRA_HBQ 1 +#define LPFC_MAX_HBQS 1 enum hba_temp_state { HBA_NORMAL_TEMP, @@ -652,6 +691,8 @@ struct lpfc_hba { * Firmware supports Forced Link Speed * capability */ +#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */ + uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -700,6 +741,9 @@ struct lpfc_hba { uint8_t wwpn[8]; uint32_t RandomData[7]; uint8_t fcp_embed_io; + uint8_t nvme_support; /* Firmware supports NVME */ + uint8_t nvmet_support; /* driver supports NVMET */ +#define LPFC_NVMET_MAX_PORTS 32 uint8_t mds_diags_support; /* HBA Config Parameters */ @@ -725,6 +769,14 @@ struct lpfc_hba { uint32_t cfg_fcp_imax; uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_io_channel; + uint32_t cfg_suppress_rsp; + uint32_t cfg_nvme_oas; + uint32_t cfg_nvme_io_channel; + uint32_t cfg_nvmet_mrq; + uint32_t cfg_nvmet_mrq_post; + uint32_t cfg_enable_nvmet; + uint32_t cfg_nvme_enable_fb; + uint32_t cfg_nvmet_fb_size; uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -770,6 +822,13 @@ struct lpfc_hba { #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */ uint32_t cfg_enable_SmartSAN; uint32_t cfg_enable_mds_diags; + uint32_t cfg_enable_fc4_type; + uint32_t cfg_xri_split; +#define LPFC_ENABLE_FCP 1 +#define LPFC_ENABLE_NVME 2 +#define LPFC_ENABLE_BOTH 3 + uint32_t io_channel_irqs; /* number of irqs for io channels */ + struct nvmet_fc_target_port *targetport; lpfc_vpd_t vpd; /* vital product data */ struct pci_dev *pcidev; @@ -784,11 +843,11 @@ struct lpfc_hba { unsigned long data_flags; uint32_t hbq_in_use; /* HBQs in use flag */ - struct list_head rb_pend_list; /* Received buffers to be processed */ uint32_t hbq_count; /* Count of configured HBQs */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ - atomic_t fcp_qidx; /* next work queue to post work to */ + atomic_t fcp_qidx; /* next FCP WQ (RR Policy) */ + atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */ phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */ phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */ @@ -843,9 +902,17 @@ struct lpfc_hba { /* * stat counters */ - uint64_t fc4InputRequests; - uint64_t fc4OutputRequests; - uint64_t fc4ControlRequests; + uint64_t fc4ScsiInputRequests; + uint64_t fc4ScsiOutputRequests; + uint64_t fc4ScsiControlRequests; + uint64_t fc4ScsiIoCmpls; + uint64_t fc4NvmeInputRequests; + uint64_t fc4NvmeOutputRequests; + uint64_t fc4NvmeControlRequests; + uint64_t fc4NvmeIoCmpls; + uint64_t fc4NvmeLsRequests; + uint64_t fc4NvmeLsCmpls; + uint64_t bg_guard_err_cnt; uint64_t bg_apptag_err_cnt; uint64_t bg_reftag_err_cnt; @@ -856,17 +923,23 @@ struct lpfc_hba { struct list_head lpfc_scsi_buf_list_get; struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; + spinlock_t nvme_buf_list_get_lock; /* NVME buf alloc list lock */ + spinlock_t nvme_buf_list_put_lock; /* NVME buf free list lock */ + struct list_head lpfc_nvme_buf_list_get; + struct list_head lpfc_nvme_buf_list_put; + uint32_t total_nvme_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; struct list_head active_rrq_list; spinlock_t hbalock; /* pci_mem_pools */ - struct pci_pool 
*lpfc_scsi_dma_buf_pool; + struct pci_pool *lpfc_sg_dma_buf_pool; struct pci_pool *lpfc_mbuf_pool; struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ + struct pci_pool *txrdy_payload_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; @@ -878,8 +951,6 @@ struct lpfc_hba { enum intr_type_t intr_type; uint32_t intr_mode; #define LPFC_INTR_ERROR 0xFFFFFFFF - struct msix_entry msix_entries[LPFC_MSIX_VECTORS]; - struct list_head port_list; struct lpfc_vport *pport; /* physical lpfc_vport pointer */ uint16_t max_vpi; /* Maximum virtual nports */ @@ -925,6 +996,12 @@ struct lpfc_hba { struct dentry *debug_readApp; /* inject read app_tag errors */ struct dentry *debug_readRef; /* inject read ref_tag errors */ + struct dentry *debug_nvmeio_trc; + struct lpfc_debugfs_nvmeio_trc *nvmeio_trc; + atomic_t nvmeio_trc_cnt; + uint32_t nvmeio_trc_size; + uint32_t nvmeio_trc_output_idx; + /* T10 DIF error injection */ uint32_t lpfc_injerr_wgrd_cnt; uint32_t lpfc_injerr_wapp_cnt; @@ -950,7 +1027,9 @@ struct lpfc_hba { struct dentry *idiag_ctl_acc; struct dentry *idiag_mbx_acc; struct dentry *idiag_ext_acc; + uint8_t lpfc_idiag_last_eq; #endif + uint16_t nvmeio_trc_on; /* Used for deferred freeing of ELS data buffers */ struct list_head elsbuf; @@ -1023,6 +1102,53 @@ struct lpfc_hba { #define LPFC_TRANSGRESSION_LOW_RXPOWER 0x4000 uint16_t sfp_alarm; uint16_t sfp_warning; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +#define LPFC_CHECK_CPU_CNT 32 + uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT]; + uint16_t cpucheck_on; +#define LPFC_CHECK_OFF 0 +#define LPFC_CHECK_NVME_IO 1 +#define LPFC_CHECK_NVMET_RCV 2 +#define LPFC_CHECK_NVMET_IO 4 + uint16_t ktime_on; + uint64_t ktime_data_samples; + uint64_t ktime_status_samples; + uint64_t ktime_last_cmd; + uint64_t ktime_seg1_total; + uint64_t ktime_seg1_min; + uint64_t ktime_seg1_max; + uint64_t ktime_seg2_total; + uint64_t ktime_seg2_min; + uint64_t ktime_seg2_max; + uint64_t ktime_seg3_total; + uint64_t ktime_seg3_min; + uint64_t ktime_seg3_max; + uint64_t ktime_seg4_total; + uint64_t ktime_seg4_min; + uint64_t ktime_seg4_max; + uint64_t ktime_seg5_total; + uint64_t ktime_seg5_min; + uint64_t ktime_seg5_max; + uint64_t ktime_seg6_total; + uint64_t ktime_seg6_min; + uint64_t ktime_seg6_max; + uint64_t ktime_seg7_total; + uint64_t ktime_seg7_min; + uint64_t ktime_seg7_max; + uint64_t ktime_seg8_total; + uint64_t ktime_seg8_min; + uint64_t ktime_seg8_max; + uint64_t ktime_seg9_total; + uint64_t ktime_seg9_min; + uint64_t ktime_seg9_max; + uint64_t ktime_seg10_total; + uint64_t ktime_seg10_min; + uint64_t ktime_seg10_max; +#endif }; static inline struct Scsi_Host * @@ -1093,3 +1219,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) return 0; } + +static inline struct lpfc_sli_ring * +lpfc_phba_elsring(struct lpfc_hba *phba) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return phba->sli4_hba.els_wq->pring; + return &phba->sli.sli3_ring[LPFC_ELS_RING]; +} diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 03cb05abc821..5c783ef7f260 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre 
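The lpfc.h hunk above ends by adding the lpfc_phba_elsring() inline, which later hunks (lpfc_txq_hw_show(), lpfc_txcmplq_hw_show(), lpfc_bsg_timeout()) use in place of indexing phba->sli.ring[LPFC_ELS_RING] directly. A small usage sketch; the caller name is illustrative:

static void example_els_ring_stats(struct lpfc_hba *phba)
{
	/* ELS WQ pring on SLI4, legacy sli3_ring slot otherwise */
	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"ELS txq_max %d txcmplq_max %d\n",
			pring->txq_max, pring->txcmplq_max);
}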
Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -35,14 +37,18 @@ #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> +#include <linux/nvme-fc-driver.h> + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" #include "lpfc_logmsg.h" #include "lpfc_version.h" #include "lpfc_compat.h" @@ -50,9 +56,13 @@ #include "lpfc_vport.h" #include "lpfc_attr.h" -#define LPFC_DEF_DEVLOSS_TMO 30 -#define LPFC_MIN_DEVLOSS_TMO 1 -#define LPFC_MAX_DEVLOSS_TMO 255 +#define LPFC_DEF_DEVLOSS_TMO 30 +#define LPFC_MIN_DEVLOSS_TMO 1 +#define LPFC_MAX_DEVLOSS_TMO 255 + +#define LPFC_DEF_MRQ_POST 256 +#define LPFC_MIN_MRQ_POST 32 +#define LPFC_MAX_MRQ_POST 512 /* * Write key size should be multiple of 4. If write key is changed @@ -130,6 +140,211 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, } static ssize_t +lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_tgtport *tgtp; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct nvme_fc_remote_port *nrport; + char *statep; + int len = 0; + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { + len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n"); + return len; + } + if (phba->nvmet_support) { + if (!phba->targetport) { + len = snprintf(buf, PAGE_SIZE, + "NVME Target: x%llx is not allocated\n", + wwn_to_u64(vport->fc_portname.u.wwn)); + return len; + } + /* Port state is only one of two values for now. 
*/ + if (phba->targetport->port_id) + statep = "REGISTERED"; + else + statep = "INIT"; + len += snprintf(buf + len, PAGE_SIZE - len, + "NVME Target: Enabled State %s\n", + statep); + len += snprintf(buf + len, PAGE_SIZE - len, + "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", + "NVME Target: lpfc", + phba->brd_no, + wwn_to_u64(vport->fc_portname.u.wwn), + wwn_to_u64(vport->fc_nodename.u.wwn), + phba->targetport->port_id); + + len += snprintf(buf + len, PAGE_SIZE, + "\nNVME Target: Statistics\n"); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + len += snprintf(buf+len, PAGE_SIZE-len, + "LS: Rcv %08x Drop %08x Abort %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_drop), + atomic_read(&tgtp->xmt_ls_abort)); + if (atomic_read(&tgtp->rcv_ls_req_in) != + atomic_read(&tgtp->rcv_ls_req_out)) { + len += snprintf(buf+len, PAGE_SIZE-len, + "Rcv LS: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_out)); + } + + len += snprintf(buf+len, PAGE_SIZE-len, + "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n", + atomic_read(&tgtp->xmt_ls_rsp), + atomic_read(&tgtp->xmt_ls_drop), + atomic_read(&tgtp->xmt_ls_rsp_cmpl), + atomic_read(&tgtp->xmt_ls_rsp_error)); + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP: Rcv %08x Drop %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_drop)); + + if (atomic_read(&tgtp->rcv_fcp_cmd_in) != + atomic_read(&tgtp->rcv_fcp_cmd_out)) { + len += snprintf(buf+len, PAGE_SIZE-len, + "Rcv FCP: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out)); + } + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n", + atomic_read(&tgtp->xmt_fcp_read), + atomic_read(&tgtp->xmt_fcp_read_rsp), + atomic_read(&tgtp->xmt_fcp_write), + atomic_read(&tgtp->xmt_fcp_rsp)); + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP Rsp: abort %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_drop)); + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP Rsp Cmpl: %08x err %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_cmpl), + atomic_read(&tgtp->xmt_fcp_rsp_error), + atomic_read(&tgtp->xmt_fcp_rsp_drop)); + + len += snprintf(buf+len, PAGE_SIZE-len, + "ABORT: Xmt %08x Err %08x Cmpl %08x", + atomic_read(&tgtp->xmt_abort_rsp), + atomic_read(&tgtp->xmt_abort_rsp_error), + atomic_read(&tgtp->xmt_abort_cmpl)); + + len += snprintf(buf+len, PAGE_SIZE-len, "\n"); + return len; + } + + localport = vport->localport; + if (!localport) { + len = snprintf(buf, PAGE_SIZE, + "NVME Initiator x%llx is not allocated\n", + wwn_to_u64(vport->fc_portname.u.wwn)); + return len; + } + len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n"); + + spin_lock_irq(shost->host_lock); + lport = (struct lpfc_nvme_lport *)localport->private; + + /* Port state is only one of two values for now. */ + if (localport->port_id) + statep = "ONLINE"; + else + statep = "UNKNOWN "; + + len += snprintf(buf + len, PAGE_SIZE - len, + "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", + "NVME LPORT lpfc", + phba->brd_no, + wwn_to_u64(vport->fc_portname.u.wwn), + wwn_to_u64(vport->fc_nodename.u.wwn), + localport->port_id, statep); + + list_for_each_entry(rport, &lport->rport_list, list) { + /* local short-hand pointer. */ + nrport = rport->remoteport; + + /* Port state is only one of two values for now. 
*/ + switch (nrport->port_state) { + case FC_OBJSTATE_ONLINE: + statep = "ONLINE"; + break; + case FC_OBJSTATE_UNKNOWN: + statep = "UNKNOWN "; + break; + default: + statep = "UNSUPPORTED"; + break; + } + + /* Tab in to show lport ownership. */ + len += snprintf(buf + len, PAGE_SIZE - len, + "NVME RPORT "); + if (phba->brd_no >= 10) + len += snprintf(buf + len, PAGE_SIZE - len, " "); + + len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ", + nrport->port_name); + len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ", + nrport->node_name); + len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ", + nrport->port_id); + + switch (nrport->port_role) { + case FC_PORT_ROLE_NVME_INITIATOR: + len += snprintf(buf + len, PAGE_SIZE - len, + "INITIATOR "); + break; + case FC_PORT_ROLE_NVME_TARGET: + len += snprintf(buf + len, PAGE_SIZE - len, + "TARGET "); + break; + case FC_PORT_ROLE_NVME_DISCOVERY: + len += snprintf(buf + len, PAGE_SIZE - len, + "DISCOVERY "); + break; + default: + len += snprintf(buf + len, PAGE_SIZE - len, + "UNKNOWN_ROLE x%x", + nrport->port_role); + break; + } + len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep); + /* Terminate the string. */ + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + } + spin_unlock_irq(shost->host_lock); + + len += snprintf(buf + len, PAGE_SIZE, "\nNVME Statistics\n"); + len += snprintf(buf+len, PAGE_SIZE-len, + "LS: Xmt %016llx Cmpl %016llx\n", + phba->fc4NvmeLsRequests, + phba->fc4NvmeLsCmpls); + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP: Rd %016llx Wr %016llx IO %016llx\n", + phba->fc4NvmeInputRequests, + phba->fc4NvmeOutputRequests, + phba->fc4NvmeControlRequests); + + len += snprintf(buf+len, PAGE_SIZE-len, + " Cmpl %016llx\n", phba->fc4NvmeIoCmpls); + + return len; +} + +static ssize_t lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -675,6 +890,28 @@ lpfc_issue_lip(struct Scsi_Host *shost) return 0; } +int +lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock) +{ + int cnt = 0; + + spin_lock_irq(lock); + while (!list_empty(q)) { + spin_unlock_irq(lock); + msleep(20); + if (cnt++ > 250) { /* 5 secs */ + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0466 %s %s\n", + "Outstanding IO when ", + "bringing Adapter offline\n"); + return 0; + } + spin_lock_irq(lock); + } + spin_unlock_irq(lock); + return 1; +} + /** * lpfc_do_offline - Issues a mailbox command to bring the link down * @phba: lpfc_hba pointer. @@ -694,10 +931,10 @@ static int lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) { struct completion online_compl; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; struct lpfc_sli *psli; int status = 0; - int cnt = 0; int i; int rc; @@ -717,20 +954,24 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) /* Wait a little for things to settle down, but not * long enough for dev loss timeout to expire. 
*/ - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; - while (!list_empty(&pring->txcmplq)) { - msleep(10); - if (cnt++ > 500) { /* 5 secs */ - lpfc_printf_log(phba, - KERN_WARNING, LOG_INIT, - "0466 Outstanding IO when " - "bringing Adapter offline\n"); - break; - } + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + if (!lpfc_emptyq_wait(phba, &pring->txcmplq, + &phba->hbalock)) + goto out; + } + } else { + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + if (!lpfc_emptyq_wait(phba, &pring->txcmplq, + &pring->ring_lock)) + goto out; } } - +out: init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, type); if (rc == 0) @@ -1945,6 +2186,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ } +static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL); static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); @@ -2751,6 +2993,13 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, lpfc_oas_lun_show, lpfc_oas_lun_store); +int lpfc_enable_nvmet_cnt; +unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444); +MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target"); + static int lpfc_poll = 0; module_param(lpfc_poll, int, S_IRUGO); MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" @@ -2816,9 +3065,9 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; + struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", - phba->sli.ring[LPFC_ELS_RING].txq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max); } static DEVICE_ATTR(txq_hw, S_IRUGO, @@ -2829,9 +3078,9 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; + struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", - phba->sli.ring[LPFC_ELS_RING].txcmplq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max); } static DEVICE_ATTR(txcmplq_hw, S_IRUGO, @@ -3030,6 +3279,59 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR, lpfc_devloss_tmo_show, lpfc_devloss_tmo_store); /* + * lpfc_suppress_rsp: Enable suppress rsp feature is firmware supports it + * lpfc_suppress_rsp = 0 Disable + * lpfc_suppress_rsp = 1 Enable (default) + * + */ +LPFC_ATTR_R(suppress_rsp, 1, 0, 1, + "Enable suppress rsp feature is firmware supports it"); + +/* + * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds + * lpfc_nvmet_mrq = 1 use a single RQ pair + * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ + * + */ +LPFC_ATTR_R(nvmet_mrq, + 1, 1, 16, + "Specify number of RQ pairs for processing NVMET cmds"); + +/* + * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ + * + */ +LPFC_ATTR_R(nvmet_mrq_post, 
LPFC_DEF_MRQ_POST, + LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST, + "Specify number of buffers to post on every MRQ"); + +/* + * lpfc_enable_fc4_type: Defines what FC4 types are supported. + * Supported Values: 1 - register just FCP + * 3 - register both FCP and NVME + * Supported values are [1,3]. Default value is 3 + */ +LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, + LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, + "Define fc4 type to register with fabric."); + +/* + * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME + * This parameter is only used if: + * lpfc_enable_fc4_type is 3 - register both FCP and NVME and + * port is not configured for NVMET. + * + * ELS/CT always get 10% of XRIs, up to a maximum of 250 + * The remaining XRIs get split up based on lpfc_xri_split per port: + * + * Supported Values are in percentages + * the xri_split value is the percentage the SCSI port will get. The remaining + * percentage will go to NVME. + */ +LPFC_ATTR_R(xri_split, 50, 10, 90, + "Division of XRI resources between SCSI and NVME"); + +/* # lpfc_log_verbose: Only turn this flag on if you are willing to risk being # deluged with LOTS of information. # You can set a bit mask to record specific types of verbose messages: @@ -4143,13 +4445,14 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, /* * Value range for the HBA is [5000,5000000] * The value for each EQ depends on how many EQs are configured. + * Allow value == 0 */ - if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX) + if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)) return -EINVAL; phba->cfg_fcp_imax = (uint32_t)val; - for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY) - lpfc_modify_fcp_eq_delay(phba, i); + for (i = 0; i < phba->io_channel_irqs; i++) + lpfc_modify_hba_eq_delay(phba, i); return strlen(buf); } @@ -4187,7 +4490,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) return 0; } - if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) { + if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) || + (val == 0)) { phba->cfg_fcp_imax = val; return 0; } @@ -4377,6 +4681,32 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536, "First burst size for Targets that support first burst"); /* +* lpfc_nvmet_fb_size: NVME Target mode supported first burst size. +* When the driver is configured as an NVME target, this value is +* communicated to the NVME initiator in the PRLI response. It is +* used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support +* parameters are set and the target is sending the PRLI RSP. +* Parameter supported on physical port only - no NPIV support. +* Value range is [0,65536]. Default value is 0. +*/ +LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536, + "NVME Target mode first burst size in 512B increments."); + +/* + * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions. + * For the Initiator (I), enabling this parameter means that an NVMET + * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be + * processed by the initiator for subsequent NVME FCP IO. For the target + * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size + * driver parameter as the target function's first burst size returned to the + * initiator in the target's NVME PRLI response. Parameter supported on physical + * port only - no NPIV support. + * Value range is [0,1]. Default value is 0 (disabled). 
+ */ +LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1, + "Enable First Burst feature on I and T functions."); + +/* # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue # depth. Default value is 0. When the value of this parameter is zero the # SCSI command completion time is not used for controlling I/O queue depth. When @@ -4423,17 +4753,25 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR, LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); /* -# lpfc_fcp_io_sched: Determine scheduling algrithmn for issuing FCP cmds -# range is [0,1]. Default value is 0. -# For [0], FCP commands are issued to Work Queues ina round robin fashion. -# For [1], FCP commands are issued to a Work Queue associated with the -# current CPU. -# It would be set to 1 by the driver if it's able to set up cpu affinity -# for FCP I/Os through Work Queue associated with the current CPU. Otherwise, -# roundrobin scheduling of FCP I/Os through WQs will be used. -*/ -LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for " - "issuing commands [0] - Round Robin, [1] - Current CPU"); + * lpfc_io_sched: Determine scheduling algrithmn for issuing FCP cmds + * range is [0,1]. Default value is 0. + * For [0], FCP commands are issued to Work Queues ina round robin fashion. + * For [1], FCP commands are issued to a Work Queue associated with the + * current CPU. + * + * LPFC_FCP_SCHED_ROUND_ROBIN == 0 + * LPFC_FCP_SCHED_BY_CPU == 1 + * + * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu + * affinity for FCP/NVME I/Os through Work Queues associated with the current + * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os + * through WQs will be used. + */ +LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN, + LPFC_FCP_SCHED_ROUND_ROBIN, + LPFC_FCP_SCHED_BY_CPU, + "Determine scheduling algorithm for " + "issuing commands [0] - Round Robin, [1] - Current CPU"); /* # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior @@ -4560,15 +4898,54 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* -# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels -# -# Value range is [1,7]. Default value is 4. -*/ -LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN, - LPFC_FCP_IO_CHAN_MAX, + * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs + * + * 0 = NVME OAS disabled + * 1 = NVME OAS enabled + * + * Value range is [0,1]. Default value is 0. + */ +LPFC_ATTR_RW(nvme_oas, 0, 0, 1, + "Use OAS bit on NVME IOs"); + +/* + * lpfc_fcp_io_channel: Set the number of FCP IO channels the driver + * will advertise it supports to the SCSI layer. This also will map to + * the number of WQs the driver will create. + * + * 0 = Configure the number of io channels to the number of active CPUs. + * 1,32 = Manually specify how many io channels to use. + * + * Value range is [0,32]. Default value is 4. + */ +LPFC_ATTR_R(fcp_io_channel, + LPFC_FCP_IO_CHAN_DEF, + LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX, "Set the number of FCP I/O channels"); /* + * lpfc_nvme_io_channel: Set the number of IO hardware queues the driver + * will advertise it supports to the NVME layer. This also will map to + * the number of WQs the driver will create. + * + * This module parameter is valid when lpfc_enable_fc4_type is set + * to support NVME. + * + * The NVME Layer will try to create this many, plus 1 administrative + * hardware queue. 
The administrative queue will always map to WQ 0 + * A hardware IO queue maps (qidx) to a specific driver WQ. + * + * 0 = Configure the number of io channels to the number of active CPUs. + * 1,32 = Manually specify how many io channels to use. + * + * Value range is [0,32]. Default value is 0. + */ +LPFC_ATTR_R(nvme_io_channel, + LPFC_NVME_IO_CHAN_DEF, + LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX, + "Set the number of NVME I/O channels"); + +/* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # 0 = HBA resets disabled # 1 = HBA resets enabled (default) @@ -4692,6 +5069,7 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); struct device_attribute *lpfc_hba_attrs[] = { + &dev_attr_nvme_info, &dev_attr_bg_info, &dev_attr_bg_guard_err, &dev_attr_bg_apptag_err, @@ -4718,6 +5096,8 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, + &dev_attr_lpfc_enable_fc4_type, + &dev_attr_lpfc_xri_split, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_first_burst_size, @@ -4752,9 +5132,16 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_task_mgmt_tmo, &dev_attr_lpfc_use_msi, + &dev_attr_lpfc_nvme_oas, &dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_io_channel, + &dev_attr_lpfc_suppress_rsp, + &dev_attr_lpfc_nvme_io_channel, + &dev_attr_lpfc_nvmet_mrq, + &dev_attr_lpfc_nvmet_mrq_post, + &dev_attr_lpfc_nvme_enable_fb, + &dev_attr_lpfc_nvmet_fb_size, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, @@ -5764,15 +6151,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_fdmi_on_init(phba, lpfc_fdmi_on); lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN); lpfc_use_msi_init(phba, lpfc_use_msi); + lpfc_nvme_oas_init(phba, lpfc_nvme_oas); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); - lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); + lpfc_EnableXLane_init(phba, lpfc_EnableXLane); if (phba->sli_rev != LPFC_SLI_REV4) phba->cfg_EnableXLane = 0; lpfc_XLanePriority_init(phba, lpfc_XLanePriority); + memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t))); memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t))); phba->cfg_oas_lun_state = 0; @@ -5786,9 +6175,48 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_poll = 0; else phba->cfg_poll = lpfc_poll; + lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp); + + lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); + lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); + lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post); + + /* Initialize first burst. Target vs Initiator are different. 
*/ + lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); + lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); + lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); + lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel); + + if (phba->sli_rev != LPFC_SLI_REV4) { + /* NVME only supported on SLI4 */ + phba->nvmet_support = 0; + phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; + } else { + /* We MUST have FCP support */ + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP; + } + + /* A value of 0 means use the number of CPUs found in the system */ + if (phba->cfg_fcp_io_channel == 0) + phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; + if (phba->cfg_nvme_io_channel == 0) + phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu; + + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + phba->cfg_fcp_io_channel = 0; + + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) + phba->cfg_nvme_io_channel = 0; + + if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) + phba->io_channel_irqs = phba->cfg_fcp_io_channel; + else + phba->io_channel_irqs = phba->cfg_nvme_io_channel; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; + lpfc_xri_split_init(phba, lpfc_xri_split); lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); @@ -5805,6 +6233,60 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) } /** + * lpfc_nvme_mod_param_dep - Adjust module parameter value based on + * dependencies between protocols and roles. + * @phba: lpfc_hba pointer. + **/ +void +lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) +{ + if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu) + phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu; + + if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu) + phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && + phba->nvmet_support) { + phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP; + phba->cfg_fcp_io_channel = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6013 %s x%x fb_size x%x, fb_max x%x\n", + "NVME Target PRLI ACC enable_fb ", + phba->cfg_nvme_enable_fb, + phba->cfg_nvmet_fb_size, + LPFC_NVMET_FB_SZ_MAX); + + if (phba->cfg_nvme_enable_fb == 0) + phba->cfg_nvmet_fb_size = 0; + else { + if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX) + phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX; + } + + /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */ + if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) { + phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6018 Adjust lpfc_nvmet_mrq to %d\n", + phba->cfg_nvmet_mrq); + } + } else { + /* Not NVME Target mode. Turn off Target parameters. */ + phba->nvmet_support = 0; + phba->cfg_nvmet_mrq = 0; + phba->cfg_nvmet_mrq_post = 0; + phba->cfg_nvmet_fb_size = 0; + } + + if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) + phba->io_channel_irqs = phba->cfg_fcp_io_channel; + else + phba->io_channel_irqs = phba->cfg_nvme_io_channel; +} + +/** * lpfc_get_vport_cfgparam - Used during port create, init the vport structure * @vport: lpfc_vport pointer. 
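The defaulting logic above couples lpfc_enable_fc4_type, the two io_channel counts and io_channel_irqs. As a worked sketch, assume an 8-CPU SLI4 initiator left at the module defaults (enable_fc4_type=3, both io_channel parameters 0); the helper name is illustrative and the resulting values are noted in the comments:

static void example_io_channel_defaults(struct lpfc_hba *phba)
{
	/* assumed: phba->sli4_hba.num_present_cpu == 8 */

	/* 0 means "match the CPU count", so both counts resolve to 8 */
	if (phba->cfg_fcp_io_channel == 0)
		phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
	if (phba->cfg_nvme_io_channel == 0)
		phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;

	/* the IRQ set is sized for the larger of the two, 8 vectors here */
	if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
	else
		phba->io_channel_irqs = phba->cfg_nvme_io_channel;
}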
**/ diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h index b2bd28e965fa..d56dafcdd563 100644 --- a/drivers/scsi/lpfc/lpfc_attr.h +++ b/drivers/scsi/lpfc/lpfc_attr.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 7dca4d6a8883..18157d2840a3 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2009-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -1704,6 +1706,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) struct lpfc_vport **vports; struct Scsi_Host *shost; struct lpfc_sli *psli; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; int i = 0; @@ -1711,9 +1714,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) if (!psli) return -ENODEV; - pring = &psli->ring[LPFC_FCP_RING]; - if (!pring) - return -ENODEV; if ((phba->link_state == LPFC_HBA_ERROR) || (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || @@ -1732,10 +1732,18 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) scsi_block_requests(shost); } - while (!list_empty(&pring->txcmplq)) { - if (i++ > 500) /* wait up to 5 seconds */ + if (phba->sli_rev != LPFC_SLI_REV4) { + pring = &psli->sli3_ring[LPFC_FCP_RING]; + lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock); + return 0; + } + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring || (pring->ringno != LPFC_FCP_RING)) + continue; + if (!lpfc_emptyq_wait(phba, &pring->txcmplq, + &pring->ring_lock)) break; - msleep(10); } return 0; } @@ -2703,7 +2711,7 @@ err_get_xri_exit: * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers * @phba: Pointer to HBA context object * - * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and. + * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and * returns the pointer to the buffer. 
**/ static struct lpfc_dmabuf * @@ -2875,8 +2883,7 @@ out: static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, size_t len) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *cmdiocbq; IOCB_t *cmd = NULL; struct list_head head, *curr, *next; @@ -2890,6 +2897,8 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, int iocb_stat; int i = 0; + pring = lpfc_phba_elsring(phba); + cmdiocbq = lpfc_sli_get_iocbq(phba); rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (rxbmp != NULL) { @@ -5403,13 +5412,15 @@ lpfc_bsg_timeout(struct bsg_job *job) struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct bsg_job_data *dd_data; unsigned long flags; int rc = 0; LIST_HEAD(completions); struct lpfc_iocbq *check_iocb, *next_iocb; + pring = lpfc_phba_elsring(phba); + /* if job's driver data is NULL, the command completed or is in the * the process of completing. In this case, return status to request * so the timeout is retried. This avoids double completion issues diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index f2247aa4fa17..e7d95a4e8042 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2010-2015 Emulex. All rights reserved. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2010-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h index c88e556ea62e..6b32b0ae7506 100644 --- a/drivers/scsi/lpfc/lpfc_compat.h +++ b/drivers/scsi/lpfc/lpfc_compat.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 309643a2c55c..843dd73004da 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -21,6 +23,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *); struct fc_rport; +struct fc_frame_header; void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli_read_link_ste(struct lpfc_hba *); void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t); @@ -167,6 +170,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *); void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); +int lpfc_issue_gidft(struct lpfc_vport *vport); +int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); void lpfc_fdmi_num_disc_check(struct lpfc_vport *); @@ -186,6 +191,8 @@ void lpfc_unblock_mgmt_io(struct lpfc_hba *); void lpfc_offline_prep(struct lpfc_hba *, int); void lpfc_offline(struct lpfc_hba *); void lpfc_reset_hba(struct lpfc_hba *); +int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd, + spinlock_t *slock); int lpfc_fof_queue_create(struct lpfc_hba *); int lpfc_fof_queue_setup(struct lpfc_hba *); @@ -193,7 +200,11 @@ int lpfc_fof_queue_destroy(struct lpfc_hba *); irqreturn_t lpfc_sli4_fof_intr_handler(int, void *); int lpfc_sli_setup(struct lpfc_hba *); -int lpfc_sli_queue_setup(struct lpfc_hba *); +int lpfc_sli4_setup(struct lpfc_hba *phba); +void lpfc_sli_queue_init(struct lpfc_hba *phba); +void lpfc_sli4_queue_init(struct lpfc_hba *phba); +struct lpfc_sli_ring *lpfc_sli4_calc_ring(struct lpfc_hba *phba, + struct lpfc_iocbq *iocbq); void lpfc_handle_eratt(struct lpfc_hba *); void lpfc_handle_latt(struct lpfc_hba *); @@ -220,6 +231,7 @@ void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *); void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode); void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t); @@ -231,8 +243,15 @@ struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); +struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); +void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, uint16_t); +int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, + struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe); +int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq, + struct lpfc_queue *dq, int count); +int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq); void lpfc_unregister_fcf(struct lpfc_hba *); void lpfc_unregister_fcf_rescan(struct lpfc_hba *); void lpfc_unregister_unused_fcf(struct lpfc_hba *); @@ -287,6 +306,11 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba 
*, LPFC_MBOXQ_t *); int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); +int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum, + struct lpfc_iocbq *iocbq); +struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri); +struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, + struct lpfc_iocbq *piocbq); void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); @@ -336,8 +360,13 @@ void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *); void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); +void *lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int flags, + dma_addr_t *handle); +void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma); void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); +void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp); + /* Function prototypes. */ const char* lpfc_info(struct Scsi_Host *); int lpfc_scan_finished(struct Scsi_Host *, unsigned long); @@ -356,6 +385,7 @@ extern struct device_attribute *lpfc_hba_attrs[]; extern struct device_attribute *lpfc_vport_attrs[]; extern struct scsi_host_template lpfc_template; extern struct scsi_host_template lpfc_template_s3; +extern struct scsi_host_template lpfc_template_nvme; extern struct scsi_host_template lpfc_vport_template; extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; @@ -375,9 +405,11 @@ void lpfc_host_attrib_init(struct Scsi_Host *); extern void lpfc_debugfs_initialize(struct lpfc_vport *); extern void lpfc_debugfs_terminate(struct lpfc_vport *); extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t, - uint32_t, uint32_t); + uint32_t, uint32_t); extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t, - uint32_t, uint32_t); + uint32_t, uint32_t); +extern void lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt, + uint16_t data1, uint16_t data2, uint32_t data3); extern struct lpfc_hbq_init *lpfc_hbq_defs[]; /* SLI4 if_type 2 externs. */ @@ -471,7 +503,10 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *); int lpfc_selective_reset(struct lpfc_hba *); int lpfc_sli4_read_config(struct lpfc_hba *); void lpfc_sli4_node_prep(struct lpfc_hba *); -int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); +int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); @@ -496,3 +531,26 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, uint32_t *, uint32_t *); int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox); void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb); + +/* NVME interfaces. 
*/ +void lpfc_nvme_unregister_port(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); +int lpfc_nvme_register_port(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); +int lpfc_nvme_create_localport(struct lpfc_vport *vport); +void lpfc_nvme_destroy_localport(struct lpfc_vport *vport); +void lpfc_nvme_update_localport(struct lpfc_vport *vport); +int lpfc_nvmet_create_targetport(struct lpfc_hba *phba); +int lpfc_nvmet_update_targetport(struct lpfc_hba *phba); +void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba); +void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb); +void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct rqb_dmabuf *nvmebuf, uint64_t isr_ts); +void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba); +void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocb, + struct lpfc_wcqe_complete *abts_cmpl); +extern int lpfc_enable_nvmet_cnt; +extern unsigned long long lpfc_enable_nvmet[]; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 4ac03b16d17f..c22bb3f887e1 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -40,8 +42,9 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_version.h" @@ -453,8 +456,90 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) { return NULL; } +static void +lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) +{ + struct lpfc_nodelist *ndlp; + + if ((vport->port_type != LPFC_NPIV_PORT) || + !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) { + + ndlp = lpfc_setup_disc_node(vport, Did); + + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Parse GID_FTrsp: did:x%x flg:x%x x%x", + Did, ndlp->nlp_flag, vport->fc_flag); + + /* By default, the driver expects to support FCP FC4 */ + if (fc4_type == FC_TYPE_FCP) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + + if (fc4_type == FC_TYPE_NVME) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0238 Process x%06x NameServer Rsp " + "Data: x%x x%x x%x x%x\n", Did, + ndlp->nlp_flag, ndlp->nlp_fc4_type, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } else { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, vport->fc_rscn_id_cnt); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0239 Skip x%06x NameServer Rsp " + "Data: x%x x%x\n", Did, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } + } else { + if (!(vport->fc_flag & FC_RSCN_MODE) || + lpfc_rscn_payload_check(vport, Did)) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Query GID_FTrsp: did:x%x flg:x%x cnt:%d", + Did, 
vport->fc_flag, vport->fc_rscn_id_cnt); + + /* + * This NPortID was previously a FCP target, + * Don't even bother to send GFF_ID. + */ + ndlp = lpfc_findnode_did(vport, Did); + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) + ndlp->nlp_fc4_type = fc4_type; + + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + ndlp->nlp_fc4_type = fc4_type; + + if (ndlp->nlp_type & NLP_FCP_TARGET) + lpfc_setup_disc_node(vport, Did); + + else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, + 0, Did) == 0) + vport->num_disc_nodes++; + + else + lpfc_setup_disc_node(vport, Did); + } + } else { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, vport->fc_rscn_id_cnt); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0245 Skip x%06x NameServer Rsp " + "Data: x%x x%x\n", Did, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } + } +} + static int -lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) +lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, + uint32_t Size) { struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ct_request *Response = @@ -499,97 +584,12 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) */ if ((Did != vport->fc_myDID) && ((lpfc_find_vport_by_did(phba, Did) == NULL) || - vport->cfg_peer_port_login)) { - if ((vport->port_type != LPFC_NPIV_PORT) || - (!(vport->ct_flags & FC_CT_RFF_ID)) || - (!vport->cfg_restrict_login)) { - ndlp = lpfc_setup_disc_node(vport, Did); - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { - lpfc_debugfs_disc_trc(vport, - LPFC_DISC_TRC_CT, - "Parse GID_FTrsp: " - "did:x%x flg:x%x x%x", - Did, ndlp->nlp_flag, - vport->fc_flag); - - lpfc_printf_vlog(vport, - KERN_INFO, - LOG_DISCOVERY, - "0238 Process " - "x%x NameServer Rsp" - "Data: x%x x%x x%x\n", - Did, ndlp->nlp_flag, - vport->fc_flag, - vport->fc_rscn_id_cnt); - } else { - lpfc_debugfs_disc_trc(vport, - LPFC_DISC_TRC_CT, - "Skip1 GID_FTrsp: " - "did:x%x flg:x%x cnt:%d", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - - lpfc_printf_vlog(vport, - KERN_INFO, - LOG_DISCOVERY, - "0239 Skip x%x " - "NameServer Rsp Data: " - "x%x x%x\n", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - } - - } else { - if (!(vport->fc_flag & FC_RSCN_MODE) || - (lpfc_rscn_payload_check(vport, Did))) { - lpfc_debugfs_disc_trc(vport, - LPFC_DISC_TRC_CT, - "Query GID_FTrsp: " - "did:x%x flg:x%x cnt:%d", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - - /* This NPortID was previously - * a FCP target, * Don't even - * bother to send GFF_ID. 
- */ - ndlp = lpfc_findnode_did(vport, - Did); - if (ndlp && - NLP_CHK_NODE_ACT(ndlp) - && (ndlp->nlp_type & - NLP_FCP_TARGET)) - lpfc_setup_disc_node - (vport, Did); - else if (lpfc_ns_cmd(vport, - SLI_CTNS_GFF_ID, - 0, Did) == 0) - vport->num_disc_nodes++; - else - lpfc_setup_disc_node - (vport, Did); - } - else { - lpfc_debugfs_disc_trc(vport, - LPFC_DISC_TRC_CT, - "Skip2 GID_FTrsp: " - "did:x%x flg:x%x cnt:%d", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - - lpfc_printf_vlog(vport, - KERN_INFO, - LOG_DISCOVERY, - "0245 Skip x%x " - "NameServer Rsp Data: " - "x%x x%x\n", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - } - } - } + vport->cfg_peer_port_login)) + lpfc_prep_node_fc4type(vport, Did, fc4_type); + if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY))) goto nsout1; + Cnt -= sizeof(uint32_t); } ctptr = NULL; @@ -609,16 +609,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_dmabuf *outp; + struct lpfc_dmabuf *inp; struct lpfc_sli_ct_request *CTrsp; + struct lpfc_sli_ct_request *CTreq; struct lpfc_nodelist *ndlp; - int rc; + int rc, type; /* First save ndlp, before we overwrite it */ ndlp = cmdiocb->context_un.ndlp; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; - + inp = (struct lpfc_dmabuf *) cmdiocb->context1; outp = (struct lpfc_dmabuf *) cmdiocb->context2; irsp = &rspiocb->iocb; @@ -656,9 +658,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOERR_NO_RESOURCES) vport->fc_ns_retry++; + type = lpfc_get_gidft_type(vport, cmdiocb); + if (type == 0) + goto out; + /* CT command is being retried */ + vport->gidft_inp--; rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, - vport->fc_ns_retry, 0); + vport->fc_ns_retry, type); if (rc == 0) goto out; } @@ -670,13 +677,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, vport->fc_ns_retry); } else { /* Good status, continue checking */ + CTreq = (struct lpfc_sli_ct_request *) inp->virt; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; if (CTrsp->CommandResponse.bits.CmdRsp == cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0208 NameServer Rsp Data: x%x\n", - vport->fc_flag); - lpfc_ns_rsp(vport, outp, + "0208 NameServer Rsp Data: x%x x%x\n", + vport->fc_flag, + CTreq->un.gid.Fc4Type); + + lpfc_ns_rsp(vport, + outp, + CTreq->un.gid.Fc4Type, (uint32_t) (irsp->un.genreq64.bdl.bdeSize)); } else if (CTrsp->CommandResponse.bits.CmdRsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { @@ -731,9 +743,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } + vport->gidft_inp--; } /* Link up / RSCN discovery */ - if (vport->num_disc_nodes == 0) { + if ((vport->num_disc_nodes == 0) && + (vport->gidft_inp == 0)) { /* * The driver has cycled through all Nports in the RSCN payload. 
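Editorial note: the GID_FT completion path above now gates the end of discovery on two counters, the usual vport->num_disc_nodes and the new vport->gidft_inp, which counts name-server queries still on the wire (one per enabled FC4 type). A minimal userspace sketch of that counting pattern, with hypothetical names standing in for the driver's fields:

#include <stdio.h>

/* Illustrative only: issue one GID_FT per enabled FC4 type and declare
 * discovery complete only when every response has come back and no
 * per-node work remains.  The driver keeps this count in vport->gidft_inp.
 */
struct disc_state {
	int gidft_outstanding;	/* name-server queries still in flight */
	int disc_nodes;		/* nodes still being processed */
};

static void issue_gidft(struct disc_state *st, const char *fc4_type)
{
	/* A real implementation would build and send the CT request here. */
	printf("GID_FT issued for %s\n", fc4_type);
	st->gidft_outstanding++;
}

static void gidft_done(struct disc_state *st)
{
	st->gidft_outstanding--;
	if (st->gidft_outstanding == 0 && st->disc_nodes == 0)
		printf("discovery complete\n");
}

int main(void)
{
	struct disc_state st = { 0, 0 };

	issue_gidft(&st, "FCP");
	issue_gidft(&st, "NVME");
	gidft_done(&st);	/* one query still outstanding, keep waiting */
	gidft_done(&st);	/* last response arrives, discovery completes */
	return 0;
}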
* Complete the handling by cleaning up and marking the @@ -881,6 +895,60 @@ out: return; } +static void +lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1; + struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2; + struct lpfc_sli_ct_request *CTrsp; + int did; + struct lpfc_nodelist *ndlp; + uint32_t fc4_data_0, fc4_data_1; + + did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId; + did = be32_to_cpu(did); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GFT_ID cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], did); + + if (irsp->ulpStatus == IOSTAT_SUCCESS) { + /* Good status, continue checking */ + CTrsp = (struct lpfc_sli_ct_request *)outp->virt; + fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]); + fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]); + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n", + did, fc4_data_0, fc4_data_1); + + ndlp = lpfc_findnode_did(vport, did); + if (ndlp) { + /* The bitmask value for FCP and NVME FCP types is + * the same because they are 32 bits distant from + * each other in word0 and word0. + */ + if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + "3064 Setting ndlp %p, DID x%06x with " + "FC4 x%08x, Data: x%08x x%08x\n", + ndlp, did, ndlp->nlp_fc4_type, + FC_TYPE_FCP, FC_TYPE_NVME); + } + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); + lpfc_issue_els_prli(vport, ndlp, 0); + } else + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + "3065 GFT_ID failed x%08x\n", irsp->ulpStatus); + + lpfc_ct_free_iocb(phba, cmdiocb); +} static void lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, @@ -1071,31 +1139,27 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, return; } +/* + * Although the symbolic port name is thought to be an integer + * as of January 18, 2016, leave it as a string until more of + * the record state becomes defined. + */ int lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, size_t size) { int n; - uint8_t *wwn = vport->phba->wwpn; - n = snprintf(symbol, size, - "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", - wwn[0], wwn[1], wwn[2], wwn[3], - wwn[4], wwn[5], wwn[6], wwn[7]); - - if (vport->port_type == LPFC_PHYSICAL_PORT) - return n; - - if (n < size) - n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi); - - if (n < size && - strlen(vport->fc_vport->symbolic_name)) - n += snprintf(symbol + n, size - n, " VName-%s", - vport->fc_vport->symbolic_name); + /* + * Use the lpfc board number as the Symbolic Port + * Name object. NPIV is not in play so this integer + * value is sufficient and unique per FC-ID. 
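Editorial note: the GFT_ID completion handler added above pulls two 32-bit words out of the accept payload and tests the same bit in each, because FCP (TYPE 0x08) and NVMe (TYPE 0x28) sit exactly 32 bits apart in the FC-4 TYPEs bitmap. A hedged decode sketch, assuming the usual FC-GS mapping of TYPE value t to bit t % 32 of word t / 32; the flag constants here are illustrative, where the driver uses NLP_FC4_FCP/NLP_FC4_NVME and masks with LPFC_FC4_TYPE_BITMASK:

#include <stdint.h>
#include <stdio.h>

#define FC_TYPE_FCP	0x08	/* SCSI FCP */
#define FC_TYPE_NVME	0x28	/* NVMe over FC */

#define FC4_FLAG_FCP	0x1	/* illustrative flag values */
#define FC4_FLAG_NVME	0x2

/* Decode a GFT_ID FC-4 TYPEs bitmap already byte-swapped to CPU order.
 * FCP (0x08) and NVME (0x28) test the same bit (0x00000100) in word 0
 * and word 1 respectively, which is why the driver reuses one bitmask.
 */
static unsigned int decode_fc4_types(const uint32_t words[8])
{
	unsigned int flags = 0;

	if (words[FC_TYPE_FCP / 32] & (1u << (FC_TYPE_FCP % 32)))
		flags |= FC4_FLAG_FCP;
	if (words[FC_TYPE_NVME / 32] & (1u << (FC_TYPE_NVME % 32)))
		flags |= FC4_FLAG_NVME;
	return flags;
}

int main(void)
{
	uint32_t acc[8] = { 0 };

	acc[0] = 0x00000100;	/* FCP registered by the remote port */
	acc[1] = 0x00000100;	/* NVME registered by the remote port */
	printf("fc4 flags: 0x%x\n", decode_fc4_types(acc));
	return 0;
}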
+ */ + n = snprintf(symbol, size, "%d", vport->phba->brd_no); return n; } + int lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, size_t size) @@ -1106,24 +1170,26 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, lpfc_decode_firmware_rev(vport->phba, fwrev, 0); n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName); - if (size < n) return n; - n += snprintf(symbol + n, size - n, " FV%s", fwrev); + n += snprintf(symbol + n, size - n, " FV%s", fwrev); if (size < n) return n; - n += snprintf(symbol + n, size - n, " DV%s", lpfc_release_version); + n += snprintf(symbol + n, size - n, " DV%s.", + lpfc_release_version); if (size < n) return n; - n += snprintf(symbol + n, size - n, " HN:%s", init_utsname()->nodename); - /* Note :- OS name is "Linux" */ + n += snprintf(symbol + n, size - n, " HN:%s.", + init_utsname()->nodename); if (size < n) return n; - n += snprintf(symbol + n, size - n, " OS:%s", init_utsname()->sysname); + /* Note :- OS name is "Linux" */ + n += snprintf(symbol + n, size - n, " OS:%s\n", + init_utsname()->sysname); return n; } @@ -1148,6 +1214,27 @@ lpfc_find_map_node(struct lpfc_vport *vport) } /* + * This routine will return the FC4 Type associated with the CT + * GID_FT command. + */ +int +lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_sli_ct_request *CtReq; + struct lpfc_dmabuf *mp; + uint32_t type; + + mp = cmdiocb->context1; + if (mp == NULL) + return 0; + CtReq = (struct lpfc_sli_ct_request *)mp->virt; + type = (uint32_t)CtReq->un.gid.Fc4Type; + if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME)) + return 0; + return type; +} + +/* * lpfc_ns_cmd * Description: * Issue Cmd to NameServer @@ -1207,8 +1294,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, /* NameServer Req */ lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY, - "0236 NameServer Req Data: x%x x%x x%x\n", - cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt); + "0236 NameServer Req Data: x%x x%x x%x x%x\n", + cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt, + context); bpl = (struct ulp_bde64 *) bmp->virt; memset(bpl, 0, sizeof(struct ulp_bde64)); @@ -1219,6 +1307,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, bpl->tus.f.bdeSize = GID_REQUEST_SZ; else if (cmdcode == SLI_CTNS_GFF_ID) bpl->tus.f.bdeSize = GFF_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_GFT_ID) + bpl->tus.f.bdeSize = GFT_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFT_ID) bpl->tus.f.bdeSize = RFT_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RNN_ID) @@ -1246,7 +1336,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, case SLI_CTNS_GID_FT: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GID_FT); - CtReq->un.gid.Fc4Type = SLI_CTPT_FCP; + CtReq->un.gid.Fc4Type = context; + if (vport->port_state < LPFC_NS_QRY) vport->port_state = LPFC_NS_QRY; lpfc_set_disctmo(vport); @@ -1261,12 +1352,32 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, cmpl = lpfc_cmpl_ct_cmd_gff_id; break; + case SLI_CTNS_GFT_ID: + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_GFT_ID); + CtReq->un.gft.PortId = cpu_to_be32(context); + cmpl = lpfc_cmpl_ct_cmd_gft_id; + break; + case SLI_CTNS_RFT_ID: vport->ct_flags &= ~FC_CT_RFT_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RFT_ID); CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID); - CtReq->un.rft.fcpReg = 1; + + /* Register FC4 FCP type if enabled. 
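Editorial note: the reworked symbolic node-name builder above follows the standard bounded-append pattern: keep a running offset, format at buf + offset with the remaining space, and stop once the would-be length (snprintf returns what it wanted to write, not what fit) reaches the buffer size. A compact userspace sketch of the same structure, with made-up field values:

#include <stdio.h>
#include <string.h>

/* Append formatted fields to a fixed-size buffer, bailing out as soon
 * as the accumulated would-be length no longer fits.  Mirrors the
 * "n += snprintf(symbol + n, size - n, ...); if (size < n) return n;"
 * shape used for the symbolic node name.
 */
static int build_symbolic_name(char *buf, size_t size,
			       const char *model, const char *fwrev,
			       const char *host)
{
	int n;

	n = snprintf(buf, size, "Emulex %s", model);
	if ((size_t)n >= size)
		return n;
	n += snprintf(buf + n, size - n, " FV%s", fwrev);
	if ((size_t)n >= size)
		return n;
	n += snprintf(buf + n, size - n, " HN:%s", host);
	return n;
}

int main(void)
{
	char name[64];

	build_symbolic_name(name, sizeof(name), "LPe32000", "11.2.0.0", "host1");
	printf("%s\n", name);
	return 0;
}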
*/ + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) + CtReq->un.rft.fcpReg = 1; + + /* Register NVME type if enabled. Defined LE and swapped. + * rsvd[0] is used as word1 because of the hard-coded + * word0 usage in the ct_request data structure. + */ + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) + CtReq->un.rft.rsvd[0] = cpu_to_be32(0x00000100); + cmpl = lpfc_cmpl_ct_cmd_rft_id; break; @@ -1316,7 +1427,31 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, cpu_to_be16(SLI_CTNS_RFF_ID); CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); CtReq->un.rff.fbits = FC4_FEATURE_INIT; - CtReq->un.rff.type_code = FC_TYPE_FCP; + + /* The driver always supports FC_TYPE_FCP. However, the + * caller can specify NVME (type x28) as well. But only + * these that FC4 type is supported. + */ + if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && + (context == FC_TYPE_NVME)) { + if ((vport == phba->pport) && phba->nvmet_support) { + CtReq->un.rff.fbits = (FC4_FEATURE_TARGET | + FC4_FEATURE_NVME_DISC); + lpfc_nvmet_update_targetport(phba); + } else { + lpfc_nvme_update_localport(vport); + } + CtReq->un.rff.type_code = context; + + } else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) && + (context == FC_TYPE_FCP)) + CtReq->un.rff.type_code = context; + + else + goto ns_cmd_free_bmpvirt; + cmpl = lpfc_cmpl_ct_cmd_rff_id; break; } @@ -1337,6 +1472,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, */ lpfc_nlp_put(ndlp); +ns_cmd_free_bmpvirt: lpfc_mbuf_free(phba, bmp->virt, bmp->phys); ns_cmd_free_bmp: kfree(bmp); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index caa7a7b0ec53..9f4798e9d938 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2007-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
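Editorial note: the RFT_ID and RFF_ID cases above register the port's FC-4 types and features with the name server, and each type is now gated on cfg_enable_fc4_type (FCP only, NVMe only, or both). A small sketch of that selection logic; the enum values and payload struct are hypothetical stand-ins, since the driver sets CtReq->un.rft.fcpReg and a byte-swapped NVMe word in the CT request itself:

#include <stdio.h>

/* Illustrative config values; the driver's are LPFC_ENABLE_FCP/NVME/BOTH. */
enum fc4_enable {
	ENABLE_FCP  = 1,
	ENABLE_NVME = 2,
	ENABLE_BOTH = 3,
};

struct rft_request {		/* hypothetical stand-in for the CT payload */
	int register_fcp;
	int register_nvme;
};

static void prep_rft(struct rft_request *req, enum fc4_enable cfg)
{
	req->register_fcp = 0;
	req->register_nvme = 0;

	if (cfg == ENABLE_BOTH || cfg == ENABLE_FCP)
		req->register_fcp = 1;	/* FCP remains the traditional default */
	if (cfg == ENABLE_BOTH || cfg == ENABLE_NVME)
		req->register_nvme = 1;	/* NVMe registered only when enabled */
}

int main(void)
{
	struct rft_request req;

	prep_rft(&req, ENABLE_BOTH);
	printf("fcp=%d nvme=%d\n", req.register_fcp, req.register_nvme);
	return 0;
}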
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -34,6 +36,9 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fs.h> + +#include <linux/nvme-fc-driver.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -41,8 +46,10 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -99,6 +106,12 @@ module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, "Set debugfs slow ring trace depth"); +/* This MUST be a power of 2 */ +static int lpfc_debugfs_max_nvmeio_trc; +module_param(lpfc_debugfs_max_nvmeio_trc, int, 0444); +MODULE_PARM_DESC(lpfc_debugfs_max_nvmeio_trc, + "Set debugfs NVME IO trace depth"); + static int lpfc_debugfs_mask_disc_trc; module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, @@ -484,20 +497,23 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) off += (8 * sizeof(uint32_t)); } - for (i = 0; i < 4; i++) { - pgpp = &phba->port_gp[i]; - pring = &psli->ring[i]; - len += snprintf(buf+len, size-len, - "Ring %d: CMD GetInx:%d (Max:%d Next:%d " - "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n", - i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb, - pring->sli.sli3.next_cmdidx, - pring->sli.sli3.local_getidx, - pring->flag, pgpp->rspPutInx, - pring->sli.sli3.numRiocb); - } - if (phba->sli_rev <= LPFC_SLI_REV3) { + for (i = 0; i < 4; i++) { + pgpp = &phba->port_gp[i]; + pring = &psli->sli3_ring[i]; + len += snprintf(buf+len, size-len, + "Ring %d: CMD GetInx:%d " + "(Max:%d Next:%d " + "Local:%d flg:x%x) " + "RSP PutInx:%d Max:%d\n", + i, pgpp->cmdGetInx, + pring->sli.sli3.numCiocb, + pring->sli.sli3.next_cmdidx, + pring->sli.sli3.local_getidx, + pring->flag, pgpp->rspPutInx, + pring->sli.sli3.numRiocb); + } + word0 = readl(phba->HAregaddr); word1 = readl(phba->CAregaddr); word2 = readl(phba->HSregaddr); @@ -530,11 +546,18 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) int len = 0; int cnt; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; - unsigned char *statep, *name; + unsigned char *statep; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct lpfc_nvmet_tgtport *tgtp; + struct nvme_fc_remote_port *nrport; cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); + len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n"); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (!cnt) { @@ -574,36 +597,32 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) default: statep = "UNKNOWN"; } - len += snprintf(buf+len, size-len, "%s DID:x%06x ", - statep, ndlp->nlp_DID); - name = (unsigned char *)&ndlp->nlp_portname; - len += snprintf(buf+len, size-len, - "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", - *name, *(name+1), *(name+2), *(name+3), - *(name+4), *(name+5), *(name+6), *(name+7)); - name = (unsigned char *)&ndlp->nlp_nodename; - len += snprintf(buf+len, size-len, - "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", - *name, *(name+1), *(name+2), 
*(name+3), - *(name+4), *(name+5), *(name+6), *(name+7)); + len += snprintf(buf+len, size-len, "%s DID:x%06x ", + statep, ndlp->nlp_DID); + len += snprintf(buf+len, size-len, + "WWPN x%llx ", + wwn_to_u64(ndlp->nlp_portname.u.wwn)); + len += snprintf(buf+len, size-len, + "WWNN x%llx ", + wwn_to_u64(ndlp->nlp_nodename.u.wwn)); if (ndlp->nlp_flag & NLP_RPI_REGISTERED) - len += snprintf(buf+len, size-len, "RPI:%03d ", - ndlp->nlp_rpi); + len += snprintf(buf+len, size-len, "RPI:%03d ", + ndlp->nlp_rpi); else - len += snprintf(buf+len, size-len, "RPI:none "); + len += snprintf(buf+len, size-len, "RPI:none "); len += snprintf(buf+len, size-len, "flag:x%08x ", ndlp->nlp_flag); if (!ndlp->nlp_type) - len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); + len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) - len += snprintf(buf+len, size-len, "FC_NODE "); + len += snprintf(buf+len, size-len, "FC_NODE "); if (ndlp->nlp_type & NLP_FABRIC) - len += snprintf(buf+len, size-len, "FABRIC "); + len += snprintf(buf+len, size-len, "FABRIC "); if (ndlp->nlp_type & NLP_FCP_TARGET) - len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ", + len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ", ndlp->nlp_sid); if (ndlp->nlp_type & NLP_FCP_INITIATOR) - len += snprintf(buf+len, size-len, "FCP_INITIATOR "); + len += snprintf(buf+len, size-len, "FCP_INITIATOR "); len += snprintf(buf+len, size-len, "usgmap:%x ", ndlp->nlp_usg_map); len += snprintf(buf+len, size-len, "refcnt:%x", @@ -611,8 +630,592 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) len += snprintf(buf+len, size-len, "\n"); } spin_unlock_irq(shost->host_lock); + + if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) { + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + len += snprintf(buf + len, size - len, + "\nNVME Targetport Entry ...\n"); + + /* Port state is only one of two values for now. */ + if (phba->targetport->port_id) + statep = "REGISTERED"; + else + statep = "INIT"; + len += snprintf(buf + len, size - len, + "TGT WWNN x%llx WWPN x%llx State %s\n", + wwn_to_u64(vport->fc_nodename.u.wwn), + wwn_to_u64(vport->fc_portname.u.wwn), + statep); + len += snprintf(buf + len, size - len, + " Targetport DID x%06x\n", + phba->targetport->port_id); + goto out_exit; + } + + len += snprintf(buf + len, size - len, + "\nNVME Lport/Rport Entries ...\n"); + + localport = vport->localport; + if (!localport) + goto out_exit; + + spin_lock_irq(shost->host_lock); + lport = (struct lpfc_nvme_lport *)localport->private; + + /* Port state is only one of two values for now. */ + if (localport->port_id) + statep = "ONLINE"; + else + statep = "UNKNOWN "; + + len += snprintf(buf + len, size - len, + "Lport DID x%06x PortState %s\n", + localport->port_id, statep); + + len += snprintf(buf + len, size - len, "\tRport List:\n"); + list_for_each_entry(rport, &lport->rport_list, list) { + /* local short-hand pointer. */ + nrport = rport->remoteport; + + /* Port state is only one of two values for now. */ + switch (nrport->port_state) { + case FC_OBJSTATE_ONLINE: + statep = "ONLINE"; + break; + case FC_OBJSTATE_UNKNOWN: + statep = "UNKNOWN "; + break; + default: + statep = "UNSUPPORTED"; + break; + } + + /* Tab in to show lport ownership. 
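Editorial note: the node-list dump above now prints WWPN and WWNN as single 64-bit hex values through wwn_to_u64() rather than eight separate bytes. That helper simply assembles the eight name bytes most-significant-byte first; a userspace equivalent for illustration:

#include <stdint.h>
#include <stdio.h>

/* Assemble an 8-byte Fibre Channel world-wide name, most significant
 * byte first, into a 64-bit value suitable for a single %llx conversion.
 * The kernel's wwn_to_u64() in scsi_transport_fc.h does the same thing.
 */
static uint64_t wwn_to_u64(const uint8_t wwn[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };

	printf("WWPN x%llx\n", (unsigned long long)wwn_to_u64(wwpn));
	return 0;
}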
*/ + len += snprintf(buf + len, size - len, + "\t%s Port ID:x%06x ", + statep, nrport->port_id); + len += snprintf(buf + len, size - len, "WWPN x%llx ", + nrport->port_name); + len += snprintf(buf + len, size - len, "WWNN x%llx ", + nrport->node_name); + switch (nrport->port_role) { + case FC_PORT_ROLE_NVME_INITIATOR: + len += snprintf(buf + len, size - len, + "NVME INITIATOR "); + break; + case FC_PORT_ROLE_NVME_TARGET: + len += snprintf(buf + len, size - len, + "NVME TARGET "); + break; + case FC_PORT_ROLE_NVME_DISCOVERY: + len += snprintf(buf + len, size - len, + "NVME DISCOVERY "); + break; + default: + len += snprintf(buf + len, size - len, + "UNKNOWN ROLE x%x", + nrport->port_role); + break; + } + + /* Terminate the string. */ + len += snprintf(buf + len, size - len, "\n"); + } + + spin_unlock_irq(shost->host_lock); + out_exit: + return len; +} + +/** + * lpfc_debugfs_nvmestat_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_tgtport *tgtp; + int len = 0; + + if (phba->nvmet_support) { + if (!phba->targetport) + return len; + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + len += snprintf(buf+len, size-len, + "\nNVME Targetport Statistics\n"); + + len += snprintf(buf+len, size-len, + "LS: Rcv %08x Drop %08x Abort %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_drop), + atomic_read(&tgtp->xmt_ls_abort)); + if (atomic_read(&tgtp->rcv_ls_req_in) != + atomic_read(&tgtp->rcv_ls_req_out)) { + len += snprintf(buf+len, size-len, + "Rcv LS: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_out)); + } + + len += snprintf(buf+len, size-len, + "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n", + atomic_read(&tgtp->xmt_ls_rsp), + atomic_read(&tgtp->xmt_ls_drop), + atomic_read(&tgtp->xmt_ls_rsp_cmpl), + atomic_read(&tgtp->xmt_ls_rsp_error)); + + len += snprintf(buf+len, size-len, + "FCP: Rcv %08x Drop %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_drop)); + + if (atomic_read(&tgtp->rcv_fcp_cmd_in) != + atomic_read(&tgtp->rcv_fcp_cmd_out)) { + len += snprintf(buf+len, size-len, + "Rcv FCP: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out)); + } + + len += snprintf(buf+len, size-len, + "FCP Rsp: read %08x readrsp %08x write %08x rsp %08x\n", + atomic_read(&tgtp->xmt_fcp_read), + atomic_read(&tgtp->xmt_fcp_read_rsp), + atomic_read(&tgtp->xmt_fcp_write), + atomic_read(&tgtp->xmt_fcp_rsp)); + + len += snprintf(buf+len, size-len, + "FCP Rsp: abort %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_drop)); + + len += snprintf(buf+len, size-len, + "FCP Rsp Cmpl: %08x err %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_cmpl), + atomic_read(&tgtp->xmt_fcp_rsp_error), + atomic_read(&tgtp->xmt_fcp_rsp_drop)); + + len += snprintf(buf+len, size-len, + "ABORT: Xmt %08x Err %08x Cmpl %08x", + atomic_read(&tgtp->xmt_abort_rsp), + atomic_read(&tgtp->xmt_abort_rsp_error), + atomic_read(&tgtp->xmt_abort_cmpl)); + + len += 
snprintf(buf+len, size-len, "\n"); + } else { + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return len; + + len += snprintf(buf + len, size - len, + "\nNVME Lport Statistics\n"); + + len += snprintf(buf + len, size - len, + "LS: Xmt %016llx Cmpl %016llx\n", + phba->fc4NvmeLsRequests, + phba->fc4NvmeLsCmpls); + + len += snprintf(buf + len, size - len, + "FCP: Rd %016llx Wr %016llx IO %016llx\n", + phba->fc4NvmeInputRequests, + phba->fc4NvmeOutputRequests, + phba->fc4NvmeControlRequests); + + len += snprintf(buf + len, size - len, + " Cmpl %016llx\n", phba->fc4NvmeIoCmpls); + } + return len; } + + +/** + * lpfc_debugfs_nvmektime_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) +{ + struct lpfc_hba *phba = vport->phba; + int len = 0; + + if (phba->nvmet_support == 0) { + /* NVME Initiator */ + len += snprintf(buf + len, PAGE_SIZE - len, + "ktime %s: Total Samples: %lld\n", + (phba->ktime_on ? "Enabled" : "Disabled"), + phba->ktime_data_samples); + if (phba->ktime_data_samples == 0) + return len; + + len += snprintf( + buf + len, PAGE_SIZE - len, + "Segment 1: Last NVME Cmd cmpl " + "done -to- Start of next NVME cnd (in driver)\n"); + len += snprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg1_total, + phba->ktime_data_samples), + phba->ktime_seg1_min, + phba->ktime_seg1_max); + len += snprintf( + buf + len, PAGE_SIZE - len, + "Segment 2: Driver start of NVME cmd " + "-to- Firmware WQ doorbell\n"); + len += snprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg2_total, + phba->ktime_data_samples), + phba->ktime_seg2_min, + phba->ktime_seg2_max); + len += snprintf( + buf + len, PAGE_SIZE - len, + "Segment 3: Firmware WQ doorbell -to- " + "MSI-X ISR cmpl\n"); + len += snprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg3_total, + phba->ktime_data_samples), + phba->ktime_seg3_min, + phba->ktime_seg3_max); + len += snprintf( + buf + len, PAGE_SIZE - len, + "Segment 4: MSI-X ISR cmpl -to- " + "NVME cmpl done\n"); + len += snprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg4_total, + phba->ktime_data_samples), + phba->ktime_seg4_min, + phba->ktime_seg4_max); + len += snprintf( + buf + len, PAGE_SIZE - len, + "Total IO avg time: %08lld\n", + div_u64(phba->ktime_seg1_total + + phba->ktime_seg2_total + + phba->ktime_seg3_total + + phba->ktime_seg4_total, + phba->ktime_data_samples)); + return len; + } + + /* NVME Target */ + len += snprintf(buf + len, PAGE_SIZE-len, + "ktime %s: Total Samples: %lld %lld\n", + (phba->ktime_on ? 
"Enabled" : "Disabled"), + phba->ktime_data_samples, + phba->ktime_status_samples); + if (phba->ktime_data_samples == 0) + return len; + + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 1: MSI-X ISR Rcv cmd -to- " + "cmd pass to NVME Layer\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg1_total, + phba->ktime_data_samples), + phba->ktime_seg1_min, + phba->ktime_seg1_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 2: cmd pass to NVME Layer- " + "-to- Driver rcv cmd OP (action)\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg2_total, + phba->ktime_data_samples), + phba->ktime_seg2_min, + phba->ktime_seg2_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 3: Driver rcv cmd OP -to- " + "Firmware WQ doorbell: cmd\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg3_total, + phba->ktime_data_samples), + phba->ktime_seg3_min, + phba->ktime_seg3_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 4: Firmware WQ doorbell: cmd " + "-to- MSI-X ISR for cmd cmpl\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg4_total, + phba->ktime_data_samples), + phba->ktime_seg4_min, + phba->ktime_seg4_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 5: MSI-X ISR for cmd cmpl " + "-to- NVME layer passed cmd done\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg5_total, + phba->ktime_data_samples), + phba->ktime_seg5_min, + phba->ktime_seg5_max); + + if (phba->ktime_status_samples == 0) { + len += snprintf(buf + len, PAGE_SIZE-len, + "Total: cmd received by MSI-X ISR " + "-to- cmd completed on wire\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld " + "max %08lld\n", + div_u64(phba->ktime_seg10_total, + phba->ktime_data_samples), + phba->ktime_seg10_min, + phba->ktime_seg10_max); + return len; + } + + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 6: NVME layer passed cmd done " + "-to- Driver rcv rsp status OP\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg6_total, + phba->ktime_status_samples), + phba->ktime_seg6_min, + phba->ktime_seg6_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 7: Driver rcv rsp status OP " + "-to- Firmware WQ doorbell: status\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg7_total, + phba->ktime_status_samples), + phba->ktime_seg7_min, + phba->ktime_seg7_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 8: Firmware WQ doorbell: status" + " -to- MSI-X ISR for status cmpl\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg8_total, + phba->ktime_status_samples), + phba->ktime_seg8_min, + phba->ktime_seg8_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 9: MSI-X ISR for status cmpl " + "-to- NVME layer passed status done\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg9_total, + phba->ktime_status_samples), + phba->ktime_seg9_min, + phba->ktime_seg9_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Total: cmd received by MSI-X ISR -to- " + "cmd completed on wire\n"); + len += snprintf(buf + len, 
PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg10_total, + phba->ktime_status_samples), + phba->ktime_seg10_min, + phba->ktime_seg10_max); + return len; +} + +/** + * lpfc_debugfs_nvmeio_trc_data - Dump NVME IO trace list to a buffer + * @phba: The phba to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME IO trace associated with @phba + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) +{ + struct lpfc_debugfs_nvmeio_trc *dtp; + int i, state, index, skip; + int len = 0; + + state = phba->nvmeio_trc_on; + + index = (atomic_read(&phba->nvmeio_trc_cnt) + 1) & + (phba->nvmeio_trc_size - 1); + skip = phba->nvmeio_trc_output_idx; + + len += snprintf(buf + len, size - len, + "%s IO Trace %s: next_idx %d skip %d size %d\n", + (phba->nvmet_support ? "NVME" : "NVMET"), + (state ? "Enabled" : "Disabled"), + index, skip, phba->nvmeio_trc_size); + + if (!phba->nvmeio_trc || state) + return len; + + /* trace MUST bhe off to continue */ + + for (i = index; i < phba->nvmeio_trc_size; i++) { + if (skip) { + skip--; + continue; + } + dtp = phba->nvmeio_trc + i; + phba->nvmeio_trc_output_idx++; + + if (!dtp->fmt) + continue; + + len += snprintf(buf + len, size - len, dtp->fmt, + dtp->data1, dtp->data2, dtp->data3); + + if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { + phba->nvmeio_trc_output_idx = 0; + len += snprintf(buf + len, size - len, + "Trace Complete\n"); + goto out; + } + + if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { + len += snprintf(buf + len, size - len, + "Trace Continue (%d of %d)\n", + phba->nvmeio_trc_output_idx, + phba->nvmeio_trc_size); + goto out; + } + } + for (i = 0; i < index; i++) { + if (skip) { + skip--; + continue; + } + dtp = phba->nvmeio_trc + i; + phba->nvmeio_trc_output_idx++; + + if (!dtp->fmt) + continue; + + len += snprintf(buf + len, size - len, dtp->fmt, + dtp->data1, dtp->data2, dtp->data3); + + if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { + phba->nvmeio_trc_output_idx = 0; + len += snprintf(buf + len, size - len, + "Trace Complete\n"); + goto out; + } + + if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { + len += snprintf(buf + len, size - len, + "Trace Continue (%d of %d)\n", + phba->nvmeio_trc_output_idx, + phba->nvmeio_trc_size); + goto out; + } + } + + len += snprintf(buf + len, size - len, + "Trace Done\n"); +out: + return len; +} + +/** + * lpfc_debugfs_cpucheck_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size) +{ + struct lpfc_hba *phba = vport->phba; + int i; + int len = 0; + uint32_t tot_xmt = 0; + uint32_t tot_rcv = 0; + uint32_t tot_cmpl = 0; + uint32_t tot_ccmpl = 0; + + if (phba->nvmet_support == 0) { + /* NVME Initiator */ + len += snprintf(buf + len, PAGE_SIZE - len, + "CPUcheck %s\n", + (phba->cpucheck_on & LPFC_CHECK_NVME_IO ? 
+ "Enabled" : "Disabled")); + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + if (i >= LPFC_CHECK_CPU_CNT) + break; + len += snprintf(buf + len, PAGE_SIZE - len, + "%02d: xmit x%08x cmpl x%08x\n", + i, phba->cpucheck_xmt_io[i], + phba->cpucheck_cmpl_io[i]); + tot_xmt += phba->cpucheck_xmt_io[i]; + tot_cmpl += phba->cpucheck_cmpl_io[i]; + } + len += snprintf(buf + len, PAGE_SIZE - len, + "tot:xmit x%08x cmpl x%08x\n", + tot_xmt, tot_cmpl); + return len; + } + + /* NVME Target */ + len += snprintf(buf + len, PAGE_SIZE - len, + "CPUcheck %s ", + (phba->cpucheck_on & LPFC_CHECK_NVMET_IO ? + "IO Enabled - " : "IO Disabled - ")); + len += snprintf(buf + len, PAGE_SIZE - len, + "%s\n", + (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? + "Rcv Enabled\n" : "Rcv Disabled\n")); + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + if (i >= LPFC_CHECK_CPU_CNT) + break; + len += snprintf(buf + len, PAGE_SIZE - len, + "%02d: xmit x%08x ccmpl x%08x " + "cmpl x%08x rcv x%08x\n", + i, phba->cpucheck_xmt_io[i], + phba->cpucheck_ccmpl_io[i], + phba->cpucheck_cmpl_io[i], + phba->cpucheck_rcv_io[i]); + tot_xmt += phba->cpucheck_xmt_io[i]; + tot_rcv += phba->cpucheck_rcv_io[i]; + tot_cmpl += phba->cpucheck_cmpl_io[i]; + tot_ccmpl += phba->cpucheck_ccmpl_io[i]; + } + len += snprintf(buf + len, PAGE_SIZE - len, + "tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n", + tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv); + return len; +} + #endif /** @@ -697,6 +1300,40 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, return; } +/** + * lpfc_debugfs_nvme_trc - Store NVME/NVMET trace log + * @phba: The phba to associate this trace string with for retrieval. + * @fmt: Format string to be displayed when dumping the log. + * @data1: 1st data parameter to be applied to @fmt. + * @data2: 2nd data parameter to be applied to @fmt. + * @data3: 3rd data parameter to be applied to @fmt. + * + * Description: + * This routine is used by the driver code to add a debugfs log entry to the + * nvme trace buffer associated with @phba. @fmt, @data1, @data2, and + * @data3 are used like printf when displaying the log. 
+ **/ +inline void +lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt, + uint16_t data1, uint16_t data2, uint32_t data3) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_debugfs_nvmeio_trc *dtp; + int index; + + if (!phba->nvmeio_trc_on || !phba->nvmeio_trc) + return; + + index = atomic_inc_return(&phba->nvmeio_trc_cnt) & + (phba->nvmeio_trc_size - 1); + dtp = phba->nvmeio_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; +#endif +} + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /** * lpfc_debugfs_disc_trc_open - Open the discovery trace log @@ -938,7 +1575,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file) goto out; /* Round to page boundary */ - printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n", + pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n", __func__, _dump_buf_data); debug->buffer = _dump_buf_data; if (!debug->buffer) { @@ -968,8 +1605,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file) goto out; /* Round to page boundary */ - printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n", - __func__, _dump_buf_dif, file); + pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n", + __func__, _dump_buf_dif, file); debug->buffer = _dump_buf_dif; if (!debug->buffer) { kfree(debug); @@ -1229,6 +1866,422 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) return 0; } + +static int +lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_NVMESTAT_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nvmestat_data(vport, debug->buffer, + LPFC_NVMESTAT_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_tgtport *tgtp; + char mybuf[64]; + char *pbuf; + + if (!phba->targetport) + return -ENXIO; + + if (nbytes > 64) + nbytes = 64; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if ((strncmp(pbuf, "reset", strlen("reset")) == 0) || + (strncmp(pbuf, "zero", strlen("zero")) == 0)) { + atomic_set(&tgtp->rcv_ls_req_in, 0); + atomic_set(&tgtp->rcv_ls_req_out, 0); + atomic_set(&tgtp->rcv_ls_req_drop, 0); + atomic_set(&tgtp->xmt_ls_abort, 0); + atomic_set(&tgtp->xmt_ls_rsp, 0); + atomic_set(&tgtp->xmt_ls_drop, 0); + atomic_set(&tgtp->xmt_ls_rsp_error, 0); + atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); + + atomic_set(&tgtp->rcv_fcp_cmd_in, 0); + atomic_set(&tgtp->rcv_fcp_cmd_out, 0); + atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); + atomic_set(&tgtp->xmt_fcp_abort, 0); + atomic_set(&tgtp->xmt_fcp_drop, 0); + atomic_set(&tgtp->xmt_fcp_read_rsp, 0); + atomic_set(&tgtp->xmt_fcp_read, 0); + atomic_set(&tgtp->xmt_fcp_write, 0); + atomic_set(&tgtp->xmt_fcp_rsp, 0); + atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); + 
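Editorial note: the lpfc_debugfs_nvme_trc() helper added earlier in this hunk picks its slot by masking an atomically incremented counter with (size - 1), and the nvmeio_trc write handler further down rounds any requested size down to a power of two so that mask is valid. A userspace sketch of the same scheme, with a plain counter standing in for the driver's atomic_t:

#include <stdint.h>
#include <stdio.h>

struct trc_entry {
	const char *fmt;
	uint16_t data1, data2;
	uint32_t data3;
};

#define TRC_SIZE 8			/* must be a power of two */

static struct trc_entry trc_buf[TRC_SIZE];
static unsigned int trc_cnt;		/* the driver uses an atomic_t here */

/* Round an arbitrary request down to a power of two, as the debugfs
 * write handler does before allocating the trace buffer.
 */
static unsigned int rounddown_pow2(unsigned int sz)
{
	unsigned int bits = 0;

	while (sz > 1) {
		sz >>= 1;
		bits++;
	}
	return 1u << bits;
}

static void trc_log(const char *fmt, uint16_t d1, uint16_t d2, uint32_t d3)
{
	/* Masking with size-1 wraps the index without a modulo or a lock. */
	unsigned int idx = ++trc_cnt & (TRC_SIZE - 1);

	trc_buf[idx] = (struct trc_entry){ fmt, d1, d2, d3 };
}

int main(void)
{
	unsigned int i;

	printf("1000 rounds down to %u\n", rounddown_pow2(1000));
	for (i = 0; i < 20; i++)		/* more entries than slots: old ones get overwritten */
		trc_log("io %04x %04x %08x\n", i, i * 2, i * 3);
	for (i = 0; i < TRC_SIZE; i++)
		if (trc_buf[i].fmt)
			printf(trc_buf[i].fmt, trc_buf[i].data1,
			       trc_buf[i].data2, trc_buf[i].data3);
	return 0;
}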
atomic_set(&tgtp->xmt_fcp_rsp_error, 0); + atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); + + atomic_set(&tgtp->xmt_abort_rsp, 0); + atomic_set(&tgtp->xmt_abort_rsp_error, 0); + atomic_set(&tgtp->xmt_abort_cmpl, 0); + } + return nbytes; +} + +static int +lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer, + LPFC_NVMEKTIME_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + char mybuf[64]; + char *pbuf; + + if (nbytes > 64) + nbytes = 64; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + phba->ktime_data_samples = 0; + phba->ktime_status_samples = 0; + phba->ktime_seg1_total = 0; + phba->ktime_seg1_max = 0; + phba->ktime_seg1_min = 0xffffffff; + phba->ktime_seg2_total = 0; + phba->ktime_seg2_max = 0; + phba->ktime_seg2_min = 0xffffffff; + phba->ktime_seg3_total = 0; + phba->ktime_seg3_max = 0; + phba->ktime_seg3_min = 0xffffffff; + phba->ktime_seg4_total = 0; + phba->ktime_seg4_max = 0; + phba->ktime_seg4_min = 0xffffffff; + phba->ktime_seg5_total = 0; + phba->ktime_seg5_max = 0; + phba->ktime_seg5_min = 0xffffffff; + phba->ktime_seg6_total = 0; + phba->ktime_seg6_max = 0; + phba->ktime_seg6_min = 0xffffffff; + phba->ktime_seg7_total = 0; + phba->ktime_seg7_max = 0; + phba->ktime_seg7_min = 0xffffffff; + phba->ktime_seg8_total = 0; + phba->ktime_seg8_max = 0; + phba->ktime_seg8_min = 0xffffffff; + phba->ktime_seg9_total = 0; + phba->ktime_seg9_max = 0; + phba->ktime_seg9_min = 0xffffffff; + phba->ktime_seg10_total = 0; + phba->ktime_seg10_max = 0; + phba->ktime_seg10_min = 0xffffffff; + + phba->ktime_on = 1; + return strlen(pbuf); + } else if ((strncmp(pbuf, "off", + sizeof("off") - 1) == 0)) { + phba->ktime_on = 0; + return strlen(pbuf); + } else if ((strncmp(pbuf, "zero", + sizeof("zero") - 1) == 0)) { + phba->ktime_data_samples = 0; + phba->ktime_status_samples = 0; + phba->ktime_seg1_total = 0; + phba->ktime_seg1_max = 0; + phba->ktime_seg1_min = 0xffffffff; + phba->ktime_seg2_total = 0; + phba->ktime_seg2_max = 0; + phba->ktime_seg2_min = 0xffffffff; + phba->ktime_seg3_total = 0; + phba->ktime_seg3_max = 0; + phba->ktime_seg3_min = 0xffffffff; + phba->ktime_seg4_total = 0; + phba->ktime_seg4_max = 0; + phba->ktime_seg4_min = 0xffffffff; + phba->ktime_seg5_total = 0; + phba->ktime_seg5_max = 0; + phba->ktime_seg5_min = 0xffffffff; + phba->ktime_seg6_total = 0; + phba->ktime_seg6_max = 0; + phba->ktime_seg6_min = 0xffffffff; + phba->ktime_seg7_total = 0; + phba->ktime_seg7_max = 0; + phba->ktime_seg7_min = 0xffffffff; + phba->ktime_seg8_total = 0; + phba->ktime_seg8_max = 0; + phba->ktime_seg8_min = 0xffffffff; + phba->ktime_seg9_total = 0; + 
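Editorial note: the ktime write handler above resets every latency segment's total and max to 0 and seeds its min with 0xffffffff, so the very first sample establishes both extremes; averages are only computed at dump time by dividing the running total by the sample count (div_u64 in the kernel). A small sketch of that accumulator pattern:

#include <stdint.h>
#include <stdio.h>

struct seg_stat {
	uint64_t total;
	uint64_t min;
	uint64_t max;
};

/* Reset so the first sample sets both min and max. */
static void seg_reset(struct seg_stat *s)
{
	s->total = 0;
	s->max = 0;
	s->min = UINT64_MAX;	/* the driver seeds min with 0xffffffff */
}

static void seg_sample(struct seg_stat *s, uint64_t ns)
{
	s->total += ns;
	if (ns < s->min)
		s->min = ns;
	if (ns > s->max)
		s->max = ns;
}

int main(void)
{
	struct seg_stat s;
	uint64_t samples[] = { 1200, 800, 950 };
	uint64_t n = sizeof(samples) / sizeof(samples[0]);
	uint64_t i;

	seg_reset(&s);
	for (i = 0; i < n; i++)
		seg_sample(&s, samples[i]);
	/* average produced at read time, like div_u64(total, samples) */
	printf("avg:%llu min:%llu max:%llu\n",
	       (unsigned long long)(s.total / n),
	       (unsigned long long)s.min,
	       (unsigned long long)s.max);
	return 0;
}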
phba->ktime_seg9_max = 0; + phba->ktime_seg9_min = 0xffffffff; + phba->ktime_seg10_total = 0; + phba->ktime_seg10_max = 0; + phba->ktime_seg10_min = 0xffffffff; + return strlen(pbuf); + } + return -EINVAL; +} + +static int +lpfc_debugfs_nvmeio_trc_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_NVMEIO_TRC_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nvmeio_trc_data(phba, debug->buffer, + LPFC_NVMEIO_TRC_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int i; + unsigned long sz; + char mybuf[64]; + char *pbuf; + + if (nbytes > 64) + nbytes = 64; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "off", sizeof("off") - 1) == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0570 nvmeio_trc_off\n"); + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc_on = 0; + return strlen(pbuf); + } else if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0571 nvmeio_trc_on\n"); + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc_on = 1; + return strlen(pbuf); + } + + /* We must be off to allocate the trace buffer */ + if (phba->nvmeio_trc_on != 0) + return -EINVAL; + + /* If not on or off, the parameter is the trace buffer size */ + i = kstrtoul(pbuf, 0, &sz); + if (i) + return -EINVAL; + phba->nvmeio_trc_size = (uint32_t)sz; + + /* It must be a power of 2 - round down */ + i = 0; + while (sz > 1) { + sz = sz >> 1; + i++; + } + sz = (1 << i); + if (phba->nvmeio_trc_size != sz) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0572 nvmeio_trc_size changed to %ld\n", + sz); + phba->nvmeio_trc_size = (uint32_t)sz; + + /* If one previously exists, free it */ + kfree(phba->nvmeio_trc); + + /* Allocate new trace buffer and initialize */ + phba->nvmeio_trc = kmalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) * + sz), GFP_KERNEL); + if (!phba->nvmeio_trc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0573 Cannot create debugfs " + "nvmeio_trc buffer\n"); + return -ENOMEM; + } + memset(phba->nvmeio_trc, 0, + (sizeof(struct lpfc_debugfs_nvmeio_trc) * sz)); + atomic_set(&phba->nvmeio_trc_cnt, 0); + phba->nvmeio_trc_on = 0; + phba->nvmeio_trc_output_idx = 0; + + return strlen(pbuf); +} + +static int +lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer, + LPFC_NVMEKTIME_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t 
+lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + char mybuf[64]; + char *pbuf; + int i; + + if (nbytes > 64) + nbytes = 64; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + if (phba->nvmet_support) + phba->cpucheck_on |= LPFC_CHECK_NVMET_IO; + else + phba->cpucheck_on |= LPFC_CHECK_NVME_IO; + return strlen(pbuf); + } else if ((strncmp(pbuf, "rcv", + sizeof("rcv") - 1) == 0)) { + if (phba->nvmet_support) + phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV; + else + return -EINVAL; + return strlen(pbuf); + } else if ((strncmp(pbuf, "off", + sizeof("off") - 1) == 0)) { + phba->cpucheck_on = LPFC_CHECK_OFF; + return strlen(pbuf); + } else if ((strncmp(pbuf, "zero", + sizeof("zero") - 1) == 0)) { + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + if (i >= LPFC_CHECK_CPU_CNT) + break; + phba->cpucheck_rcv_io[i] = 0; + phba->cpucheck_xmt_io[i] = 0; + phba->cpucheck_cmpl_io[i] = 0; + phba->cpucheck_ccmpl_io[i] = 0; + } + return strlen(pbuf); + } + return -EINVAL; +} + /* * --------------------------------- * iDiag debugfs file access methods @@ -1974,6 +3027,203 @@ error_out: return -EINVAL; } +static int +__lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype, + char *pbuffer, int len) +{ + if (!qp) + return len; + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\t%s WQ info: ", wqtype); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n", + qp->assoc_qid, qp->q_cnt_1, + (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + len += snprintf(pbuffer + len, + LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + return len; +} + +static int +lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer, + int *len, int max_cnt, int cq_id) +{ + struct lpfc_queue *qp; + int qidx; + + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { + qp = phba->sli4_hba.fcp_wq[qidx]; + if (qp->assoc_qid != cq_id) + continue; + *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); + if (*len >= max_cnt) + return 1; + } + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { + qp = phba->sli4_hba.nvme_wq[qidx]; + if (qp->assoc_qid != cq_id) + continue; + *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); + if (*len >= max_cnt) + return 1; + } + return 0; +} + +static int +__lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype, + char *pbuffer, int len) +{ + if (!qp) + return len; + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t%s CQ info: ", cqtype); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x " + "xabt:x%x wq:x%llx]\n", + qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + 
qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + + return len; +} + +static int +__lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp, + char *rqtype, char *pbuffer, int len) +{ + if (!qp || !datqp) + return len; + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\t%s RQ info: ", rqtype); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " + "trunc:x%x rcv:x%llx]\n", + qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]\n", + qp->queue_id, qp->entry_count, qp->entry_size, + qp->host_index, qp->hba_index); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]\n", + datqp->queue_id, datqp->entry_count, + datqp->entry_size, datqp->host_index, + datqp->hba_index); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + + return len; +} + +static int +lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, + int *len, int max_cnt, int eqidx, int eq_id) +{ + struct lpfc_queue *qp; + int qidx, rc; + + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { + qp = phba->sli4_hba.fcp_cq[qidx]; + if (qp->assoc_qid != eq_id) + continue; + + *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + if (*len >= max_cnt) + return 1; + + rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len, + max_cnt, qp->queue_id); + if (rc) + return 1; + } + + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { + qp = phba->sli4_hba.nvme_cq[qidx]; + if (qp->assoc_qid != eq_id) + continue; + + *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + if (*len >= max_cnt) + return 1; + + rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len, + max_cnt, qp->queue_id); + if (rc) + return 1; + } + + if (eqidx < phba->cfg_nvmet_mrq) { + /* NVMET CQset */ + qp = phba->sli4_hba.nvmet_cqset[eqidx]; + *len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + if (*len >= max_cnt) + return 1; + + /* RQ header */ + qp = phba->sli4_hba.nvmet_mrq_hdr[eqidx]; + *len = __lpfc_idiag_print_rqpair(qp, + phba->sli4_hba.nvmet_mrq_data[eqidx], + "NVMET MRQ", pbuffer, *len); + + if (*len >= max_cnt) + return 1; + } + + return 0; +} + +static int +__lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, + char *pbuffer, int len) +{ + if (!qp) + return len; + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\n%s EQ info: EQ-STAT[max:x%x noE:x%x " + "bs:x%x proc:x%llx]\n", + eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, + (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, qp->entry_size, + qp->host_index, qp->hba_index); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + + return len; +} + /** * lpfc_idiag_queinfo_read - idiag debugfs read queue information * @file: The file pointer to read from. 
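Editorial note: the reworked queue-information read path shown below dumps only one event queue per read and remembers where it stopped (phba->lpfc_idiag_last_eq), so repeated reads of the debugfs file eventually walk every EQ instead of overflowing one buffer. A minimal sketch of that cursor pattern, with a hypothetical dump helper:

#include <stdio.h>

#define NUM_EQS 4

/* Hypothetical stand-in for dumping one event queue's statistics. */
static void dump_one_eq(int eq)
{
	printf("EQ %d out of %d HBA EQs\n", eq, NUM_EQS);
}

/* Emit one EQ per call and advance a persistent cursor, wrapping back
 * to the first EQ once all of them have been shown.
 */
static void dump_next_eq(int *last_eq)
{
	int eq = *last_eq;

	dump_one_eq(eq);
	*last_eq = (eq + 1) % NUM_EQS;
}

int main(void)
{
	int cursor = 0;
	int i;

	for (i = 0; i < 6; i++)		/* six reads cover all EQs and wrap */
		dump_next_eq(&cursor);
	return 0;
}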
@@ -1984,6 +3234,9 @@ error_out: * Description: * This routine reads data from the @phba SLI4 PCI function queue information, * and copies to user @buf. + * This routine only returns 1 EQs worth of information. It remembers the last + * EQ read and jumps to the next EQ. Thus subsequent calls to queInfo will + * retrieve all EQs allocated for the phba. * * Returns: * This function returns the amount of data that was read (this could be less @@ -1995,19 +3248,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; - int len = 0; char *pbuffer; - int x, cnt; - int max_cnt; + int max_cnt, rc, x, len = 0; struct lpfc_queue *qp = NULL; - if (!debug->buffer) debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; - max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128; + max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 256; if (*ppos) return 0; @@ -2015,375 +3265,134 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, spin_lock_irq(&phba->hbalock); /* Fast-path event queue */ - if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) { - cnt = phba->cfg_fcp_io_channel; + if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) { - for (x = 0; x < cnt; x++) { + x = phba->lpfc_idiag_last_eq; + if (phba->cfg_fof && (x >= phba->io_channel_irqs)) { + phba->lpfc_idiag_last_eq = 0; + goto fof; + } + phba->lpfc_idiag_last_eq++; + if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs) + if (phba->cfg_fof == 0) + phba->lpfc_idiag_last_eq = 0; - /* Fast-path EQ */ - qp = phba->sli4_hba.hba_eq[x]; - if (!qp) - goto proc_cq; + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "EQ %d out of %d HBA EQs\n", + x, phba->io_channel_irqs); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\nHBA EQ info: " - "EQ-STAT[max:x%x noE:x%x " - "bs:x%x proc:x%llx]\n", - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + /* Fast-path EQ */ + qp = phba->sli4_hba.hba_eq[x]; + if (!qp) + goto out; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "EQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - - /* Reset max counter */ - qp->EQ_max_eqe = 0; + len = __lpfc_idiag_print_eq(qp, "HBA", pbuffer, len); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; -proc_cq: - /* Fast-path FCP CQ */ - qp = phba->sli4_hba.fcp_cq[x]; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tFCP CQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocEQID[%02d]: " - "CQ STAT[max:x%x relw:x%x " - "xabt:x%x wq:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); + /* Reset max counter */ + qp->EQ_max_eqe = 0; + if (len >= max_cnt) + goto too_big; - /* Reset max counter */ - qp->CQ_max_cqe = 0; + /* will dump both fcp and nvme cqs/wqs for the eq */ + rc = lpfc_idiag_cqs_for_eq(phba, pbuffer, &len, + max_cnt, x, qp->queue_id); + if (rc) + goto too_big; - len += 
snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; + /* Only EQ 0 has slow path CQs configured */ + if (x) + goto out; - /* Fast-path FCP WQ */ - qp = phba->sli4_hba.fcp_wq[x]; + /* Slow-path mailbox CQ */ + qp = phba->sli4_hba.mbx_cq; + len = __lpfc_idiag_print_cq(qp, "MBX", pbuffer, len); + if (len >= max_cnt) + goto too_big; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tFCP WQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]: " - "WQ-STAT[oflow:x%x posted:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tWQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - - if (x) - continue; - - /* Only EQ 0 has slow path CQs configured */ - - /* Slow-path mailbox CQ */ - qp = phba->sli4_hba.mbx_cq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tMBX CQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocEQID[%02d]: " - "CQ-STAT[mbox:x%x relw:x%x " - "xabt:x%x wq:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, - (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + /* Slow-path MBOX MQ */ + qp = phba->sli4_hba.mbx_wq; + len = __lpfc_idiag_print_wq(qp, "MBX", pbuffer, len); + if (len >= max_cnt) + goto too_big; - /* Slow-path MBOX MQ */ - qp = phba->sli4_hba.mbx_wq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tMBX MQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]:\n", - phba->sli4_hba.mbx_wq->assoc_qid); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tWQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + /* Slow-path ELS response CQ */ + qp = phba->sli4_hba.els_cq; + len = __lpfc_idiag_print_cq(qp, "ELS", pbuffer, len); + /* Reset max counter */ + if (qp) + qp->CQ_max_cqe = 0; + if (len >= max_cnt) + goto too_big; - /* Slow-path ELS response CQ */ - qp = phba->sli4_hba.els_cq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tELS CQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocEQID[%02d]: " - "CQ-STAT[max:x%x relw:x%x " - "xabt:x%x wq:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, - (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID [%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - /* Reset max counter */ - qp->CQ_max_cqe = 0; - - len += 
snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + /* Slow-path ELS WQ */ + qp = phba->sli4_hba.els_wq; + len = __lpfc_idiag_print_wq(qp, "ELS", pbuffer, len); + if (len >= max_cnt) + goto too_big; - /* Slow-path ELS WQ */ - qp = phba->sli4_hba.els_wq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tELS WQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]: " - " WQ-STAT[oflow:x%x " - "posted:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, - (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tWQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + /* Slow-path NVME LS response CQ */ + qp = phba->sli4_hba.nvmels_cq; + len = __lpfc_idiag_print_cq(qp, "NVME LS", + pbuffer, len); + /* Reset max counter */ + if (qp) + qp->CQ_max_cqe = 0; + if (len >= max_cnt) + goto too_big; - if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) { - /* Slow-path RQ header */ - qp = phba->sli4_hba.hdr_rq; + /* Slow-path NVME LS WQ */ + qp = phba->sli4_hba.nvmels_wq; + len = __lpfc_idiag_print_wq(qp, "NVME LS", + pbuffer, len); + if (len >= max_cnt) + goto too_big; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tRQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]: " - "RQ-STAT[nopost:x%x nobuf:x%x " - "trunc:x%x rcv:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, - (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tHQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]\n", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - /* Slow-path RQ data */ - qp = phba->sli4_hba.dat_rq; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tDQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]\n", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - } - } + qp = phba->sli4_hba.hdr_rq; + len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq, + "RQpair", pbuffer, len); + if (len >= max_cnt) + goto too_big; + + goto out; } +fof: if (phba->cfg_fof) { /* FOF EQ */ qp = phba->sli4_hba.fof_eq; - if (!qp) - goto out; - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\nFOF EQ info: " - "EQ-STAT[max:x%x noE:x%x " - "bs:x%x proc:x%llx]\n", - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "EQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); + len = __lpfc_idiag_print_eq(qp, "FOF", pbuffer, len); /* Reset max counter */ - qp->EQ_max_eqe = 0; + if (qp) + qp->EQ_max_eqe = 0; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); if (len >= max_cnt) goto too_big; - } - - if (phba->cfg_fof) { /* OAS CQ */ qp = phba->sli4_hba.oas_cq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - 
"\tOAS CQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocEQID[%02d]: " - "CQ STAT[max:x%x relw:x%x " - "xabt:x%x wq:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - /* Reset max counter */ + len = __lpfc_idiag_print_cq(qp, "OAS", pbuffer, len); + /* Reset max counter */ + if (qp) qp->CQ_max_cqe = 0; - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + if (len >= max_cnt) + goto too_big; /* OAS WQ */ qp = phba->sli4_hba.oas_wq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tOAS WQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]: " - "WQ-STAT[oflow:x%x posted:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tWQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + len = __lpfc_idiag_print_wq(qp, "OAS", pbuffer, len); + if (len >= max_cnt) + goto too_big; } -out: + spin_unlock_irq(&phba->hbalock); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); too_big: - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n"); + len += snprintf(pbuffer + len, + LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n"); +out: spin_unlock_irq(&phba->hbalock); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } @@ -2559,7 +3568,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; uint32_t qidx, quetp, queid, index, count, offset, value; uint32_t *pentry; - struct lpfc_queue *pque; + struct lpfc_queue *pque, *qp; int rc; /* This is a user write operation */ @@ -2595,19 +3604,15 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, case LPFC_IDIAG_EQ: /* HBA event queue */ if (phba->sli4_hba.hba_eq) { - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; - qidx++) { - if (phba->sli4_hba.hba_eq[qidx] && - phba->sli4_hba.hba_eq[qidx]->queue_id == - queid) { + for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { + qp = phba->sli4_hba.hba_eq[qidx]; + if (qp && qp->queue_id == queid) { /* Sanity check */ - rc = lpfc_idiag_que_param_check( - phba->sli4_hba.hba_eq[qidx], + rc = lpfc_idiag_que_param_check(qp, index, count); if (rc) goto error_out; - idiag.ptr_private = - phba->sli4_hba.hba_eq[qidx]; + idiag.ptr_private = qp; goto pass_check; } } @@ -2637,24 +3642,62 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, idiag.ptr_private = phba->sli4_hba.els_cq; goto pass_check; } + /* NVME LS complete queue */ + if (phba->sli4_hba.nvmels_cq && + phba->sli4_hba.nvmels_cq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.nvmels_cq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.nvmels_cq; + goto pass_check; + } + /* NVME LS complete queue */ + if (phba->sli4_hba.nvmels_cq && + 
phba->sli4_hba.nvmels_cq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.nvmels_cq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.nvmels_cq; + goto pass_check; + } /* FCP complete queue */ if (phba->sli4_hba.fcp_cq) { + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; + qidx++) { + qp = phba->sli4_hba.fcp_cq[qidx]; + if (qp && qp->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + qp, index, count); + if (rc) + goto error_out; + idiag.ptr_private = qp; + goto pass_check; + } + } + } + /* NVME complete queue */ + if (phba->sli4_hba.nvme_cq) { qidx = 0; do { - if (phba->sli4_hba.fcp_cq[qidx] && - phba->sli4_hba.fcp_cq[qidx]->queue_id == + if (phba->sli4_hba.nvme_cq[qidx] && + phba->sli4_hba.nvme_cq[qidx]->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( - phba->sli4_hba.fcp_cq[qidx], + phba->sli4_hba.nvme_cq[qidx], index, count); if (rc) goto error_out; idiag.ptr_private = - phba->sli4_hba.fcp_cq[qidx]; + phba->sli4_hba.nvme_cq[qidx]; goto pass_check; } - } while (++qidx < phba->cfg_fcp_io_channel); + } while (++qidx < phba->cfg_nvme_io_channel); } goto error_out; break; @@ -2684,22 +3727,77 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, idiag.ptr_private = phba->sli4_hba.els_wq; goto pass_check; } + /* NVME LS work queue */ + if (phba->sli4_hba.nvmels_wq && + phba->sli4_hba.nvmels_wq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.nvmels_wq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.nvmels_wq; + goto pass_check; + } + /* NVME LS work queue */ + if (phba->sli4_hba.nvmels_wq && + phba->sli4_hba.nvmels_wq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.nvmels_wq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.nvmels_wq; + goto pass_check; + } /* FCP work queue */ if (phba->sli4_hba.fcp_wq) { for (qidx = 0; qidx < phba->cfg_fcp_io_channel; + qidx++) { + qp = phba->sli4_hba.fcp_wq[qidx]; + if (qp && qp->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + qp, index, count); + if (rc) + goto error_out; + idiag.ptr_private = qp; + goto pass_check; + } + } + } + /* NVME work queue */ + if (phba->sli4_hba.nvme_wq) { + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; + qidx++) { + qp = phba->sli4_hba.nvme_wq[qidx]; + if (qp && qp->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + qp, index, count); + if (rc) + goto error_out; + idiag.ptr_private = qp; + goto pass_check; + } + } + } + + /* NVME work queues */ + if (phba->sli4_hba.nvme_wq) { + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { - if (!phba->sli4_hba.fcp_wq[qidx]) + if (!phba->sli4_hba.nvme_wq[qidx]) continue; - if (phba->sli4_hba.fcp_wq[qidx]->queue_id == + if (phba->sli4_hba.nvme_wq[qidx]->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( - phba->sli4_hba.fcp_wq[qidx], + phba->sli4_hba.nvme_wq[qidx], index, count); if (rc) goto error_out; idiag.ptr_private = - phba->sli4_hba.fcp_wq[qidx]; + phba->sli4_hba.nvme_wq[qidx]; goto pass_check; } } @@ -3687,6 +4785,46 @@ static const struct file_operations lpfc_debugfs_op_dumpHostSlim = { .release = lpfc_debugfs_release, }; +#undef lpfc_debugfs_op_nvmestat +static const struct file_operations lpfc_debugfs_op_nvmestat = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nvmestat_open, + .llseek = 
lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_nvmestat_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_nvmektime +static const struct file_operations lpfc_debugfs_op_nvmektime = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nvmektime_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_nvmektime_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_nvmeio_trc +static const struct file_operations lpfc_debugfs_op_nvmeio_trc = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nvmeio_trc_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_nvmeio_trc_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_cpucheck +static const struct file_operations lpfc_debugfs_op_cpucheck = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_cpucheck_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_cpucheck_write, + .release = lpfc_debugfs_release, +}; + #undef lpfc_debugfs_op_dumpData static const struct file_operations lpfc_debugfs_op_dumpData = { .owner = THIS_MODULE, @@ -3853,7 +4991,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) { do_dump |= LPFC_BSG_DMP_MBX_RD_MBX; - printk(KERN_ERR "\nRead mbox command (x%x), " + pr_err("\nRead mbox command (x%x), " "nemb:0x%x, extbuf_cnt:%d:\n", sta_tp, nemb_tp, ext_buf); } @@ -3861,7 +4999,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) { do_dump |= LPFC_BSG_DMP_MBX_RD_BUF; - printk(KERN_ERR "\nRead mbox buffer (x%x), " + pr_err("\nRead mbox buffer (x%x), " "nemb:0x%x, extbuf_seq:%d:\n", sta_tp, nemb_tp, ext_buf); } @@ -3869,7 +5007,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) { do_dump |= LPFC_BSG_DMP_MBX_WR_MBX; - printk(KERN_ERR "\nWrite mbox command (x%x), " + pr_err("\nWrite mbox command (x%x), " "nemb:0x%x, extbuf_cnt:%d:\n", sta_tp, nemb_tp, ext_buf); } @@ -3877,7 +5015,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) { do_dump |= LPFC_BSG_DMP_MBX_WR_BUF; - printk(KERN_ERR "\nWrite mbox buffer (x%x), " + pr_err("\nWrite mbox buffer (x%x), " "nemb:0x%x, extbuf_seq:%d:\n", sta_tp, nemb_tp, ext_buf); } @@ -3889,7 +5027,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, for (i = 0; i < *mbx_word_cnt; i++) { if (!(i % 8)) { if (i != 0) - printk(KERN_ERR "%s\n", line_buf); + pr_err("%s\n", line_buf); len = 0; len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, @@ -3900,7 +5038,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, pword++; } if ((i - 1) % 8) - printk(KERN_ERR "%s\n", line_buf); + pr_err("%s\n", line_buf); (*mbx_dump_cnt)--; } @@ -3949,13 +5087,13 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) /* dump buffer content */ if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) { - printk(KERN_ERR "Mailbox command:0x%x dump by word:\n", + pr_err("Mailbox command:0x%x dump by word:\n", pmbox->mbxCommand); pword = (uint32_t *)pmbox; for (i = 0; i < 
*mbx_word_cnt; i++) { if (!(i % 8)) { if (i != 0) - printk(KERN_ERR "%s\n", line_buf); + pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); len += snprintf(line_buf+len, @@ -3968,17 +5106,17 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) pword++; } if ((i - 1) % 8) - printk(KERN_ERR "%s\n", line_buf); - printk(KERN_ERR "\n"); + pr_err("%s\n", line_buf); + pr_err("\n"); } if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) { - printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n", + pr_err("Mailbox command:0x%x dump by byte:\n", pmbox->mbxCommand); pbyte = (uint8_t *)pmbox; for (i = 0; i < *mbx_word_cnt; i++) { if (!(i % 8)) { if (i != 0) - printk(KERN_ERR "%s\n", line_buf); + pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); len += snprintf(line_buf+len, @@ -3996,8 +5134,8 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) LPFC_MBX_ACC_LBUF_SZ-len, " "); } if ((i - 1) % 8) - printk(KERN_ERR "%s\n", line_buf); - printk(KERN_ERR "\n"); + pr_err("%s\n", line_buf); + pr_err("\n"); } (*mbx_dump_cnt)--; @@ -4240,8 +5378,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) i++; } lpfc_debugfs_max_slow_ring_trc = (1 << i); - printk(KERN_ERR - "lpfc_debugfs_max_disc_trc changed to " + pr_err("lpfc_debugfs_max_disc_trc changed to " "%d\n", lpfc_debugfs_max_disc_trc); } } @@ -4273,6 +5410,61 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_slow_ring_trc)); } + + snprintf(name, sizeof(name), "nvmeio_trc"); + phba->debug_nvmeio_trc = + debugfs_create_file(name, 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_nvmeio_trc); + if (!phba->debug_nvmeio_trc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0574 No create debugfs nvmeio_trc\n"); + goto debug_failed; + } + + atomic_set(&phba->nvmeio_trc_cnt, 0); + if (lpfc_debugfs_max_nvmeio_trc) { + num = lpfc_debugfs_max_nvmeio_trc - 1; + if (num & lpfc_debugfs_max_disc_trc) { + /* Change to be a power of 2 */ + num = lpfc_debugfs_max_nvmeio_trc; + i = 0; + while (num > 1) { + num = num >> 1; + i++; + } + lpfc_debugfs_max_nvmeio_trc = (1 << i); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0575 lpfc_debugfs_max_nvmeio_trc " + "changed to %d\n", + lpfc_debugfs_max_nvmeio_trc); + } + phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc; + + /* Allocate trace buffer and initialize */ + phba->nvmeio_trc = kmalloc( + (sizeof(struct lpfc_debugfs_nvmeio_trc) * + phba->nvmeio_trc_size), GFP_KERNEL); + + if (!phba->nvmeio_trc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0576 Cannot create debugfs " + "nvmeio_trc buffer\n"); + goto nvmeio_off; + } + memset(phba->nvmeio_trc, 0, + (sizeof(struct lpfc_debugfs_nvmeio_trc) * + phba->nvmeio_trc_size)); + phba->nvmeio_trc_on = 1; + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc = NULL; + } else { +nvmeio_off: + phba->nvmeio_trc_size = 0; + phba->nvmeio_trc_on = 0; + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc = NULL; + } } snprintf(name, sizeof(name), "vport%d", vport->vpi); @@ -4298,8 +5490,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) i++; } lpfc_debugfs_max_disc_trc = (1 << i); - printk(KERN_ERR - "lpfc_debugfs_max_disc_trc changed to %d\n", + pr_err("lpfc_debugfs_max_disc_trc changed to %d\n", lpfc_debugfs_max_disc_trc); } } @@ -4338,6 +5529,39 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } + snprintf(name, sizeof(name), "nvmestat"); + vport->debug_nvmestat = + debugfs_create_file(name, 0644, + 
vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_nvmestat); + if (!vport->debug_nvmestat) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0811 Cannot create debugfs nvmestat\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "nvmektime"); + vport->debug_nvmektime = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_nvmektime); + if (!vport->debug_nvmektime) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0815 Cannot create debugfs nvmektime\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "cpucheck"); + vport->debug_cpucheck = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_cpucheck); + if (!vport->debug_cpucheck) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0819 Cannot create debugfs cpucheck\n"); + goto debug_failed; + } + /* * The following section is for additional directories/files for the * physical port. @@ -4502,140 +5726,126 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) kfree(vport->disc_trc); vport->disc_trc = NULL; } - if (vport->debug_disc_trc) { - debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ - vport->debug_disc_trc = NULL; - } - if (vport->debug_nodelist) { - debugfs_remove(vport->debug_nodelist); /* nodelist */ - vport->debug_nodelist = NULL; - } + + debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ + vport->debug_disc_trc = NULL; + + debugfs_remove(vport->debug_nodelist); /* nodelist */ + vport->debug_nodelist = NULL; + + debugfs_remove(vport->debug_nvmestat); /* nvmestat */ + vport->debug_nvmestat = NULL; + + debugfs_remove(vport->debug_nvmektime); /* nvmektime */ + vport->debug_nvmektime = NULL; + + debugfs_remove(vport->debug_cpucheck); /* cpucheck */ + vport->debug_cpucheck = NULL; + if (vport->vport_debugfs_root) { debugfs_remove(vport->vport_debugfs_root); /* vportX */ vport->vport_debugfs_root = NULL; atomic_dec(&phba->debugfs_vport_count); } + if (atomic_read(&phba->debugfs_vport_count) == 0) { - if (phba->debug_hbqinfo) { - debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ - phba->debug_hbqinfo = NULL; - } - if (phba->debug_dumpHBASlim) { - debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ - phba->debug_dumpHBASlim = NULL; - } - if (phba->debug_dumpHostSlim) { - debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ - phba->debug_dumpHostSlim = NULL; - } - if (phba->debug_dumpData) { - debugfs_remove(phba->debug_dumpData); /* dumpData */ - phba->debug_dumpData = NULL; - } + debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ + phba->debug_hbqinfo = NULL; - if (phba->debug_dumpDif) { - debugfs_remove(phba->debug_dumpDif); /* dumpDif */ - phba->debug_dumpDif = NULL; - } - if (phba->debug_InjErrLBA) { - debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ - phba->debug_InjErrLBA = NULL; - } - if (phba->debug_InjErrNPortID) { /* InjErrNPortID */ - debugfs_remove(phba->debug_InjErrNPortID); - phba->debug_InjErrNPortID = NULL; - } - if (phba->debug_InjErrWWPN) { - debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */ - phba->debug_InjErrWWPN = NULL; - } - if (phba->debug_writeGuard) { - debugfs_remove(phba->debug_writeGuard); /* writeGuard */ - phba->debug_writeGuard = NULL; - } - if (phba->debug_writeApp) { - debugfs_remove(phba->debug_writeApp); /* writeApp */ - phba->debug_writeApp = NULL; - } - if (phba->debug_writeRef) { - debugfs_remove(phba->debug_writeRef); /* writeRef */ - phba->debug_writeRef = NULL; - } - if (phba->debug_readGuard) { - debugfs_remove(phba->debug_readGuard); /* readGuard */ - 
phba->debug_readGuard = NULL; - } - if (phba->debug_readApp) { - debugfs_remove(phba->debug_readApp); /* readApp */ - phba->debug_readApp = NULL; - } - if (phba->debug_readRef) { - debugfs_remove(phba->debug_readRef); /* readRef */ - phba->debug_readRef = NULL; - } + debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ + phba->debug_dumpHBASlim = NULL; + + debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ + phba->debug_dumpHostSlim = NULL; + + debugfs_remove(phba->debug_dumpData); /* dumpData */ + phba->debug_dumpData = NULL; + + debugfs_remove(phba->debug_dumpDif); /* dumpDif */ + phba->debug_dumpDif = NULL; + + debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ + phba->debug_InjErrLBA = NULL; + + debugfs_remove(phba->debug_InjErrNPortID); + phba->debug_InjErrNPortID = NULL; + + debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */ + phba->debug_InjErrWWPN = NULL; + + debugfs_remove(phba->debug_writeGuard); /* writeGuard */ + phba->debug_writeGuard = NULL; + + debugfs_remove(phba->debug_writeApp); /* writeApp */ + phba->debug_writeApp = NULL; + + debugfs_remove(phba->debug_writeRef); /* writeRef */ + phba->debug_writeRef = NULL; + + debugfs_remove(phba->debug_readGuard); /* readGuard */ + phba->debug_readGuard = NULL; + + debugfs_remove(phba->debug_readApp); /* readApp */ + phba->debug_readApp = NULL; + + debugfs_remove(phba->debug_readRef); /* readRef */ + phba->debug_readRef = NULL; if (phba->slow_ring_trc) { kfree(phba->slow_ring_trc); phba->slow_ring_trc = NULL; } - if (phba->debug_slow_ring_trc) { - /* slow_ring_trace */ - debugfs_remove(phba->debug_slow_ring_trc); - phba->debug_slow_ring_trc = NULL; - } + + /* slow_ring_trace */ + debugfs_remove(phba->debug_slow_ring_trc); + phba->debug_slow_ring_trc = NULL; + + debugfs_remove(phba->debug_nvmeio_trc); + phba->debug_nvmeio_trc = NULL; + + kfree(phba->nvmeio_trc); + phba->nvmeio_trc = NULL; /* * iDiag release */ if (phba->sli_rev == LPFC_SLI_REV4) { - if (phba->idiag_ext_acc) { - /* iDiag extAcc */ - debugfs_remove(phba->idiag_ext_acc); - phba->idiag_ext_acc = NULL; - } - if (phba->idiag_mbx_acc) { - /* iDiag mbxAcc */ - debugfs_remove(phba->idiag_mbx_acc); - phba->idiag_mbx_acc = NULL; - } - if (phba->idiag_ctl_acc) { - /* iDiag ctlAcc */ - debugfs_remove(phba->idiag_ctl_acc); - phba->idiag_ctl_acc = NULL; - } - if (phba->idiag_drb_acc) { - /* iDiag drbAcc */ - debugfs_remove(phba->idiag_drb_acc); - phba->idiag_drb_acc = NULL; - } - if (phba->idiag_que_acc) { - /* iDiag queAcc */ - debugfs_remove(phba->idiag_que_acc); - phba->idiag_que_acc = NULL; - } - if (phba->idiag_que_info) { - /* iDiag queInfo */ - debugfs_remove(phba->idiag_que_info); - phba->idiag_que_info = NULL; - } - if (phba->idiag_bar_acc) { - /* iDiag barAcc */ - debugfs_remove(phba->idiag_bar_acc); - phba->idiag_bar_acc = NULL; - } - if (phba->idiag_pci_cfg) { - /* iDiag pciCfg */ - debugfs_remove(phba->idiag_pci_cfg); - phba->idiag_pci_cfg = NULL; - } + /* iDiag extAcc */ + debugfs_remove(phba->idiag_ext_acc); + phba->idiag_ext_acc = NULL; + + /* iDiag mbxAcc */ + debugfs_remove(phba->idiag_mbx_acc); + phba->idiag_mbx_acc = NULL; + + /* iDiag ctlAcc */ + debugfs_remove(phba->idiag_ctl_acc); + phba->idiag_ctl_acc = NULL; + + /* iDiag drbAcc */ + debugfs_remove(phba->idiag_drb_acc); + phba->idiag_drb_acc = NULL; + + /* iDiag queAcc */ + debugfs_remove(phba->idiag_que_acc); + phba->idiag_que_acc = NULL; + + /* iDiag queInfo */ + debugfs_remove(phba->idiag_que_info); + phba->idiag_que_info = NULL; + + /* iDiag barAcc */ + 
debugfs_remove(phba->idiag_bar_acc); + phba->idiag_bar_acc = NULL; + + /* iDiag pciCfg */ + debugfs_remove(phba->idiag_pci_cfg); + phba->idiag_pci_cfg = NULL; /* Finally remove the iDiag debugfs root */ - if (phba->idiag_root) { - /* iDiag root */ - debugfs_remove(phba->idiag_root); - phba->idiag_root = NULL; - } + debugfs_remove(phba->idiag_root); + phba->idiag_root = NULL; } if (phba->hba_debugfs_root) { @@ -4644,10 +5854,8 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) atomic_dec(&lpfc_debugfs_hba_count); } - if (atomic_read(&lpfc_debugfs_hba_count) == 0) { - debugfs_remove(lpfc_debugfs_root); /* lpfc */ - lpfc_debugfs_root = NULL; - } + debugfs_remove(lpfc_debugfs_root); /* lpfc */ + lpfc_debugfs_root = NULL; } #endif return; @@ -4668,31 +5876,39 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) void lpfc_debug_dump_all_queues(struct lpfc_hba *phba) { - int fcp_wqidx; + int idx; /* * Dump Work Queues (WQs) */ - lpfc_debug_dump_mbx_wq(phba); - lpfc_debug_dump_els_wq(phba); + lpfc_debug_dump_wq(phba, DUMP_MBX, 0); + lpfc_debug_dump_wq(phba, DUMP_ELS, 0); + lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0); - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) - lpfc_debug_dump_fcp_wq(phba, fcp_wqidx); + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + lpfc_debug_dump_wq(phba, DUMP_FCP, idx); + + for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) + lpfc_debug_dump_wq(phba, DUMP_NVME, idx); lpfc_debug_dump_hdr_rq(phba); lpfc_debug_dump_dat_rq(phba); /* * Dump Complete Queues (CQs) */ - lpfc_debug_dump_mbx_cq(phba); - lpfc_debug_dump_els_cq(phba); + lpfc_debug_dump_cq(phba, DUMP_MBX, 0); + lpfc_debug_dump_cq(phba, DUMP_ELS, 0); + lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0); + + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + lpfc_debug_dump_cq(phba, DUMP_FCP, idx); - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) - lpfc_debug_dump_fcp_cq(phba, fcp_wqidx); + for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) + lpfc_debug_dump_cq(phba, DUMP_NVME, idx); /* * Dump Event Queues (EQs) */ - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) - lpfc_debug_dump_hba_eq(phba, fcp_wqidx); + for (idx = 0; idx < phba->io_channel_irqs; idx++) + lpfc_debug_dump_hba_eq(phba, idx); } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 8b2b6a3bfc25..c05f56c3023f 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -42,6 +44,22 @@ /* hbqinfo output buffer size */ #define LPFC_HBQINFO_SIZE 8192 +enum { + DUMP_FCP, + DUMP_NVME, + DUMP_MBX, + DUMP_ELS, + DUMP_NVMELS, +}; + +/* nvmestat output buffer size */ +#define LPFC_NVMESTAT_SIZE 8192 +#define LPFC_NVMEKTIME_SIZE 8192 +#define LPFC_CPUCHECK_SIZE 8192 +#define LPFC_NVMEIO_TRC_SIZE 8192 + +#define LPFC_DEBUG_OUT_LINE_SZ 80 + /* * For SLI4 iDiag debugfs diagnostics tool */ @@ -188,6 +206,12 @@ #define SIZE_U16 sizeof(uint16_t) #define SIZE_U32 sizeof(uint32_t) +#define lpfc_nvmeio_data(phba, fmt, arg...) \ + { \ + if (phba->nvmeio_trc_on) \ + lpfc_debugfs_nvme_trc(phba, fmt, ##arg); \ + } + struct lpfc_debug { char *i_private; char op; @@ -206,6 +230,13 @@ struct lpfc_debugfs_trc { unsigned long jif; }; +struct lpfc_debugfs_nvmeio_trc { + char *fmt; + uint16_t data1; + uint16_t data2; + uint32_t data3; +}; + struct lpfc_idiag_offset { uint32_t last_rd; }; @@ -358,58 +389,111 @@ lpfc_debug_dump_q(struct lpfc_queue *q) } /** - * lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue + * lpfc_debug_dump_wq - dump all entries from the fcp or nvme work queue * @phba: Pointer to HBA context object. - * @fcp_wqidx: Index to a FCP work queue. + * @wqidx: Index to a FCP or NVME work queue. * - * This function dumps all entries from a FCP work queue specified by the - * @fcp_wqidx. + * This function dumps all entries from a FCP or NVME work queue specified + * by the wqidx. **/ static inline void -lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx) +lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) { - /* sanity check */ - if (fcp_wqidx >= phba->cfg_fcp_io_channel) + struct lpfc_queue *wq; + char *qtypestr; + + if (qtype == DUMP_FCP) { + wq = phba->sli4_hba.fcp_wq[wqidx]; + qtypestr = "FCP"; + } else if (qtype == DUMP_NVME) { + wq = phba->sli4_hba.nvme_wq[wqidx]; + qtypestr = "NVME"; + } else if (qtype == DUMP_MBX) { + wq = phba->sli4_hba.mbx_wq; + qtypestr = "MBX"; + } else if (qtype == DUMP_ELS) { + wq = phba->sli4_hba.els_wq; + qtypestr = "ELS"; + } else if (qtype == DUMP_NVMELS) { + wq = phba->sli4_hba.nvmels_wq; + qtypestr = "NVMELS"; + } else return; - printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n", - fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[fcp_wqidx]); + if (qtype == DUMP_FCP || qtype == DUMP_NVME) + pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n", + qtypestr, wqidx, wq->queue_id); + else + pr_err("%s WQ: WQ[Qid:%d]\n", + qtypestr, wq->queue_id); + + lpfc_debug_dump_q(wq); } /** - * lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue + * lpfc_debug_dump_cq - dump all entries from a fcp or nvme work queue's + * cmpl queue * @phba: Pointer to HBA context object. - * @fcp_wqidx: Index to a FCP work queue. + * @wqidx: Index to a FCP work queue. * - * This function dumps all entries from a FCP complete queue which is - * associated to the FCP work queue specified by the @fcp_wqidx. + * This function dumps all entries from a FCP or NVME completion queue + * which is associated to the work queue specified by the @wqidx. 
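As a side note on the consolidated dump helper that follows: it locates the parent EQ by walking hba_eq[] until an entry whose queue_id matches the CQ's assoc_qid is found, and falls back to EQ[0] when no match exists. A minimal stand-alone sketch of that association walk, using simplified stand-in structures rather than the real lpfc types:

/* Simplified stand-ins for the driver's queue bookkeeping. */
#include <stdio.h>

struct fake_queue {
	int queue_id;	/* id assigned to the queue */
	int assoc_qid;	/* for a CQ: queue_id of its parent EQ */
};

/* Mirror of the lookup loop: return the index of the EQ owning @cq,
 * or 0 as a fallback when no EQ matches (as the patch does). */
static int find_parent_eq(const struct fake_queue *eqs, int num_eqs,
			  const struct fake_queue *cq)
{
	int i;

	for (i = 0; i < num_eqs; i++)
		if (cq->assoc_qid == eqs[i].queue_id)
			return i;
	return 0;
}

int main(void)
{
	struct fake_queue eqs[2] = { { .queue_id = 10 }, { .queue_id = 11 } };
	struct fake_queue cq = { .queue_id = 42, .assoc_qid = 11 };

	printf("CQ %d hangs off EQ index %d\n", cq.queue_id,
	       find_parent_eq(eqs, 2, &cq));
	return 0;
}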
**/ static inline void -lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx) +lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) { - int fcp_cqidx, fcp_cqid; - - /* sanity check */ - if (fcp_wqidx >= phba->cfg_fcp_io_channel) + struct lpfc_queue *wq, *cq, *eq; + char *qtypestr; + int eqidx; + + /* fcp/nvme wq and cq are 1:1, thus same indexes */ + + if (qtype == DUMP_FCP) { + wq = phba->sli4_hba.fcp_wq[wqidx]; + cq = phba->sli4_hba.fcp_cq[wqidx]; + qtypestr = "FCP"; + } else if (qtype == DUMP_NVME) { + wq = phba->sli4_hba.nvme_wq[wqidx]; + cq = phba->sli4_hba.nvme_cq[wqidx]; + qtypestr = "NVME"; + } else if (qtype == DUMP_MBX) { + wq = phba->sli4_hba.mbx_wq; + cq = phba->sli4_hba.mbx_cq; + qtypestr = "MBX"; + } else if (qtype == DUMP_ELS) { + wq = phba->sli4_hba.els_wq; + cq = phba->sli4_hba.els_cq; + qtypestr = "ELS"; + } else if (qtype == DUMP_NVMELS) { + wq = phba->sli4_hba.nvmels_wq; + cq = phba->sli4_hba.nvmels_cq; + qtypestr = "NVMELS"; + } else return; - fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) - if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) + for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) { + eq = phba->sli4_hba.hba_eq[eqidx]; + if (cq->assoc_qid == eq->queue_id) break; - if (phba->intr_type == MSIX) { - if (fcp_cqidx >= phba->cfg_fcp_io_channel) - return; - } else { - if (fcp_cqidx > 0) - return; + } + if (eqidx == phba->io_channel_irqs) { + pr_err("Couldn't find EQ for CQ. Using EQ[0]\n"); + eqidx = 0; + eq = phba->sli4_hba.hba_eq[0]; } - printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n", - fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, - fcp_cqidx, fcp_cqid); - lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[fcp_cqidx]); + if (qtype == DUMP_FCP || qtype == DUMP_NVME) + pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" + "->EQ[Idx:%d|Qid:%d]:\n", + qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id, + eqidx, eq->queue_id); + else + pr_err("%s CQ: WQ[Qid:%d]->CQ[Qid:%d]" + "->EQ[Idx:%d|Qid:%d]:\n", + qtypestr, wq->queue_id, cq->queue_id, + eqidx, eq->queue_id); + + lpfc_debug_dump_q(cq); } /** @@ -421,64 +505,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx) * associated to the FCP work queue specified by the @fcp_wqidx. **/ static inline void -lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx) +lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx) { - struct lpfc_queue *qdesc; - int fcp_eqidx, fcp_eqid; - int fcp_cqidx, fcp_cqid; + struct lpfc_queue *qp; - /* sanity check */ - if (fcp_wqidx >= phba->cfg_fcp_io_channel) - return; - fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) - if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) - break; - if (phba->intr_type == MSIX) { - if (fcp_cqidx >= phba->cfg_fcp_io_channel) - return; - } else { - if (fcp_cqidx > 0) - return; - } + qp = phba->sli4_hba.hba_eq[qidx]; - fcp_eqidx = fcp_cqidx; - fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id; - qdesc = phba->sli4_hba.hba_eq[fcp_eqidx]; + pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id); - printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->" - "EQ[Idx:%d|Qid:%d]\n", - fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, - fcp_cqidx, fcp_cqid, fcp_eqidx, fcp_eqid); - lpfc_debug_dump_q(qdesc); -} - -/** - * lpfc_debug_dump_els_wq - dump all entries from the els work queue - * @phba: Pointer to HBA context object. 
- * - * This function dumps all entries from the ELS work queue. - **/ -static inline void -lpfc_debug_dump_els_wq(struct lpfc_hba *phba) -{ - printk(KERN_ERR "ELS WQ: WQ[Qid:%d]:\n", - phba->sli4_hba.els_wq->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.els_wq); -} - -/** - * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue - * @phba: Pointer to HBA context object. - * - * This function dumps all entries from the MBOX work queue. - **/ -static inline void -lpfc_debug_dump_mbx_wq(struct lpfc_hba *phba) -{ - printk(KERN_ERR "MBX WQ: WQ[Qid:%d]\n", - phba->sli4_hba.mbx_wq->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.mbx_wq); + lpfc_debug_dump_q(qp); } /** @@ -510,36 +545,6 @@ lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba) } /** - * lpfc_debug_dump_els_cq - dump all entries from the els complete queue - * @phba: Pointer to HBA context object. - * - * This function dumps all entries from the els complete queue. - **/ -static inline void -lpfc_debug_dump_els_cq(struct lpfc_hba *phba) -{ - printk(KERN_ERR "ELS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n", - phba->sli4_hba.els_wq->queue_id, - phba->sli4_hba.els_cq->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.els_cq); -} - -/** - * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue - * @phba: Pointer to HBA context object. - * - * This function dumps all entries from the mbox complete queue. - **/ -static inline void -lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba) -{ - printk(KERN_ERR "MBX CQ: WQ[Qid:%d]->CQ[Qid:%d]\n", - phba->sli4_hba.mbx_wq->queue_id, - phba->sli4_hba.mbx_cq->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.mbx_cq); -} - -/** * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id * @phba: Pointer to HBA context object. * @qid: Work queue identifier. 
@@ -556,14 +561,29 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid) break; if (wq_idx < phba->cfg_fcp_io_channel) { - printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); + pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]); return; } + for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++) + if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid) + break; + if (wq_idx < phba->cfg_nvme_io_channel) { + pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]); + return; + } + if (phba->sli4_hba.els_wq->queue_id == qid) { - printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid); + pr_err("ELS WQ[Qid:%d]\n", qid); lpfc_debug_dump_q(phba->sli4_hba.els_wq); + return; + } + + if (phba->sli4_hba.nvmels_wq->queue_id == qid) { + pr_err("NVME LS WQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq); } } @@ -617,27 +637,42 @@ lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid) static inline void lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) { - int cq_idx = 0; + int cq_idx; - do { + for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++) if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid) break; - } while (++cq_idx < phba->cfg_fcp_io_channel); if (cq_idx < phba->cfg_fcp_io_channel) { - printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); + pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]); return; } + for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++) + if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid) + break; + + if (cq_idx < phba->cfg_nvme_io_channel) { + pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]); + return; + } + if (phba->sli4_hba.els_cq->queue_id == qid) { - printk(KERN_ERR "ELS CQ[Qid:%d]\n", qid); + pr_err("ELS CQ[Qid:%d]\n", qid); lpfc_debug_dump_q(phba->sli4_hba.els_cq); return; } + if (phba->sli4_hba.nvmels_cq->queue_id == qid) { + pr_err("NVME LS CQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq); + return; + } + if (phba->sli4_hba.mbx_cq->queue_id == qid) { - printk(KERN_ERR "MBX CQ[Qid:%d]\n", qid); + pr_err("MBX CQ[Qid:%d]\n", qid); lpfc_debug_dump_q(phba->sli4_hba.mbx_cq); } } @@ -655,17 +690,15 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid) { int eq_idx; - for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) { + for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++) if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid) break; - } - if (eq_idx < phba->cfg_fcp_io_channel) { + if (eq_idx < phba->io_channel_irqs) { printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]); return; } - } void lpfc_debug_dump_all_queues(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 361f5b3d9d93..f4ff99d95db3 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -86,6 +88,17 @@ struct lpfc_nodelist { #define NLP_FABRIC 0x4 /* entry rep a Fabric entity */ #define NLP_FCP_TARGET 0x8 /* entry is an FCP target */ #define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */ +#define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */ +#define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */ + + uint16_t nlp_fc4_type; /* FC types node supports. */ + /* Assigned from GID_FF, only + * FCP (0x8) and NVME (0x28) + * supported. + */ +#define NLP_FC4_NONE 0x0 +#define NLP_FC4_FCP 0x1 /* FC4 Type FCP (value x8)) */ +#define NLP_FC4_NVME 0x2 /* FC4 TYPE NVME (value x28) */ uint16_t nlp_rpi; uint16_t nlp_state; /* state transition indicator */ @@ -107,8 +120,8 @@ struct lpfc_nodelist { struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ struct lpfc_hba *phba; - struct fc_rport *rport; /* Corresponding FC transport - port structure */ + struct fc_rport *rport; /* scsi_transport_fc port structure */ + struct lpfc_nvme_rport *nrport; /* nvme transport rport struct. */ struct lpfc_vport *vport; struct lpfc_work_evt els_retry_evt; struct lpfc_work_evt dev_loss_evt; @@ -118,6 +131,10 @@ struct lpfc_nodelist { unsigned long last_change_time; unsigned long *active_rrqs_xri_bitmap; struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ + uint32_t fc4_prli_sent; + uint32_t upcall_flags; + uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ +#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ }; struct lpfc_node_rrq { struct list_head list; @@ -133,6 +150,7 @@ struct lpfc_node_rrq { /* Defines for nlp_flag (uint32) */ #define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ #define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ +#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ #define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ #define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ #define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 3a1f1a2a2b55..2d26440e6f2f 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -29,7 +31,6 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> - #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -1323,7 +1324,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba) "0201 Abort outstanding I/O on NPort x%x\n", Fabric_DID); - pring = &phba->sli.ring[LPFC_ELS_RING]; + pring = lpfc_phba_elsring(phba); /* * Check the txcmplq for an iocb that matches the nport the driver is @@ -1513,7 +1514,7 @@ static struct lpfc_nodelist * lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *ndlp) { - struct lpfc_vport *vport = ndlp->vport; + struct lpfc_vport *vport = ndlp->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *new_ndlp; struct lpfc_rport_data *rdata; @@ -1868,10 +1869,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* PLOGI completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0102 PLOGI completes to NPort x%x " + "0102 PLOGI completes to NPort x%06x " "Data: x%x x%x x%x x%x x%x\n", - ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], - irsp->ulpTimeout, disc, vport->num_disc_nodes); + ndlp->nlp_DID, ndlp->nlp_fc4_type, + irsp->ulpStatus, irsp->un.ulpWord[4], + disc, vport->num_disc_nodes); + /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { spin_lock_irq(shost->host_lock); @@ -2000,12 +2003,21 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) sp->cmn.fcphHigh = FC_PH3; sp->cmn.valid_vendor_ver_level = 0; - memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); + memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PLOGI: did:x%x", did, 0, 0); + /* If our firmware supports this feature, convey that + * information to the target using the vendor specific field. + */ + if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { + sp->cmn.valid_vendor_ver_level = 1; + sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); + sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); + } + phba->fc_stat.elsXmitPLOGI++; elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); @@ -2052,14 +2064,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "PRLI cmpl: status:x%x/x%x did:x%x", irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); + + /* Ddriver supports multiple FC4 types. Counters matter. 
*/ + vport->fc_prli_sent--; + /* PRLI completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0103 PRLI completes to NPort x%x " + "0103 PRLI completes to NPort x%06x " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], - irsp->ulpTimeout, vport->num_disc_nodes); + vport->num_disc_nodes, ndlp->fc4_prli_sent); - vport->fc_prli_sent--; /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) goto out; @@ -2068,6 +2083,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ + ndlp->fc4_prli_sent--; goto out; } /* PRLI failed */ @@ -2082,9 +2098,14 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); } else - /* Good status, call state machine */ + /* Good status, call state machine. However, if another + * PRLI is outstanding, don't call the state machine + * because final disposition to Mapped or Unmapped is + * completed there. + */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); + out: lpfc_els_free_iocb(phba, cmdiocb); return; @@ -2118,42 +2139,100 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; PRLI *npr; + struct lpfc_nvme_prli *npr_nvme; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; - - cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); + u32 local_nlp_type, elscmd; + + local_nlp_type = ndlp->nlp_fc4_type; + + send_next_prli: + if (local_nlp_type & NLP_FC4_FCP) { + /* Payload is 4 + 16 = 20 x14 bytes. */ + cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); + elscmd = ELS_CMD_PRLI; + } else if (local_nlp_type & NLP_FC4_NVME) { + /* Payload is 4 + 20 = 24 x18 bytes. */ + cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); + elscmd = ELS_CMD_NVMEPRLI; + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "3083 Unknown FC_TYPE x%x ndlp x%06x\n", + ndlp->nlp_fc4_type, ndlp->nlp_DID); + return 1; + } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, - ndlp->nlp_DID, ELS_CMD_PRLI); + ndlp->nlp_DID, elscmd); if (!elsiocb) return 1; pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); /* For PRLI request, remainder of payload is service parameters */ - memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t))); - *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; - pcmd += sizeof(uint32_t); + memset(pcmd, 0, cmdsize); - /* For PRLI, remainder of payload is PRLI parameter page */ - npr = (PRLI *) pcmd; - /* - * If our firmware version is 3.20 or later, - * set the following bits for FC-TAPE support. - */ - if (phba->vpd.rev.feaLevelHigh >= 0x02) { - npr->ConfmComplAllowed = 1; - npr->Retry = 1; - npr->TaskRetryIdReq = 1; - } - npr->estabImagePair = 1; - npr->readXferRdyDis = 1; - if (vport->cfg_first_burst_size) - npr->writeXferRdyDis = 1; + if (local_nlp_type & NLP_FC4_FCP) { + /* Remainder of payload is FCP PRLI parameter page. + * Note: this data structure is defined as + * BE/LE in the structure definition so no + * byte swap call is made. + */ + *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; + pcmd += sizeof(uint32_t); + npr = (PRLI *)pcmd; - /* For FCP support */ - npr->prliType = PRLI_FCP_TYPE; - npr->initiatorFunc = 1; + /* + * If our firmware version is 3.20 or later, + * set the following bits for FC-TAPE support. 
+ */ + if (phba->vpd.rev.feaLevelHigh >= 0x02) { + npr->ConfmComplAllowed = 1; + npr->Retry = 1; + npr->TaskRetryIdReq = 1; + } + npr->estabImagePair = 1; + npr->readXferRdyDis = 1; + if (vport->cfg_first_burst_size) + npr->writeXferRdyDis = 1; + + /* For FCP support */ + npr->prliType = PRLI_FCP_TYPE; + npr->initiatorFunc = 1; + elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; + + /* Remove FCP type - processed. */ + local_nlp_type &= ~NLP_FC4_FCP; + } else if (local_nlp_type & NLP_FC4_NVME) { + /* Remainder of payload is NVME PRLI parameter page. + * This data structure is the newer definition that + * uses bf macros so a byte swap is required. + */ + *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; + pcmd += sizeof(uint32_t); + npr_nvme = (struct lpfc_nvme_prli *)pcmd; + bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); + bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ + + /* Only initiators request first burst. */ + if ((phba->cfg_nvme_enable_fb) && + !phba->nvmet_support) + bf_set(prli_fba, npr_nvme, 1); + + if (phba->nvmet_support) { + bf_set(prli_tgt, npr_nvme, 1); + bf_set(prli_disc, npr_nvme, 1); + + } else { + bf_set(prli_init, npr_nvme, 1); + } + npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); + npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); + elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; + + /* Remove NVME type - processed. */ + local_nlp_type &= ~NLP_FC4_NVME; + } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PRLI: did:x%x", @@ -2172,7 +2251,20 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_free_iocb(phba, elsiocb); return 1; } + + /* The vport counters are used for lpfc_scan_finished, but + * the ndlp is used to track outstanding PRLIs for different + * FC4 types. + */ vport->fc_prli_sent++; + ndlp->fc4_prli_sent++; + + /* The driver supports 2 FC4 types. Make sure + * a PRLI is issued for all types before exiting. 
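The comment above describes issuing one PRLI per supported FC4 type; the send_next_prli goto that follows re-enters the routine until both the FCP and NVME bits have been cleared from the local type mask. A minimal stand-alone sketch of that pattern, with stand-in names and a plain loop in place of the goto:

/* Stand-in flag values; the driver uses NLP_FC4_FCP / NLP_FC4_NVME. */
#include <stdio.h>

#define FC4_FCP		0x1
#define FC4_NVME	0x2

static void issue_prli(unsigned int fc4_type)
{
	printf("issuing %s PRLI\n", (fc4_type == FC4_FCP) ? "FCP" : "NVME");
}

int main(void)
{
	/* Node advertises both protocols (ndlp->nlp_fc4_type equivalent). */
	unsigned int remaining = FC4_FCP | FC4_NVME;

	while (remaining) {
		if (remaining & FC4_FCP) {
			issue_prli(FC4_FCP);
			remaining &= ~FC4_FCP;	/* FCP type processed */
		} else if (remaining & FC4_NVME) {
			issue_prli(FC4_NVME);
			remaining &= ~FC4_NVME;	/* NVME type processed */
		}
	}
	return 0;
}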
+ */ + if (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) + goto send_next_prli; + return 0; } @@ -2543,6 +2635,15 @@ out: if ((vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_PT2PT_PLOGI)) { phba->pport->fc_myDID = 0; + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (phba->nvmet_support) + lpfc_nvmet_update_targetport(phba); + else + lpfc_nvme_update_localport(phba->pport); + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_config_link(phba, mbox); @@ -3055,6 +3156,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) } break; case ELS_CMD_PRLI: + case ELS_CMD_NVMEPRLI: if (!lpfc_issue_els_prli(vport, ndlp, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); @@ -3245,7 +3347,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; } if ((cmd == ELS_CMD_PLOGI) || - (cmd == ELS_CMD_PRLI)) { + (cmd == ELS_CMD_PRLI) || + (cmd == ELS_CMD_NVMEPRLI)) { delay = 1000; maxretry = lpfc_max_els_tries + 1; retry = 1; @@ -3265,7 +3368,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, case LSRJT_LOGICAL_BSY: if ((cmd == ELS_CMD_PLOGI) || - (cmd == ELS_CMD_PRLI)) { + (cmd == ELS_CMD_PRLI) || + (cmd == ELS_CMD_NVMEPRLI)) { delay = 1000; maxretry = 48; } else if (cmd == ELS_CMD_FDISC) { @@ -3399,7 +3503,8 @@ out_retry: spin_unlock_irq(shost->host_lock); ndlp->nlp_prev_state = ndlp->nlp_state; - if (cmd == ELS_CMD_PRLI) + if ((cmd == ELS_CMD_PRLI) || + (cmd == ELS_CMD_NVMEPRLI)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); else @@ -3430,6 +3535,7 @@ out_retry: lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); return 1; case ELS_CMD_PRLI: + case ELS_CMD_NVMEPRLI: ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); @@ -3995,7 +4101,18 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, sizeof(struct serv_parm)); sp->cmn.valid_vendor_ver_level = 0; - memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); + memset(sp->un.vendorVersion, 0, + sizeof(sp->un.vendorVersion)); + + /* If our firmware supports this feature, convey that + * info to the target using the vendor specific field. + */ + if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { + sp->cmn.valid_vendor_ver_level = 1; + sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); + sp->un.vv.flags = + cpu_to_be32(LPFC_VV_SUPPRESS_RSP); + } } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, @@ -4231,17 +4348,43 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, { struct lpfc_hba *phba = vport->phba; PRLI *npr; + struct lpfc_nvme_prli *npr_nvme; lpfc_vpd_t *vpd; IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; + uint32_t prli_fc4_req, *req_payload; + struct lpfc_dmabuf *req_buf; int rc; + u32 elsrspcmd; + + /* Need the incoming PRLI payload to determine if the ACC is for an + * FC4 or NVME PRLI type. The PRLI type is at word 1. + */ + req_buf = (struct lpfc_dmabuf *)oldiocb->context2; + req_payload = (((uint32_t *)req_buf->virt) + 1); + + /* PRLI type payload is at byte 3 for FCP or NVME. 
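The extraction that follows pulls the FC4 type code out of the top byte of word 1 of the incoming PRLI payload; per the nlp_fc4_type comment earlier in this patch, 0x08 identifies FCP and 0x28 identifies NVME. A tiny stand-alone illustration of that byte extraction, with a made-up word value and ntohl() standing in for be32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/htonl() stand in for be32_to_cpu() */

int main(void)
{
	/* Hypothetical on-the-wire PRLI word 1 for an NVME request. */
	uint32_t wire_word1 = htonl(0x28000020);
	unsigned int type = (ntohl(wire_word1) >> 24) & 0xff;

	printf("FC4 type code: 0x%02x (%s)\n", type,
	       type == 0x28 ? "NVME" : type == 0x08 ? "FCP" : "other");
	return 0;
}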
*/ + prli_fc4_req = be32_to_cpu(*req_payload); + prli_fc4_req = (prli_fc4_req >> 24) & 0xff; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", + prli_fc4_req, *((uint32_t *)req_payload)); + + if (prli_fc4_req == PRLI_FCP_TYPE) { + cmdsize = sizeof(uint32_t) + sizeof(PRLI); + elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); + } else if (prli_fc4_req & PRLI_NVME_TYPE) { + cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); + elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); + } else { + return 1; + } - cmdsize = sizeof(uint32_t) + sizeof(PRLI); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, - ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); + ndlp->nlp_DID, elsrspcmd); if (!elsiocb) return 1; @@ -4258,33 +4401,71 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + memset(pcmd, 0, cmdsize); *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); pcmd += sizeof(uint32_t); /* For PRLI, remainder of payload is PRLI parameter page */ - memset(pcmd, 0, sizeof(PRLI)); - - npr = (PRLI *) pcmd; vpd = &phba->vpd; - /* - * If the remote port is a target and our firmware version is 3.20 or - * later, set the following bits for FC-TAPE support. - */ - if ((ndlp->nlp_type & NLP_FCP_TARGET) && - (vpd->rev.feaLevelHigh >= 0x02)) { - npr->ConfmComplAllowed = 1; - npr->Retry = 1; - npr->TaskRetryIdReq = 1; - } - npr->acceptRspCode = PRLI_REQ_EXECUTED; - npr->estabImagePair = 1; - npr->readXferRdyDis = 1; - npr->ConfmComplAllowed = 1; + if (prli_fc4_req == PRLI_FCP_TYPE) { + /* + * If the remote port is a target and our firmware version + * is 3.20 or later, set the following bits for FC-TAPE + * support. + */ + npr = (PRLI *) pcmd; + if ((ndlp->nlp_type & NLP_FCP_TARGET) && + (vpd->rev.feaLevelHigh >= 0x02)) { + npr->ConfmComplAllowed = 1; + npr->Retry = 1; + npr->TaskRetryIdReq = 1; + } + npr->acceptRspCode = PRLI_REQ_EXECUTED; + npr->estabImagePair = 1; + npr->readXferRdyDis = 1; + npr->ConfmComplAllowed = 1; + npr->prliType = PRLI_FCP_TYPE; + npr->initiatorFunc = 1; + } else if (prli_fc4_req & PRLI_NVME_TYPE) { + /* Respond with an NVME PRLI Type */ + npr_nvme = (struct lpfc_nvme_prli *) pcmd; + bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); + bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ + bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); + if (phba->nvmet_support) { + bf_set(prli_tgt, npr_nvme, 1); + bf_set(prli_disc, npr_nvme, 1); + if (phba->cfg_nvme_enable_fb) { + bf_set(prli_fba, npr_nvme, 1); + + /* TBD. Target mode needs to post buffers + * that support the configured first burst + * byte size. 
+ */ + bf_set(prli_fb_sz, npr_nvme, + phba->cfg_nvmet_fb_size); + } + } else { + bf_set(prli_init, npr_nvme, 1); + } - npr->prliType = PRLI_FCP_TYPE; - npr->initiatorFunc = 1; + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6015 NVME issue PRLI ACC word1 x%08x " + "word4 x%08x word5 x%08x flag x%x, " + "fcp_info x%x nlp_type x%x\n", + npr_nvme->word1, npr_nvme->word4, + npr_nvme->word5, ndlp->nlp_flag, + ndlp->nlp_fcp_info, ndlp->nlp_type); + npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); + npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); + npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); + } else + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", + prli_fc4_req, ndlp->nlp_fc4_type, + ndlp->nlp_DID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue ACC PRLI: did:x%x flg:x%x", @@ -4411,7 +4592,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, **/ static void lpfc_els_clear_rrq(struct lpfc_vport *vport, - struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) + struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; uint8_t *pcmd; @@ -4909,7 +5090,7 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); - memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2); + memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); desc->length = cpu_to_be32(sizeof(desc->opd_info)); return sizeof(struct fc_rdp_opd_sfp_desc); @@ -5004,7 +5185,7 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, memcpy(desc->port_names.wwnn, phba->wwnn, sizeof(desc->port_names.wwnn)); - memcpy(desc->port_names.wwpn, &phba->wwpn, + memcpy(desc->port_names.wwpn, phba->wwpn, sizeof(desc->port_names.wwpn)); desc->length = cpu_to_be32(sizeof(desc->port_names)); @@ -5233,9 +5414,8 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct ls_rjt stat; if (phba->sli_rev < LPFC_SLI_REV4 || - (bf_get(lpfc_sli_intf_if_type, - &phba->sli4_hba.sli_intf) != - LPFC_SLI_INTF_IF_TYPE_2)) { + bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_2) { rjt_err = LSRJT_UNABLE_TPC; rjt_expl = LSEXP_REQ_UNSUPPORTED; goto error; @@ -5687,6 +5867,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) continue; + if (vport->phba->nvmet_support) + continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); lpfc_cancel_retry_delay_tmo(vport, ndlp); @@ -5976,9 +6158,11 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) if (ndlp && NLP_CHK_NODE_ACT(ndlp) && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Good ndlp, issue CT Request to NameServer */ - if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) + vport->gidft_inp = 0; + if (lpfc_issue_gidft(vport) == 0) /* Wait for NameServer query cmpl before we can - continue */ + * continue + */ return 1; } else { /* If login to NameServer does not exist, issue one */ @@ -6082,7 +6266,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); - /* * If our portname is greater than the remote portname, * then we initiate Nport login. 
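[Editor's aside, not part of the patch above.] The NVME PRLI code in the preceding hunks assembles its payload words with bf_set()-style SHIFT/MASK accessors on host-endian words and then converts the finished words once (cpu_to_be32) before they go on the wire; the patch's own comment notes the newer structure "uses bf macros so a byte swap is required". The snippet below is only a hedged, self-contained userspace sketch of that pattern: the demo_* names, the simplified bf_set/bf_get definitions, and the use of htonl() in place of cpu_to_be32() are illustrative assumptions, not the driver's definitions.

    /* Sketch of the SHIFT/MASK bit-field accessor pattern (assumed names). */
    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>          /* htonl() stands in for cpu_to_be32() */

    struct demo_prli {
            uint32_t word1;
    };

    /* Each field is described by a SHIFT, a MASK and the word it lives in. */
    #define demo_type_code_SHIFT    24
    #define demo_type_code_MASK     0x000000ffU
    #define demo_type_code_WORD     word1

    #define bf_set(name, ptr, val) \
            ((ptr)->name##_WORD = (((val) & name##_MASK) << name##_SHIFT) | \
             ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

    #define bf_get(name, ptr) \
            (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

    int main(void)
    {
            struct demo_prli p = { 0 };

            bf_set(demo_type_code, &p, 0x28);  /* NVME FC-4 type code */
            printf("host order 0x%08x, type 0x%x\n",
                   p.word1, bf_get(demo_type_code, &p));

            /* One swap, after all fields are set, puts the word on the wire
             * in big-endian order. */
            p.word1 = htonl(p.word1);
            printf("wire order 0x%08x\n", p.word1);
            return 0;
    }

Keeping the words host-endian until the very end means every bf_set() stays a plain shift-and-mask; only the boundary with the wire format needs an endianness conversion.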
@@ -7155,7 +7338,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) timeout = (uint32_t)(phba->fc_ratov << 1); - pring = &phba->sli.ring[LPFC_ELS_RING]; + pring = lpfc_phba_elsring(phba); + if ((phba->pport->load_flag & FC_UNLOADING)) return; spin_lock_irq(&phba->hbalock); @@ -7224,7 +7408,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) spin_unlock_irq(&phba->hbalock); } - if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) + if (!list_empty(&pring->txcmplq)) if (!(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&vport->els_tmofunc, jiffies + msecs_to_jiffies(1000 * timeout)); @@ -7255,7 +7439,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) { LIST_HEAD(abort_list); struct lpfc_hba *phba = vport->phba; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *tmp_iocb, *piocb; IOCB_t *cmd = NULL; @@ -7267,6 +7451,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) * a working list and release the locks before calling the abort. */ spin_lock_irq(&phba->hbalock); + pring = lpfc_phba_elsring(phba); if (phba->sli_rev == LPFC_SLI_REV4) spin_lock(&pring->ring_lock); @@ -7777,6 +7962,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_els_rcv_fan(vport, elsiocb, ndlp); break; case ELS_CMD_PRLI: + case ELS_CMD_NVMEPRLI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV PRLI: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); @@ -8881,8 +9067,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; } - if (atomic_read(&phba->fabric_iocb_count) == 0) - BUG(); + BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl; cmdiocb->fabric_iocb_cmpl = NULL; @@ -8927,8 +9112,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) int ready; int ret; - if (atomic_read(&phba->fabric_iocb_count) > 1) - BUG(); + BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); spin_lock_irqsave(&phba->hbalock, iflags); ready = atomic_read(&phba->fabric_iocb_count) == 0 && @@ -9013,7 +9197,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) LIST_HEAD(completions); struct lpfc_hba *phba = ndlp->phba; struct lpfc_iocbq *tmp_iocb, *piocb; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; + + pring = lpfc_phba_elsring(phba); spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, @@ -9069,13 +9255,13 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) unsigned long iflag = 0; spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); list_for_each_entry_safe(sglq_entry, sglq_next, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) sglq_entry->ndlp = NULL; } - spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } @@ -9099,22 +9285,22 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; unsigned long iflag = 0; struct lpfc_nodelist *ndlp; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; + + pring = lpfc_phba_elsring(phba); spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); 
list_for_each_entry_safe(sglq_entry, sglq_next, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { if (sglq_entry->sli4_xritag == xri) { list_del(&sglq_entry->list); ndlp = sglq_entry->ndlp; sglq_entry->ndlp = NULL; - spin_lock(&pring->ring_lock); list_add_tail(&sglq_entry->list, - &phba->sli4_hba.lpfc_sgl_list); + &phba->sli4_hba.lpfc_els_sgl_list); sglq_entry->state = SGL_FREED; - spin_unlock(&pring->ring_lock); - spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_set_rrq_active(phba, ndlp, sglq_entry->sli4_lxritag, @@ -9126,21 +9312,21 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, return; } } - spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); lxri = lpfc_sli4_xri_inrange(phba, xri); if (lxri == NO_XRI) { spin_unlock_irqrestore(&phba->hbalock, iflag); return; } - spin_lock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); sglq_entry = __lpfc_get_active_sglq(phba, lxri); if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { - spin_unlock(&pring->ring_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } sglq_entry->state = SGL_XRI_ABORTED; - spin_unlock(&pring->ring_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 82047070cdc9..194a14d5f8a9 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -31,6 +33,9 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fs.h> + +#include <linux/nvme-fc-driver.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -38,8 +43,9 @@ #include "lpfc_disc.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -93,7 +99,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport) if (ndlp->nlp_sid != NLP_NO_SID) { lpfc_sli_abort_iocb(ndlp->vport, - &phba->sli.ring[phba->sli.fcp_ring], + &phba->sli.sli3_ring[LPFC_FCP_RING], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } } @@ -247,8 +253,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) if (ndlp->nlp_sid != NLP_NO_SID) { /* flush the target */ lpfc_sli_abort_iocb(vport, - &phba->sli.ring[phba->sli.fcp_ring], - ndlp->nlp_sid, 0, LPFC_CTX_TGT); + &phba->sli.sli3_ring[LPFC_FCP_RING], + ndlp->nlp_sid, 0, LPFC_CTX_TGT); } put_node = rdata->pnode != NULL; rdata->pnode = NULL; @@ -283,7 +289,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) if (ndlp->nlp_sid != NLP_NO_SID) { warn_on = 1; - lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], + lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } @@ -495,11 +501,12 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba, return; } - fc_host_post_vendor_event(shost, - fc_get_event_number(), - evt_data_size, - evt_data, - LPFC_NL_VENDOR_ID); + if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_host_post_vendor_event(shost, + fc_get_event_number(), + evt_data_size, + evt_data, + LPFC_NL_VENDOR_ID); lpfc_free_fast_evt(phba, fast_evt_data); return; @@ -682,7 +689,7 @@ lpfc_work_done(struct lpfc_hba *phba) } lpfc_destroy_vport_work_array(phba, vports); - pring = &phba->sli.ring[LPFC_ELS_RING]; + pring = lpfc_phba_elsring(phba); status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status >>= (4*LPFC_ELS_RING); if ((status & HA_RXMASK) || @@ -852,9 +859,12 @@ lpfc_port_link_failure(struct lpfc_vport *vport) void lpfc_linkdown_port(struct lpfc_vport *vport) { + struct lpfc_hba *phba = vport->phba; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); + if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_host_post_event(shost, fc_get_event_number(), + FCH_EVT_LINKDOWN, 0); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Link Down: state:x%x rtry:x%x flg:x%x", @@ -894,11 +904,22 @@ lpfc_linkdown(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) + if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { /* Issue a LINK DOWN event to all nodes */ lpfc_linkdown_port(vports[i]); + + vports[i]->fc_myDID = 0; + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (phba->nvmet_support) + lpfc_nvmet_update_targetport(phba); + else + lpfc_nvme_update_localport(vports[i]); + } } + } lpfc_destroy_vport_work_array(phba, vports); /* Clean up any firmware default rpi's */ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -914,7 +935,6 @@ lpfc_linkdown(struct lpfc_hba *phba) /* Setup myDID for 
link up if we are in pt2pt mode */ if (phba->pport->fc_flag & FC_PT2PT) { - phba->pport->fc_myDID = 0; mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mb) { lpfc_config_link(phba, mb); @@ -929,7 +949,6 @@ lpfc_linkdown(struct lpfc_hba *phba) phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); spin_unlock_irq(shost->host_lock); } - return 0; } @@ -977,7 +996,9 @@ lpfc_linkup_port(struct lpfc_vport *vport) (vport != phba->pport)) return; - fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); + if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_host_post_event(shost, fc_get_event_number(), + FCH_EVT_LINKUP, 0); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | @@ -1016,7 +1037,7 @@ lpfc_linkup(struct lpfc_hba *phba) * This routine handles processing a CLEAR_LA mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is - * handed off to the SLI layer. + * handed off to the SLI layer. SLI3 only. */ static void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) @@ -1028,9 +1049,8 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) uint32_t control; /* Since we don't do discovery right now, turn these off here */ - psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT; + psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT; + psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT; /* Check for error */ if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { @@ -3277,7 +3297,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) * This routine handles processing a READ_TOPOLOGY mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is - * handed off to the SLI layer. + * handed off to the SLI layer. SLI4 only. */ void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) @@ -3285,11 +3305,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_mbx_read_top *la; + struct lpfc_sli_ring *pring; MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); /* Unblock ELS traffic */ - phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; + pring = lpfc_phba_elsring(phba); + pring->flag &= ~LPFC_STOP_IOCB_EVENT; + /* Check for error */ if (mb->mbxStatus) { lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, @@ -3458,6 +3481,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; spin_unlock_irq(shost->host_lock); + + /* + * We cannot leave the RPI registered because + * if we go thru discovery again for this ndlp + * a subsequent REG_RPI will fail. 
+ */ + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + lpfc_unreg_rpi(vport, ndlp); } /* Call state machine */ @@ -3556,6 +3587,14 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); spin_unlock_irq(shost->host_lock); vport->fc_myDID = 0; + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (phba->nvmet_support) + lpfc_nvmet_update_targetport(phba); + else + lpfc_nvme_update_localport(vport); + } goto out; } @@ -3805,6 +3844,52 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) return; } + /* + * This routine will issue a GID_FT for each FC4 Type supported + * by the driver. ALL GID_FTs must complete before discovery is started. + */ +int +lpfc_issue_gidft(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + + /* Good status, issue CT Request to NameServer */ + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) { + if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) { + /* Cannot issue NameServer FCP Query, so finish up + * discovery + */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, + "0604 %s FC TYPE %x %s\n", + "Failed to issue GID_FT to ", + FC_TYPE_FCP, + "Finishing discovery."); + return 0; + } + vport->gidft_inp++; + } + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) { + /* Cannot issue NameServer NVME Query, so finish up + * discovery + */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, + "0605 %s FC_TYPE %x %s %d\n", + "Failed to issue GID_FT to ", + FC_TYPE_NVME, + "Finishing discovery: gidftinp ", + vport->gidft_inp); + if (vport->gidft_inp == 0) + return 0; + } else + vport->gidft_inp++; + } + return vport->gidft_inp; +} + /* * This routine handles processing a NameServer REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ @@ -3821,12 +3906,14 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->context1 = NULL; pmb->context2 = NULL; + vport->gidft_inp = 0; if (mb->mbxStatus) { -out: lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0260 Register NameServer error: 0x%x\n", mb->mbxStatus); + +out: /* decrement the node reference count held for this * callback function. */ @@ -3870,20 +3957,29 @@ out: lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); - lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) + lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP); + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) + lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, + FC_TYPE_NVME); /* Issue SCR just before NameServer GID_FT Query */ lpfc_issue_els_scr(vport, SCR_DID, 0); } vport->fc_ns_retry = 0; - /* Good status, issue CT Request to NameServer */ - if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) { - /* Cannot issue NameServer Query, so finish up discovery */ + if (lpfc_issue_gidft(vport) == 0) goto out; - } - /* decrement the node reference count held for this + /* + * At this point in time we may need to wait for multiple + * SLI_CTNS_GID_FT CT commands to complete before we start discovery. + * + * decrement the node reference count held for this * callback function. 
*/ lpfc_nlp_put(ndlp); @@ -3903,6 +3999,9 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct fc_rport_identifiers rport_ids; struct lpfc_hba *phba = vport->phba; + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + return; + /* Remote port has reappeared. Re-register w/ FC transport */ rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); @@ -3972,12 +4071,17 @@ static void lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) { struct fc_rport *rport = ndlp->rport; + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_hba *phba = vport->phba; - lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + return; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport delete: did:x%x flg:x%x type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); - lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "3184 rport unregister x%06x, rport %p\n", ndlp->nlp_DID, rport); @@ -4029,6 +4133,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int old_state, int new_state) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; if (new_state == NLP_STE_UNMAPPED_NODE) { ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; @@ -4039,23 +4144,56 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (new_state == NLP_STE_NPR_NODE) ndlp->nlp_flag &= ~NLP_RCV_PLOGI; - /* Transport interface */ - if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE || - old_state == NLP_STE_UNMAPPED_NODE)) { - vport->phba->nport_event_cnt++; - lpfc_unregister_remote_port(ndlp); + /* FCP and NVME Transport interface */ + if ((old_state == NLP_STE_MAPPED_NODE || + old_state == NLP_STE_UNMAPPED_NODE)) { + if (ndlp->rport) { + vport->phba->nport_event_cnt++; + lpfc_unregister_remote_port(ndlp); + } + + /* Notify the NVME transport of this rport's loss */ + if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && + (vport->phba->nvmet_support == 0) && + ((ndlp->nlp_fc4_type & NLP_FC4_NVME) || + (ndlp->nlp_DID == Fabric_DID))) { + vport->phba->nport_event_cnt++; + lpfc_nvme_unregister_port(vport, ndlp); + } } + /* FCP and NVME Transport interfaces */ + if (new_state == NLP_STE_MAPPED_NODE || new_state == NLP_STE_UNMAPPED_NODE) { - vport->phba->nport_event_cnt++; - /* - * Tell the fc transport about the port, if we haven't - * already. If we have, and it's a scsi entity, be - * sure to unblock any attached scsi devices - */ - lpfc_register_remote_port(vport, ndlp); + if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) || + (ndlp->nlp_DID == Fabric_DID)) { + vport->phba->nport_event_cnt++; + /* + * Tell the fc transport about the port, if we haven't + * already. If we have, and it's a scsi entity, be + */ + lpfc_register_remote_port(vport, ndlp); + } + /* Notify the NVME transport of this new rport. */ + if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { + if (vport->phba->nvmet_support == 0) { + /* Register this rport with the transport. + * Initiators take the NDLP ref count in + * the register. + */ + vport->phba->nport_event_cnt++; + lpfc_nvme_register_port(vport, ndlp); + } else { + /* Just take an NDLP ref count since the + * target does not register rports. 
+ */ + lpfc_nlp_get(ndlp); + } + } } + if ((new_state == NLP_STE_MAPPED_NODE) && (vport->stat_data_enabled)) { /* @@ -4073,12 +4211,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, "0x%x\n", ndlp->nlp_DID); } /* - * if we added to Mapped list, but the remote port - * registration failed or assigned a target id outside - * our presentable range - move the node to the - * Unmapped List + * If the node just added to Mapped list was an FCP target, + * but the remote port registration failed or assigned a target + * id outside the presentable range - move the node to the + * Unmapped List. */ - if (new_state == NLP_STE_MAPPED_NODE && + if ((new_state == NLP_STE_MAPPED_NODE) && + (ndlp->nlp_type & NLP_FCP_TARGET) && (!ndlp->rport || ndlp->rport->scsi_target_id == -1 || ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { @@ -4212,6 +4351,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->vport = vport; ndlp->phba = vport->phba; ndlp->nlp_sid = NLP_NO_SID; + ndlp->nlp_fc4_type = NLP_FC4_NONE; kref_init(&ndlp->kref); NLP_INT_NODE_ACT(ndlp); atomic_set(&ndlp->cmd_pending, 0); @@ -4394,7 +4534,6 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) { - struct lpfc_sli *psli = &phba->sli; IOCB_t *icmd = &iocb->iocb; struct lpfc_vport *vport = ndlp->vport; @@ -4413,9 +4552,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, if (iocb->context1 == (uint8_t *) ndlp) return 1; } - } else if (pring->ringno == psli->extra_ring) { - - } else if (pring->ringno == psli->fcp_ring) { + } else if (pring->ringno == LPFC_FCP_RING) { /* Skip match check if waiting to relogin to FCP target */ if ((ndlp->nlp_type & NLP_FCP_TARGET) && (ndlp->nlp_flag & NLP_DELAY_TMO)) { @@ -4424,12 +4561,58 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { return 1; } - } else if (pring->ringno == psli->next_ring) { - } return 0; } +static void +__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring, + struct list_head *dequeue_list) +{ + struct lpfc_iocbq *iocb, *next_iocb; + + list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { + /* Check to see if iocb matches the nport */ + if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) + /* match, dequeue */ + list_move_tail(&iocb->list, dequeue_list); + } +} + +static void +lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) +{ + struct lpfc_sli *psli = &phba->sli; + uint32_t i; + + spin_lock_irq(&phba->hbalock); + for (i = 0; i < psli->num_rings; i++) + __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i], + dequeue_list); + spin_unlock_irq(&phba->hbalock); +} + +static void +lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) +{ + struct lpfc_sli_ring *pring; + struct lpfc_queue *qp = NULL; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + spin_lock_irq(&pring->ring_lock); + __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list); + spin_unlock_irq(&pring->ring_lock); + } + spin_unlock_irq(&phba->hbalock); +} + /* * Free resources / clean up outstanding I/Os * associated with nlp_rpi in the LPFC_NODELIST entry. 
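[Editor's aside, not part of the patch above.] The new __lpfc_dequeue_nport_iocbs(), lpfc_sli3_dequeue_nport_iocbs() and lpfc_sli4_dequeue_nport_iocbs() helpers in the preceding hunk all apply the same pattern: walk a ring's txq under the relevant lock, move each entry that matches the nport onto a caller-supplied list with list_move_tail(), and leave it to the caller (lpfc_no_rpi() in the following hunk) to cancel the moved entries once the lock has been dropped. The sketch below shows that pattern in isolation; struct demo_req, demo_match() and demo_lock are made-up stand-ins for the iocb, lpfc_check_sli_ndlp() and the hba lock, so treat it as an illustration rather than driver code.

    /* Sketch of the "move matching entries to a private list" pattern. */
    #include <linux/types.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_req {
            struct list_head list;
            int owner_id;
    };

    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_txq);

    static bool demo_match(struct demo_req *req, int owner_id)
    {
            return req->owner_id == owner_id;
    }

    /*
     * Move every request owned by @owner_id from the shared queue onto
     * @dequeue_list while holding the lock; the caller completes or
     * cancels the moved entries afterwards, outside the lock.
     */
    static void demo_dequeue_owner(int owner_id, struct list_head *dequeue_list)
    {
            struct demo_req *req, *next;

            spin_lock(&demo_lock);
            list_for_each_entry_safe(req, next, &demo_txq, list) {
                    if (demo_match(req, owner_id))
                            list_move_tail(&req->list, dequeue_list);
            }
            spin_unlock(&demo_lock);
    }

The _safe iterator is what makes the move legal here: list_move_tail() unlinks the current entry, so the loop must already hold a pointer to the next node before the entry is taken off the shared queue.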
@@ -4438,10 +4621,6 @@ static int lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(completions); - struct lpfc_sli *psli; - struct lpfc_sli_ring *pring; - struct lpfc_iocbq *iocb, *next_iocb; - uint32_t i; lpfc_fabric_abort_nport(ndlp); @@ -4449,29 +4628,11 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * Everything that matches on txcmplq will be returned * by firmware with a no rpi error. */ - psli = &phba->sli; if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { - /* Now process each ring */ - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; - - spin_lock_irq(&phba->hbalock); - list_for_each_entry_safe(iocb, next_iocb, &pring->txq, - list) { - /* - * Check to see if iocb matches the nport we are - * looking for - */ - if ((lpfc_check_sli_ndlp(phba, pring, iocb, - ndlp))) { - /* It matches, so deque and call compl - with an error */ - list_move_tail(&iocb->list, - &completions); - } - } - spin_unlock_irq(&phba->hbalock); - } + if (phba->sli_rev != LPFC_SLI_REV4) + lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); + else + lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions); } /* Cancel all the IOCBs from the completions list */ @@ -4950,6 +5111,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) return NULL; lpfc_nlp_init(vport, ndlp, did); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + if (vport->phba->nvmet_support) + return ndlp; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -4958,6 +5121,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); if (!ndlp) return NULL; + if (vport->phba->nvmet_support) + return ndlp; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -4977,6 +5142,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) * delay timeout is not needed. 
*/ lpfc_cancel_retry_delay_tmo(vport, ndlp); + if (vport->phba->nvmet_support) + return ndlp; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -4992,6 +5159,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) ndlp->nlp_flag & NLP_RCV_PLOGI) return NULL; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + if (vport->phba->nvmet_support) + return ndlp; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -5040,14 +5209,14 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport) return; } +/* SLI3 only */ void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) { LPFC_MBOXQ_t *mbox; struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring]; - struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring]; - struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring]; + struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING]; + struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING]; int rc; /* @@ -5071,7 +5240,6 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) lpfc_disc_flush_list(vport); extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; - next_ring->flag &= ~LPFC_STOP_IOCB_EVENT; phba->link_state = LPFC_HBA_ERROR; } } @@ -5207,7 +5375,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) struct lpfc_sli_ring *pring; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; + pring = lpfc_phba_elsring(phba); /* Error matching iocb on txq or txcmplq * First check the txq. @@ -5331,12 +5499,13 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: - /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for - * FAN - */ - /* FAN timeout */ + /* + * port_state is identically LPFC_LOCAL_CFG_LINK while + * waiting for FAN timeout + */ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "0221 FAN timeout\n"); + /* Start discovery by sending FLOGI, clean up old rpis */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { @@ -5407,8 +5576,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { /* Try it one more time */ vport->fc_ns_retry++; - rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, - vport->fc_ns_retry, 0); + vport->gidft_inp = 0; + rc = lpfc_issue_gidft(vport); if (rc == 0) break; } @@ -5523,12 +5692,14 @@ restart_disc: if (clrlaerr) { lpfc_disc_flush_list(vport); - psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; + if (phba->sli_rev != LPFC_SLI_REV4) { + psli->sli3_ring[(LPFC_EXTRA_RING)].flag &= + ~LPFC_STOP_IOCB_EVENT; + psli->sli3_ring[LPFC_FCP_RING].flag &= + ~LPFC_STOP_IOCB_EVENT; + } vport->port_state = LPFC_VPORT_READY; } - return; } diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 3b970d370600..15ca21484150 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. 
All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -44,8 +46,6 @@ #define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */ #define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */ #define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ -#define LPFC_FCP_NEXT_RING 3 -#define LPFC_FCP_OAS_RING 3 #define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ #define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ @@ -92,8 +92,10 @@ union CtCommandResponse { uint32_t word; }; -#define FC4_FEATURE_INIT 0x2 -#define FC4_FEATURE_TARGET 0x1 +/* FC4 Feature bits for RFF_ID */ +#define FC4_FEATURE_TARGET 0x1 +#define FC4_FEATURE_INIT 0x2 +#define FC4_FEATURE_NVME_DISC 0x4 struct lpfc_sli_ct_request { /* Structure is in Big Endian format */ @@ -117,6 +119,16 @@ struct lpfc_sli_ct_request { uint8_t AreaScope; uint8_t Fc4Type; /* for GID_FT requests */ } gid; + struct gid_ff { + uint8_t Flags; + uint8_t DomainScope; + uint8_t AreaScope; + uint8_t rsvd1; + uint8_t rsvd2; + uint8_t rsvd3; + uint8_t Fc4FBits; + uint8_t Fc4Type; + } gid_ff; struct rft { uint32_t PortId; /* For RFT_ID requests */ @@ -161,6 +173,12 @@ struct lpfc_sli_ct_request { struct gff_acc { uint8_t fbits[128]; } gff_acc; + struct gft { + uint32_t PortId; + } gft; + struct gft_acc { + uint32_t fc4_types[8]; + } gft_acc; #define FCP_TYPE_FEATURE_OFFSET 7 struct rff { uint32_t PortId; @@ -176,8 +194,12 @@ struct lpfc_sli_ct_request { #define SLI_CT_REVISION 1 #define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct gid)) +#define GIDFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gid_ff)) #define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct gff)) +#define GFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gft)) #define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct rft)) #define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ @@ -273,6 +295,7 @@ struct lpfc_sli_ct_request { #define SLI_CTNS_GNN_IP 0x0153 #define SLI_CTNS_GIPA_IP 0x0156 #define SLI_CTNS_GID_FT 0x0171 +#define SLI_CTNS_GID_FF 0x01F1 #define SLI_CTNS_GID_PT 0x01A1 #define SLI_CTNS_RPN_ID 0x0212 #define SLI_CTNS_RNN_ID 0x0213 @@ -290,15 +313,16 @@ struct lpfc_sli_ct_request { * Port Types */ -#define SLI_CTPT_N_PORT 0x01 -#define SLI_CTPT_NL_PORT 0x02 -#define SLI_CTPT_FNL_PORT 0x03 -#define SLI_CTPT_IP 0x04 -#define SLI_CTPT_FCP 0x08 -#define SLI_CTPT_NX_PORT 0x7F -#define SLI_CTPT_F_PORT 0x81 -#define SLI_CTPT_FL_PORT 0x82 -#define SLI_CTPT_E_PORT 0x84 +#define SLI_CTPT_N_PORT 0x01 +#define SLI_CTPT_NL_PORT 0x02 +#define SLI_CTPT_FNL_PORT 0x03 +#define SLI_CTPT_IP 0x04 +#define SLI_CTPT_FCP 0x08 +#define SLI_CTPT_NVME 0x28 +#define SLI_CTPT_NX_PORT 0x7F +#define SLI_CTPT_F_PORT 0x81 +#define SLI_CTPT_FL_PORT 0x82 +#define SLI_CTPT_E_PORT 0x84 #define SLI_CT_LAST_ENTRY 0x80000000 @@ -339,6 +363,7 @@ struct lpfc_name { uint8_t IEEE[6]; /* FC IEEE address */ } s; uint8_t wwn[8]; + uint64_t name; } u; }; @@ -492,7 +517,15 @@ struct serv_parm { /* Structure is in Big Endian format */ struct class_parms cls2; struct class_parms cls3; struct class_parms cls4; - uint8_t vendorVersion[16]; + union { + uint8_t vendorVersion[16]; + struct { + uint32_t vid; +#define LPFC_VV_EMLX_ID 0x454d4c58 /* EMLX */ + 
uint32_t flags; +#define LPFC_VV_SUPPRESS_RSP 1 + } vv; + } un; }; /* @@ -551,6 +584,7 @@ struct fc_vft_header { #define ELS_CMD_REC 0x13000000 #define ELS_CMD_RDP 0x18000000 #define ELS_CMD_PRLI 0x20100014 +#define ELS_CMD_NVMEPRLI 0x20140018 #define ELS_CMD_PRLO 0x21100014 #define ELS_CMD_PRLO_ACC 0x02100014 #define ELS_CMD_PDISC 0x50000000 @@ -590,6 +624,7 @@ struct fc_vft_header { #define ELS_CMD_REC 0x13 #define ELS_CMD_RDP 0x18 #define ELS_CMD_PRLI 0x14001020 +#define ELS_CMD_NVMEPRLI 0x18001420 #define ELS_CMD_PRLO 0x14001021 #define ELS_CMD_PRLO_ACC 0x14001002 #define ELS_CMD_PDISC 0x50 @@ -686,6 +721,7 @@ typedef struct _PRLI { /* Structure is in Big Endian format */ uint8_t prliType; /* FC Parm Word 0, bit 24:31 */ #define PRLI_FCP_TYPE 0x08 +#define PRLI_NVME_TYPE 0x28 uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */ #ifdef __BIG_ENDIAN_BITFIELD @@ -1245,8 +1281,7 @@ struct fc_rdp_opd_sfp_info { uint8_t vendor_name[16]; uint8_t model_number[16]; uint8_t serial_number[16]; - uint8_t revision[2]; - uint8_t reserved[2]; + uint8_t revision[4]; uint8_t date[8]; }; @@ -1265,14 +1300,14 @@ struct fc_rdp_req_frame { struct fc_rdp_res_frame { - uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */ - uint32_t length; /* FC Word 1 */ - struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */ - struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */ - struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10-12 */ - struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ - struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ - struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ + uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */ + uint32_t length; /* FC Word 1 */ + struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */ + struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */ + struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10 -12 */ + struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13 -21 */ + struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22 -27 */ + struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28 -33 */ struct fc_fec_rdp_desc fec_desc; /* FC word 34-37*/ struct fc_rdp_bbc_desc bbc_desc; /* FC Word 38-42*/ struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 43-47*/ @@ -1791,6 +1826,7 @@ typedef struct { /* FireFly BIU registers */ #define MBX_INIT_VFI 0xA3 #define MBX_INIT_VPI 0xA4 #define MBX_ACCESS_VDATA 0xA5 +#define MBX_REG_FCFI_MRQ 0xAF #define MBX_AUTH_PORT 0xF8 #define MBX_SECURITY_MGMT 0xF9 diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 5646699b0516..cfdb068a3bfc 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2016 Emulex. All rights reserved. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -108,6 +110,7 @@ struct lpfc_sli_intf { #define LPFC_MAX_MQ_PAGE 8 #define LPFC_MAX_WQ_PAGE_V0 4 #define LPFC_MAX_WQ_PAGE 8 +#define LPFC_MAX_RQ_PAGE 8 #define LPFC_MAX_CQ_PAGE 4 #define LPFC_MAX_EQ_PAGE 8 @@ -198,7 +201,7 @@ struct lpfc_sli_intf { /* Configuration of Interrupts / sec for entire HBA port */ #define LPFC_MIN_IMAX 5000 #define LPFC_MAX_IMAX 5000000 -#define LPFC_DEF_IMAX 50000 +#define LPFC_DEF_IMAX 150000 #define LPFC_MIN_CPU_MAP 0 #define LPFC_MAX_CPU_MAP 2 @@ -348,6 +351,7 @@ struct lpfc_cqe { #define CQE_CODE_RECEIVE 0x4 #define CQE_CODE_XRI_ABORTED 0x5 #define CQE_CODE_RECEIVE_V1 0x9 +#define CQE_CODE_NVME_ERSP 0xd /* * Define mask value for xri_aborted and wcqe completed CQE extended status. @@ -367,6 +371,9 @@ struct lpfc_wcqe_complete { #define lpfc_wcqe_c_hw_status_SHIFT 0 #define lpfc_wcqe_c_hw_status_MASK 0x000000FF #define lpfc_wcqe_c_hw_status_WORD word0 +#define lpfc_wcqe_c_ersp0_SHIFT 0 +#define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF +#define lpfc_wcqe_c_ersp0_WORD word0 uint32_t total_data_placed; uint32_t parameter; #define lpfc_wcqe_c_bg_edir_SHIFT 5 @@ -400,6 +407,9 @@ struct lpfc_wcqe_complete { #define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT #define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK #define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD +#define lpfc_wcqe_c_sqhead_SHIFT 0 +#define lpfc_wcqe_c_sqhead_MASK 0x0000FFFF +#define lpfc_wcqe_c_sqhead_WORD word3 }; /* completion queue entry for wqe release */ @@ -954,6 +964,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A #define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B #define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10 +#define LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET 0x1D #define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23 @@ -1135,6 +1146,116 @@ struct lpfc_mbx_cq_create { } u; }; +struct lpfc_mbx_cq_create_set { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_set_page_size_SHIFT 16 /* Version 2 Only */ +#define lpfc_mbx_cq_create_set_page_size_MASK 0x000000FF +#define lpfc_mbx_cq_create_set_page_size_WORD word0 +#define lpfc_mbx_cq_create_set_num_pages_SHIFT 0 +#define lpfc_mbx_cq_create_set_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_num_pages_WORD word0 + uint32_t word1; +#define lpfc_mbx_cq_create_set_evt_SHIFT 31 +#define lpfc_mbx_cq_create_set_evt_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_evt_WORD word1 +#define lpfc_mbx_cq_create_set_valid_SHIFT 29 +#define lpfc_mbx_cq_create_set_valid_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_valid_WORD word1 +#define lpfc_mbx_cq_create_set_cqe_cnt_SHIFT 27 +#define lpfc_mbx_cq_create_set_cqe_cnt_MASK 0x00000003 +#define lpfc_mbx_cq_create_set_cqe_cnt_WORD word1 +#define lpfc_mbx_cq_create_set_cqe_size_SHIFT 25 +#define lpfc_mbx_cq_create_set_cqe_size_MASK 0x00000003 +#define lpfc_mbx_cq_create_set_cqe_size_WORD word1 +#define lpfc_mbx_cq_create_set_auto_SHIFT 15 +#define lpfc_mbx_cq_create_set_auto_MASK 0x0000001 +#define lpfc_mbx_cq_create_set_auto_WORD word1 +#define lpfc_mbx_cq_create_set_nodelay_SHIFT 14 +#define lpfc_mbx_cq_create_set_nodelay_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_nodelay_WORD word1 +#define lpfc_mbx_cq_create_set_clswm_SHIFT 12 +#define 
lpfc_mbx_cq_create_set_clswm_MASK 0x00000003 +#define lpfc_mbx_cq_create_set_clswm_WORD word1 + uint32_t word2; +#define lpfc_mbx_cq_create_set_arm_SHIFT 31 +#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_arm_WORD word2 +#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0 +#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_num_cq_WORD word2 + uint32_t word3; +#define lpfc_mbx_cq_create_set_eq_id1_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id1_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id1_WORD word3 +#define lpfc_mbx_cq_create_set_eq_id0_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id0_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id0_WORD word3 + uint32_t word4; +#define lpfc_mbx_cq_create_set_eq_id3_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id3_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id3_WORD word4 +#define lpfc_mbx_cq_create_set_eq_id2_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id2_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id2_WORD word4 + uint32_t word5; +#define lpfc_mbx_cq_create_set_eq_id5_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id5_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id5_WORD word5 +#define lpfc_mbx_cq_create_set_eq_id4_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id4_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id4_WORD word5 + uint32_t word6; +#define lpfc_mbx_cq_create_set_eq_id7_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id7_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id7_WORD word6 +#define lpfc_mbx_cq_create_set_eq_id6_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id6_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id6_WORD word6 + uint32_t word7; +#define lpfc_mbx_cq_create_set_eq_id9_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id9_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id9_WORD word7 +#define lpfc_mbx_cq_create_set_eq_id8_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id8_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id8_WORD word7 + uint32_t word8; +#define lpfc_mbx_cq_create_set_eq_id11_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id11_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id11_WORD word8 +#define lpfc_mbx_cq_create_set_eq_id10_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id10_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id10_WORD word8 + uint32_t word9; +#define lpfc_mbx_cq_create_set_eq_id13_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id13_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id13_WORD word9 +#define lpfc_mbx_cq_create_set_eq_id12_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id12_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id12_WORD word9 + uint32_t word10; +#define lpfc_mbx_cq_create_set_eq_id15_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id15_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id15_WORD word10 +#define lpfc_mbx_cq_create_set_eq_id14_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id14_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id14_WORD word10 + struct dma_address page[1]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_set_num_alloc_SHIFT 16 +#define lpfc_mbx_cq_create_set_num_alloc_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_num_alloc_WORD word0 +#define lpfc_mbx_cq_create_set_base_id_SHIFT 0 +#define lpfc_mbx_cq_create_set_base_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_base_id_WORD word0 + } response; + } u; +}; + struct lpfc_mbx_cq_destroy { struct mbox_header header; union { @@ -1186,6 +1307,7 @@ struct lpfc_mbx_wq_create { #define 
lpfc_mbx_wq_create_page_size_SHIFT 0 #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF #define lpfc_mbx_wq_create_page_size_WORD word1 +#define LPFC_WQ_PAGE_SIZE_4096 0x1 #define lpfc_mbx_wq_create_wqe_size_SHIFT 8 #define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F #define lpfc_mbx_wq_create_wqe_size_WORD word1 @@ -1243,10 +1365,10 @@ struct rq_context { #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ -#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */ +#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1-2 Only */ #define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF #define lpfc_rq_context_rqe_count_1_WORD word0 -#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ +#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1-2 Only */ #define lpfc_rq_context_rqe_size_MASK 0x0000000F #define lpfc_rq_context_rqe_size_WORD word0 #define LPFC_RQE_SIZE_8 2 @@ -1257,7 +1379,14 @@ struct rq_context { #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ #define lpfc_rq_context_page_size_MASK 0x000000FF #define lpfc_rq_context_page_size_WORD word0 - uint32_t reserved1; +#define LPFC_RQ_PAGE_SIZE_4096 0x1 + uint32_t word1; +#define lpfc_rq_context_data_size_SHIFT 16 /* Version 2 Only */ +#define lpfc_rq_context_data_size_MASK 0x0000FFFF +#define lpfc_rq_context_data_size_WORD word1 +#define lpfc_rq_context_hdr_size_SHIFT 0 /* Version 2 Only */ +#define lpfc_rq_context_hdr_size_MASK 0x0000FFFF +#define lpfc_rq_context_hdr_size_WORD word1 uint32_t word2; #define lpfc_rq_context_cq_id_SHIFT 16 #define lpfc_rq_context_cq_id_MASK 0x000003FF @@ -1265,6 +1394,9 @@ struct rq_context { #define lpfc_rq_context_buf_size_SHIFT 0 #define lpfc_rq_context_buf_size_MASK 0x0000FFFF #define lpfc_rq_context_buf_size_WORD word2 +#define lpfc_rq_context_base_cq_SHIFT 0 /* Version 2 Only */ +#define lpfc_rq_context_base_cq_MASK 0x0000FFFF +#define lpfc_rq_context_base_cq_WORD word2 uint32_t buffer_size; /* Version 1 Only */ }; @@ -1286,10 +1418,65 @@ struct lpfc_mbx_rq_create { #define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF #define lpfc_mbx_rq_create_ulp_num_WORD word0 struct rq_context context; - struct dma_address page[LPFC_MAX_WQ_PAGE]; + struct dma_address page[LPFC_MAX_RQ_PAGE]; } request; struct { uint32_t word0; +#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16 +#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0 +#define lpfc_mbx_rq_create_q_id_SHIFT 0 +#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_id_WORD word0 + uint32_t doorbell_offset; + uint32_t word2; +#define lpfc_mbx_rq_create_bar_set_SHIFT 0 +#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_bar_set_WORD word2 +#define lpfc_mbx_rq_create_db_format_SHIFT 16 +#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_db_format_WORD word2 + } response; + } u; +}; + +struct lpfc_mbx_rq_create_v2 { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_num_pages_SHIFT 0 +#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_num_pages_WORD word0 +#define lpfc_mbx_rq_create_rq_cnt_SHIFT 16 +#define lpfc_mbx_rq_create_rq_cnt_MASK 0x000000FF +#define lpfc_mbx_rq_create_rq_cnt_WORD word0 +#define lpfc_mbx_rq_create_dua_SHIFT 16 +#define lpfc_mbx_rq_create_dua_MASK 0x00000001 +#define lpfc_mbx_rq_create_dua_WORD 
word0 +#define lpfc_mbx_rq_create_bqu_SHIFT 17 +#define lpfc_mbx_rq_create_bqu_MASK 0x00000001 +#define lpfc_mbx_rq_create_bqu_WORD word0 +#define lpfc_mbx_rq_create_ulp_num_SHIFT 24 +#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF +#define lpfc_mbx_rq_create_ulp_num_WORD word0 +#define lpfc_mbx_rq_create_dim_SHIFT 29 +#define lpfc_mbx_rq_create_dim_MASK 0x00000001 +#define lpfc_mbx_rq_create_dim_WORD word0 +#define lpfc_mbx_rq_create_dfd_SHIFT 30 +#define lpfc_mbx_rq_create_dfd_MASK 0x00000001 +#define lpfc_mbx_rq_create_dfd_WORD word0 +#define lpfc_mbx_rq_create_dnb_SHIFT 31 +#define lpfc_mbx_rq_create_dnb_MASK 0x00000001 +#define lpfc_mbx_rq_create_dnb_WORD word0 + struct rq_context context; + struct dma_address page[1]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16 +#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0 #define lpfc_mbx_rq_create_q_id_SHIFT 0 #define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF #define lpfc_mbx_rq_create_q_id_WORD word0 @@ -2203,6 +2390,160 @@ struct lpfc_mbx_reg_fcfi { #define lpfc_reg_fcfi_vlan_tag_WORD word8 }; +struct lpfc_mbx_reg_fcfi_mrq { + uint32_t word1; +#define lpfc_reg_fcfi_mrq_info_index_SHIFT 0 +#define lpfc_reg_fcfi_mrq_info_index_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_info_index_WORD word1 +#define lpfc_reg_fcfi_mrq_fcfi_SHIFT 16 +#define lpfc_reg_fcfi_mrq_fcfi_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_fcfi_WORD word1 + uint32_t word2; +#define lpfc_reg_fcfi_mrq_rq_id1_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rq_id1_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id1_WORD word2 +#define lpfc_reg_fcfi_mrq_rq_id0_SHIFT 16 +#define lpfc_reg_fcfi_mrq_rq_id0_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id0_WORD word2 + uint32_t word3; +#define lpfc_reg_fcfi_mrq_rq_id3_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rq_id3_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id3_WORD word3 +#define lpfc_reg_fcfi_mrq_rq_id2_SHIFT 16 +#define lpfc_reg_fcfi_mrq_rq_id2_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id2_WORD word3 + uint32_t word4; +#define lpfc_reg_fcfi_mrq_type_match0_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match0_WORD word4 +#define lpfc_reg_fcfi_mrq_type_mask0_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask0_WORD word4 +#define lpfc_reg_fcfi_mrq_rctl_match0_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match0_WORD word4 +#define lpfc_reg_fcfi_mrq_rctl_mask0_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask0_WORD word4 + uint32_t word5; +#define lpfc_reg_fcfi_mrq_type_match1_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match1_WORD word5 +#define lpfc_reg_fcfi_mrq_type_mask1_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask1_WORD word5 +#define lpfc_reg_fcfi_mrq_rctl_match1_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match1_WORD word5 +#define lpfc_reg_fcfi_mrq_rctl_mask1_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask1_WORD word5 + uint32_t word6; +#define lpfc_reg_fcfi_mrq_type_match2_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match2_WORD word6 +#define lpfc_reg_fcfi_mrq_type_mask2_SHIFT 16 +#define 
lpfc_reg_fcfi_mrq_type_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask2_WORD word6 +#define lpfc_reg_fcfi_mrq_rctl_match2_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match2_WORD word6 +#define lpfc_reg_fcfi_mrq_rctl_mask2_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask2_WORD word6 + uint32_t word7; +#define lpfc_reg_fcfi_mrq_type_match3_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match3_WORD word7 +#define lpfc_reg_fcfi_mrq_type_mask3_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask3_WORD word7 +#define lpfc_reg_fcfi_mrq_rctl_match3_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match3_WORD word7 +#define lpfc_reg_fcfi_mrq_rctl_mask3_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask3_WORD word7 + uint32_t word8; +#define lpfc_reg_fcfi_mrq_ptc7_SHIFT 31 +#define lpfc_reg_fcfi_mrq_ptc7_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc7_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc6_SHIFT 30 +#define lpfc_reg_fcfi_mrq_ptc6_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc6_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc5_SHIFT 29 +#define lpfc_reg_fcfi_mrq_ptc5_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc5_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc4_SHIFT 28 +#define lpfc_reg_fcfi_mrq_ptc4_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc4_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc3_SHIFT 27 +#define lpfc_reg_fcfi_mrq_ptc3_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc3_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc2_SHIFT 26 +#define lpfc_reg_fcfi_mrq_ptc2_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc2_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc1_SHIFT 25 +#define lpfc_reg_fcfi_mrq_ptc1_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc1_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc0_SHIFT 24 +#define lpfc_reg_fcfi_mrq_ptc0_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc0_WORD word8 +#define lpfc_reg_fcfi_mrq_pt7_SHIFT 23 +#define lpfc_reg_fcfi_mrq_pt7_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt7_WORD word8 +#define lpfc_reg_fcfi_mrq_pt6_SHIFT 22 +#define lpfc_reg_fcfi_mrq_pt6_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt6_WORD word8 +#define lpfc_reg_fcfi_mrq_pt5_SHIFT 21 +#define lpfc_reg_fcfi_mrq_pt5_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt5_WORD word8 +#define lpfc_reg_fcfi_mrq_pt4_SHIFT 20 +#define lpfc_reg_fcfi_mrq_pt4_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt4_WORD word8 +#define lpfc_reg_fcfi_mrq_pt3_SHIFT 19 +#define lpfc_reg_fcfi_mrq_pt3_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt3_WORD word8 +#define lpfc_reg_fcfi_mrq_pt2_SHIFT 18 +#define lpfc_reg_fcfi_mrq_pt2_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt2_WORD word8 +#define lpfc_reg_fcfi_mrq_pt1_SHIFT 17 +#define lpfc_reg_fcfi_mrq_pt1_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt1_WORD word8 +#define lpfc_reg_fcfi_mrq_pt0_SHIFT 16 +#define lpfc_reg_fcfi_mrq_pt0_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt0_WORD word8 +#define lpfc_reg_fcfi_mrq_xmv_SHIFT 15 +#define lpfc_reg_fcfi_mrq_xmv_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_xmv_WORD word8 +#define lpfc_reg_fcfi_mrq_mode_SHIFT 13 +#define lpfc_reg_fcfi_mrq_mode_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_mode_WORD word8 +#define lpfc_reg_fcfi_mrq_vv_SHIFT 12 +#define lpfc_reg_fcfi_mrq_vv_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_vv_WORD word8 +#define lpfc_reg_fcfi_mrq_vlan_tag_SHIFT 0 
+#define lpfc_reg_fcfi_mrq_vlan_tag_MASK 0x00000FFF +#define lpfc_reg_fcfi_mrq_vlan_tag_WORD word8 + uint32_t word9; +#define lpfc_reg_fcfi_mrq_policy_SHIFT 12 +#define lpfc_reg_fcfi_mrq_policy_MASK 0x0000000F +#define lpfc_reg_fcfi_mrq_policy_WORD word9 +#define lpfc_reg_fcfi_mrq_filter_SHIFT 8 +#define lpfc_reg_fcfi_mrq_filter_MASK 0x0000000F +#define lpfc_reg_fcfi_mrq_filter_WORD word9 +#define lpfc_reg_fcfi_mrq_npairs_SHIFT 0 +#define lpfc_reg_fcfi_mrq_npairs_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_npairs_WORD word9 + uint32_t word10; + uint32_t word11; + uint32_t word12; + uint32_t word13; + uint32_t word14; + uint32_t word15; + uint32_t word16; +}; + struct lpfc_mbx_unreg_fcfi { uint32_t word1_rsv; uint32_t word2; @@ -2382,6 +2723,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11 #define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rq_perfh_WORD word2 +#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16 +#define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2 uint32_t word3; #define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 #define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 @@ -2410,6 +2754,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11 #define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16 +#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3 }; struct lpfc_mbx_supp_pages { @@ -2839,12 +3186,18 @@ struct lpfc_sli4_parameters { #define cfg_mqv_WORD word6 uint32_t word7; uint32_t word8; +#define cfg_wqpcnt_SHIFT 0 +#define cfg_wqpcnt_MASK 0x0000000f +#define cfg_wqpcnt_WORD word8 #define cfg_wqsize_SHIFT 8 #define cfg_wqsize_MASK 0x0000000f #define cfg_wqsize_WORD word8 #define cfg_wqv_SHIFT 14 #define cfg_wqv_MASK 0x00000003 #define cfg_wqv_WORD word8 +#define cfg_wqpsize_SHIFT 16 +#define cfg_wqpsize_MASK 0x000000ff +#define cfg_wqpsize_WORD word8 uint32_t word9; uint32_t word10; #define cfg_rqv_SHIFT 14 @@ -2895,6 +3248,12 @@ struct lpfc_sli4_parameters { #define cfg_mds_diags_SHIFT 1 #define cfg_mds_diags_MASK 0x00000001 #define cfg_mds_diags_WORD word19 +#define cfg_nvme_SHIFT 3 +#define cfg_nvme_MASK 0x00000001 +#define cfg_nvme_WORD word19 +#define cfg_xib_SHIFT 4 +#define cfg_xib_MASK 0x00000001 +#define cfg_xib_WORD word19 }; #define LPFC_SET_UE_RECOVERY 0x10 @@ -3290,14 +3649,17 @@ struct lpfc_mqe { struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl; struct lpfc_mbx_reg_fcfi reg_fcfi; + struct lpfc_mbx_reg_fcfi_mrq reg_fcfi_mrq; struct lpfc_mbx_unreg_fcfi unreg_fcfi; struct lpfc_mbx_mq_create mq_create; struct lpfc_mbx_mq_create_ext mq_create_ext; struct lpfc_mbx_eq_create eq_create; struct lpfc_mbx_modify_eq_delay eq_delay; struct lpfc_mbx_cq_create cq_create; + struct lpfc_mbx_cq_create_set cq_create_set; struct lpfc_mbx_wq_create wq_create; struct lpfc_mbx_rq_create rq_create; + struct lpfc_mbx_rq_create_v2 rq_create_v2; struct lpfc_mbx_mq_destroy mq_destroy; struct lpfc_mbx_eq_destroy eq_destroy; struct lpfc_mbx_cq_destroy cq_destroy; @@ -3657,6 +4019,9 @@ struct wqe_common { #define wqe_ebde_cnt_SHIFT 0 #define wqe_ebde_cnt_MASK 0x0000000f #define wqe_ebde_cnt_WORD word10 +#define wqe_nvme_SHIFT 4 +#define wqe_nvme_MASK 0x00000001 +#define wqe_nvme_WORD word10 #define wqe_oas_SHIFT 6 #define wqe_oas_MASK 0x00000001 #define wqe_oas_WORD word10 @@ -3717,9 +4082,18 @@ struct wqe_common { #define 
LPFC_ELS_ID_FDISC 2 #define LPFC_ELS_ID_LOGO 1 #define LPFC_ELS_ID_DEFAULT 0 +#define wqe_irsp_SHIFT 4 +#define wqe_irsp_MASK 0x00000001 +#define wqe_irsp_WORD word11 +#define wqe_sup_SHIFT 6 +#define wqe_sup_MASK 0x00000001 +#define wqe_sup_WORD word11 #define wqe_wqec_SHIFT 7 #define wqe_wqec_MASK 0x00000001 #define wqe_wqec_WORD word11 +#define wqe_irsplen_SHIFT 8 +#define wqe_irsplen_MASK 0x0000000f +#define wqe_irsplen_WORD word11 #define wqe_cqid_SHIFT 16 #define wqe_cqid_MASK 0x0000ffff #define wqe_cqid_WORD word11 @@ -3897,6 +4271,50 @@ struct gen_req64_wqe { uint32_t max_response_payload_len; }; +/* Define NVME PRLI request to fabric. NVME is a + * fabric-only protocol. + * Updated to red-lined v1.08 on Sept 16, 2016 + */ +struct lpfc_nvme_prli { + uint32_t word1; + /* The Response Code is defined in the FCP PRLI lpfc_hw.h */ +#define prli_acc_rsp_code_SHIFT 8 +#define prli_acc_rsp_code_MASK 0x0000000f +#define prli_acc_rsp_code_WORD word1 +#define prli_estabImagePair_SHIFT 13 +#define prli_estabImagePair_MASK 0x00000001 +#define prli_estabImagePair_WORD word1 +#define prli_type_code_ext_SHIFT 16 +#define prli_type_code_ext_MASK 0x000000ff +#define prli_type_code_ext_WORD word1 +#define prli_type_code_SHIFT 24 +#define prli_type_code_MASK 0x000000ff +#define prli_type_code_WORD word1 + uint32_t word_rsvd2; + uint32_t word_rsvd3; + uint32_t word4; +#define prli_fba_SHIFT 0 +#define prli_fba_MASK 0x00000001 +#define prli_fba_WORD word4 +#define prli_disc_SHIFT 3 +#define prli_disc_MASK 0x00000001 +#define prli_disc_WORD word4 +#define prli_tgt_SHIFT 4 +#define prli_tgt_MASK 0x00000001 +#define prli_tgt_WORD word4 +#define prli_init_SHIFT 5 +#define prli_init_MASK 0x00000001 +#define prli_init_WORD word4 +#define prli_recov_SHIFT 8 +#define prli_recov_MASK 0x00000001 +#define prli_recov_WORD word4 + uint32_t word5; +#define prli_fb_sz_SHIFT 0 +#define prli_fb_sz_MASK 0x0000ffff +#define prli_fb_sz_WORD word5 +#define LPFC_NVMET_FB_SZ_MAX 65536 /* Driver target mode only. 
*/ +}; + struct create_xri_wqe { uint32_t rsrvd[5]; /* words 0-4 */ struct wqe_did wqe_dest; /* word 5 */ @@ -3969,6 +4387,35 @@ struct fcp_icmnd64_wqe { uint32_t rsvd_12_15[4]; /* word 12-15 */ }; +struct fcp_trsp64_wqe { + struct ulp_bde64 bde; + uint32_t response_len; + uint32_t rsvd_4_5[2]; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_tsend64_wqe { + struct ulp_bde64 bde; + uint32_t payload_offset_len; + uint32_t relative_offset; + uint32_t reserved; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t fcp_data_len; /* word 12 */ + uint32_t rsvd_13_15[3]; /* word 13-15 */ +}; + +struct fcp_treceive64_wqe { + struct ulp_bde64 bde; + uint32_t payload_offset_len; + uint32_t relative_offset; + uint32_t reserved; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t fcp_data_len; /* word 12 */ + uint32_t rsvd_13_15[3]; /* word 13-15 */ +}; +#define TXRDY_PAYLOAD_LEN 12 + union lpfc_wqe { uint32_t words[16]; @@ -3984,6 +4431,10 @@ union lpfc_wqe { struct xmit_els_rsp64_wqe xmit_els_rsp; struct els_request64_wqe els_req; struct gen_req64_wqe gen_req; + struct fcp_trsp64_wqe fcp_trsp; + struct fcp_tsend64_wqe fcp_tsend; + struct fcp_treceive64_wqe fcp_treceive; + }; union lpfc_wqe128 { @@ -3992,6 +4443,9 @@ union lpfc_wqe128 { struct fcp_icmnd64_wqe fcp_icmd; struct fcp_iread64_wqe fcp_iread; struct fcp_iwrite64_wqe fcp_iwrite; + struct fcp_trsp64_wqe fcp_trsp; + struct fcp_tsend64_wqe fcp_tsend; + struct fcp_treceive64_wqe fcp_treceive; struct xmit_seq64_wqe xmit_sequence; struct gen_req64_wqe gen_req; }; @@ -4015,11 +4469,39 @@ struct lpfc_grp_hdr { uint8_t revision[32]; }; -#define FCP_COMMAND 0x0 -#define FCP_COMMAND_DATA_OUT 0x1 -#define ELS_COMMAND_NON_FIP 0xC -#define ELS_COMMAND_FIP 0xD -#define OTHER_COMMAND 0x8 +/* Defines for WQE command type */ +#define FCP_COMMAND 0x0 +#define NVME_READ_CMD 0x0 +#define FCP_COMMAND_DATA_OUT 0x1 +#define NVME_WRITE_CMD 0x1 +#define FCP_COMMAND_TRECEIVE 0x2 +#define FCP_COMMAND_TRSP 0x3 +#define FCP_COMMAND_TSEND 0x7 +#define OTHER_COMMAND 0x8 +#define ELS_COMMAND_NON_FIP 0xC +#define ELS_COMMAND_FIP 0xD + +#define LPFC_NVME_EMBED_CMD 0x0 +#define LPFC_NVME_EMBED_WRITE 0x1 +#define LPFC_NVME_EMBED_READ 0x2 + +/* WQE Commands */ +#define CMD_ABORT_XRI_WQE 0x0F +#define CMD_XMIT_SEQUENCE64_WQE 0x82 +#define CMD_XMIT_BCAST64_WQE 0x84 +#define CMD_ELS_REQUEST64_WQE 0x8A +#define CMD_XMIT_ELS_RSP64_WQE 0x95 +#define CMD_XMIT_BLS_RSP64_WQE 0x97 +#define CMD_FCP_IWRITE64_WQE 0x98 +#define CMD_FCP_IREAD64_WQE 0x9A +#define CMD_FCP_ICMND64_WQE 0x9C +#define CMD_FCP_TSEND64_WQE 0x9F +#define CMD_FCP_TRECEIVE64_WQE 0xA1 +#define CMD_FCP_TRSP64_WQE 0xA3 +#define CMD_GEN_REQUEST64_WQE 0xC2 + +#define CMD_WQE_MASK 0xff + #define LPFC_FW_DUMP 1 #define LPFC_FW_RESET 2 diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h index 5733feafe25f..0ba3733eb36d 100644 --- a/drivers/scsi/lpfc/lpfc_ids.h +++ b/drivers/scsi/lpfc/lpfc_ids.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 64717c171b15..0ee429d773f3 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -34,6 +36,7 @@ #include <linux/firmware.h> #include <linux/miscdevice.h> #include <linux/percpu.h> +#include <linux/msi.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -46,8 +49,9 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -71,6 +75,7 @@ static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); static int lpfc_setup_endian_order(struct lpfc_hba *); static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); static void lpfc_free_els_sgl_list(struct lpfc_hba *); +static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); static void lpfc_init_sgl_list(struct lpfc_hba *); static int lpfc_init_active_sgl_array(struct lpfc_hba *); static void lpfc_free_active_sgl(struct lpfc_hba *); @@ -86,6 +91,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; static DEFINE_IDR(lpfc_hba_index); +#define LPFC_NVMET_BUF_POST 254 /** * lpfc_config_port_prep - Perform lpfc initialization prior to config port @@ -499,12 +505,10 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->link_state = LPFC_LINK_DOWN; /* Only process IOCBs on ELS ring till hba_state is READY */ - if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr) - psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; - if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr) - psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; - if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr) - psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; + if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) + psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; + if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) + psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; /* Post receive buffers for desired rings */ if (phba->sli_rev != 3) @@ -892,7 +896,7 @@ lpfc_hba_free_post_buf(struct lpfc_hba *phba) lpfc_sli_hbqbuf_free_all(phba); else { /* Cleanup preposted buffers on the ELS ring */ - pring = &psli->ring[LPFC_ELS_RING]; + pring = &psli->sli3_ring[LPFC_ELS_RING]; spin_lock_irq(&phba->hbalock); list_splice_init(&pring->postbufq, &buflist); spin_unlock_irq(&phba->hbalock); @@ -925,32 +929,43 @@ static void lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring 
*pring; LIST_HEAD(completions); int i; - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; - if (phba->sli_rev >= LPFC_SLI_REV4) - spin_lock_irq(&pring->ring_lock); - else + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; spin_lock_irq(&phba->hbalock); - /* At this point in time the HBA is either reset or DOA. Either - * way, nothing should be on txcmplq as it will NEVER complete. - */ - list_splice_init(&pring->txcmplq, &completions); - pring->txcmplq_cnt = 0; - - if (phba->sli_rev >= LPFC_SLI_REV4) - spin_unlock_irq(&pring->ring_lock); - else + /* At this point in time the HBA is either reset or DOA + * Nothing should be on txcmplq as it will + * NEVER complete. + */ + list_splice_init(&pring->txcmplq, &completions); + pring->txcmplq_cnt = 0; spin_unlock_irq(&phba->hbalock); + lpfc_sli_abort_iocb_ring(phba, pring); + } /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); + return; + } + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txcmplq, &completions); + pring->txcmplq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); lpfc_sli_abort_iocb_ring(phba, pring); } + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } /** @@ -989,43 +1004,58 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) { struct lpfc_scsi_buf *psb, *psb_next; LIST_HEAD(aborts); + LIST_HEAD(nvme_aborts); unsigned long iflag = 0; struct lpfc_sglq *sglq_entry = NULL; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - lpfc_hba_free_post_buf(phba); + + lpfc_sli_hbqbuf_free_all(phba); lpfc_hba_clean_txcmplq(phba); - pring = &psli->ring[LPFC_ELS_RING]; /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be - * on the lpfc_sgl_list so that it can either be freed if the + * on the lpfc_els_sgl_list so that it can either be freed if the * driver is unloading or reposted if the driver is restarting * the port. */ - spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ + spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */ /* scsl_buf_list */ - /* abts_sgl_list_lock required because worker thread uses this + /* sgl_list_lock required because worker thread uses this * list. */ - spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); list_for_each_entry(sglq_entry, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) sglq_entry->state = SGL_FREED; + list_for_each_entry(sglq_entry, + &phba->sli4_hba.lpfc_abts_nvmet_sgl_list, list) + sglq_entry->state = SGL_FREED; - spin_lock(&pring->ring_lock); list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, - &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&pring->ring_lock); - spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + &phba->sli4_hba.lpfc_els_sgl_list); + + if (phba->sli4_hba.nvme_wq) + list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + + spin_unlock(&phba->sli4_hba.sgl_list_lock); /* abts_scsi_buf_list_lock required because worker thread uses this * list. 
*/ - spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); - list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, - &aborts); - spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, + &aborts); + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + } + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list, + &nvme_aborts); + spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + } + spin_unlock_irq(&phba->hbalock); list_for_each_entry_safe(psb, psb_next, &aborts, list) { @@ -1036,6 +1066,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); + list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) { + psb->pCmd = NULL; + psb->status = IOSTAT_SUCCESS; + } + spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); + list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put); + spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); + lpfc_sli4_free_sp_events(phba); return 0; } @@ -1829,7 +1867,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) * @phba: pointer to lpfc hba data structure. * * This routine is invoked from the worker thread to handle a HBA host - * attention link event. + * attention link event. SLI3 only. **/ void lpfc_handle_latt(struct lpfc_hba *phba) @@ -1867,7 +1905,7 @@ lpfc_handle_latt(struct lpfc_hba *phba) pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; pmb->vport = vport; /* Block ELS IOCBs until we have processed this mbox command */ - phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; + phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { rc = 4; @@ -1883,7 +1921,7 @@ lpfc_handle_latt(struct lpfc_hba *phba) return; lpfc_handle_latt_free_mbuf: - phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; + phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; lpfc_mbuf_free(phba, mp->virt, mp->phys); lpfc_handle_latt_free_mp: kfree(mp); @@ -2441,7 +2479,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) * * This routine posts initial receive IOCB buffers to the ELS ring. The * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is - * set to 64 IOCBs. + * set to 64 IOCBs. SLI3 only. * * Return codes * 0 - success (currently always success) @@ -2452,7 +2490,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; /* Ring 0, ELS / CT buffers */ - lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); + lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); /* Ring 2 - FCP no buffers needed */ return 0; @@ -2640,6 +2678,13 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); + if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { + /* Remove the NVME transport reference now and + * continue to remove the node. 
+ */ + lpfc_nlp_put(ndlp); + } + lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } @@ -2894,11 +2939,6 @@ lpfc_online(struct lpfc_hba *phba) lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); - if (!lpfc_sli_queue_setup(phba)) { - lpfc_unblock_mgmt_io(phba); - return 1; - } - if (phba->sli_rev == LPFC_SLI_REV4) { if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ lpfc_unblock_mgmt_io(phba); @@ -2909,6 +2949,7 @@ lpfc_online(struct lpfc_hba *phba) vpis_cleared = true; spin_unlock_irq(&phba->hbalock); } else { + lpfc_sli_queue_init(phba); if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); return 1; @@ -3098,7 +3139,9 @@ static void lpfc_scsi_free(struct lpfc_hba *phba) { struct lpfc_scsi_buf *sb, *sb_next; - struct lpfc_iocbq *io, *io_next; + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return; spin_lock_irq(&phba->hbalock); @@ -3108,7 +3151,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, list) { list_del(&sb->list); - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; @@ -3119,25 +3162,58 @@ lpfc_scsi_free(struct lpfc_hba *phba) list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, list) { list_del(&sb->list); - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } spin_unlock(&phba->scsi_buf_list_get_lock); + spin_unlock_irq(&phba->hbalock); +} +/** + * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists + * @phba: pointer to lpfc hba data structure. + * + * This routine is to free all the NVME buffers and IOCBs from the driver + * list back to kernel. It is called from lpfc_pci_remove_one to free + * the internal resources before the device is removed from the system. + **/ +static void +lpfc_nvme_free(struct lpfc_hba *phba) +{ + struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; - /* Release all the lpfc_iocbq entries maintained by this host. */ - list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { - list_del(&io->list); - kfree(io); - phba->total_iocbq_bufs--; - } + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return; + + spin_lock_irq(&phba->hbalock); + /* Release all the lpfc_nvme_bufs maintained by this host. */ + spin_lock(&phba->nvme_buf_list_put_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &phba->lpfc_nvme_buf_list_put, list) { + list_del(&lpfc_ncmd->list); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + phba->total_nvme_bufs--; + } + spin_unlock(&phba->nvme_buf_list_put_lock); + + spin_lock(&phba->nvme_buf_list_get_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &phba->lpfc_nvme_buf_list_get, list) { + list_del(&lpfc_ncmd->list); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + phba->total_nvme_bufs--; + } + spin_unlock(&phba->nvme_buf_list_get_lock); spin_unlock_irq(&phba->hbalock); } - /** - * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping + * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. 
* * This routine first calculates the sizes of the current els and allocated @@ -3149,20 +3225,18 @@ lpfc_scsi_free(struct lpfc_hba *phba) * 0 - successful (for now, it always returns 0) **/ int -lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) +lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) { struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; - struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL; - uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt; + uint16_t i, lxri, xri_cnt, els_xri_cnt; LIST_HEAD(els_sgl_list); - LIST_HEAD(scsi_sgl_list); int rc; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; /* * update on pci function's els xri-sgl list */ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { /* els xri-sgl expanded */ xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; @@ -3198,9 +3272,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) list_add_tail(&sglq_entry->list, &els_sgl_list); } spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&els_sgl_list, + &phba->sli4_hba.lpfc_els_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { /* els xri-sgl shrinked */ @@ -3210,24 +3285,22 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) "%d to %d\n", phba->sli4_hba.els_xri_cnt, els_xri_cnt); spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list); - spin_unlock(&pring->ring_lock); - spin_unlock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, + &els_sgl_list); /* release extra els sgls from list */ for (i = 0; i < xri_cnt; i++) { list_remove_head(&els_sgl_list, sglq_entry, struct lpfc_sglq, list); if (sglq_entry) { - lpfc_mbuf_free(phba, sglq_entry->virt, - sglq_entry->phys); + __lpfc_mbuf_free(phba, sglq_entry->virt, + sglq_entry->phys); kfree(sglq_entry); } } - spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&pring->ring_lock); + list_splice_init(&els_sgl_list, + &phba->sli4_hba.lpfc_els_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -3239,7 +3312,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) sglq_entry = NULL; sglq_entry_next = NULL; list_for_each_entry_safe(sglq_entry, sglq_entry_next, - &phba->sli4_hba.lpfc_sgl_list, list) { + &phba->sli4_hba.lpfc_els_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -3251,21 +3324,182 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) sglq_entry->sli4_lxritag = lxri; sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } + return 0; + +out_free_mem: + lpfc_free_els_sgl_list(phba); + return rc; +} + +/** + * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. 
+ * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; + uint16_t i, lxri, xri_cnt, els_xri_cnt; + uint16_t nvmet_xri_cnt, tot_cnt; + LIST_HEAD(nvmet_sgl_list); + int rc; /* - * update on pci function's allocated scsi xri-sgl list + * update on pci function's nvmet xri-sgl list + */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; + tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + if (nvmet_xri_cnt > tot_cnt) { + phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq; + nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6301 NVMET post-sgl count changed to %d\n", + phba->cfg_nvmet_mrq_post); + } + + if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { + /* els xri-sgl expanded */ + xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6302 NVMET xri-sgl cnt grew from %d to %d\n", + phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); + /* allocate the additional nvmet sgls */ + for (i = 0; i < xri_cnt; i++) { + sglq_entry = kzalloc(sizeof(struct lpfc_sglq), + GFP_KERNEL); + if (sglq_entry == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6303 Failure to allocate an " + "NVMET sgl entry:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->buff_type = NVMET_BUFF_TYPE; + sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, + &sglq_entry->phys); + if (sglq_entry->virt == NULL) { + kfree(sglq_entry); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6304 Failure to allocate an " + "NVMET buf:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sgl = sglq_entry->virt; + memset(sglq_entry->sgl, 0, + phba->cfg_sg_dma_buf_size); + sglq_entry->state = SGL_FREED; + list_add_tail(&sglq_entry->list, &nvmet_sgl_list); + } + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&nvmet_sgl_list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { + /* nvmet xri-sgl shrunk */ + xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6305 NVMET xri-sgl count decreased from " + "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, + nvmet_xri_cnt); + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, + &nvmet_sgl_list); + /* release extra nvmet sgls from list */ + for (i = 0; i < xri_cnt; i++) { + list_remove_head(&nvmet_sgl_list, + sglq_entry, struct lpfc_sglq, list); + if (sglq_entry) { + lpfc_nvmet_buf_free(phba, sglq_entry->virt, + sglq_entry->phys); + kfree(sglq_entry); + } + } + list_splice_init(&nvmet_sgl_list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + } else + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6306 NVMET xri-sgl count unchanged: %d\n", + nvmet_xri_cnt); + phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; + + /* update xris to nvmet sgls on the list */ + sglq_entry = NULL; + sglq_entry_next = NULL; + list_for_each_entry_safe(sglq_entry, sglq_entry_next, + &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + lpfc_printf_log(phba, 
KERN_ERR, LOG_SLI, + "6307 Failed to allocate xri for " + "NVMET sgl\n"); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sli4_lxritag = lxri; + sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + } + return 0; + +out_free_mem: + lpfc_free_nvmet_sgl_list(phba); + return rc; +} + +/** + * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. + * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_scsi_buf *psb, *psb_next; + uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt; + LIST_HEAD(scsi_sgl_list); + int rc; + + /* + * update on pci function's els xri-sgl list */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); phba->total_scsi_bufs = 0; + /* + * update on pci function's allocated scsi xri-sgl list + */ /* maximum number of xris available for scsi buffers */ phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2401 Current allocated SCSI xri-sgl count:%d, " - "maximum SCSI xri count:%d\n", - phba->sli4_hba.scsi_xri_cnt, - phba->sli4_hba.scsi_xri_max); + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return 0; + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + phba->sli4_hba.scsi_xri_max = /* Split them up */ + (phba->sli4_hba.scsi_xri_max * + phba->cfg_xri_split) / 100; spin_lock_irq(&phba->scsi_buf_list_get_lock); spin_lock(&phba->scsi_buf_list_put_lock); @@ -3283,7 +3517,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) list_remove_head(&scsi_sgl_list, psb, struct lpfc_scsi_buf, list); if (psb) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + pci_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); } @@ -3314,16 +3548,113 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); spin_unlock(&phba->scsi_buf_list_put_lock); spin_unlock_irq(&phba->scsi_buf_list_get_lock); - return 0; out_free_mem: - lpfc_free_els_sgl_list(phba); lpfc_scsi_free(phba); return rc; } /** + * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. 
+ * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; + uint16_t i, lxri, els_xri_cnt; + uint16_t nvme_xri_cnt, nvme_xri_max; + LIST_HEAD(nvme_sgl_list); + int rc; + + phba->total_nvme_bufs = 0; + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return 0; + /* + * update on pci function's allocated nvme xri-sgl list + */ + + /* maximum number of xris available for nvme buffers */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + phba->sli4_hba.nvme_xri_max = nvme_xri_max; + phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6074 Current allocated NVME xri-sgl count:%d, " + "maximum NVME xri count:%d\n", + phba->sli4_hba.nvme_xri_cnt, + phba->sli4_hba.nvme_xri_max); + + spin_lock_irq(&phba->nvme_buf_list_get_lock); + spin_lock(&phba->nvme_buf_list_put_lock); + list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list); + list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list); + spin_unlock(&phba->nvme_buf_list_put_lock); + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + + if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) { + /* max nvme xri shrunk below the allocated nvme buffers */ + spin_lock_irq(&phba->nvme_buf_list_get_lock); + nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt - + phba->sli4_hba.nvme_xri_max; + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + /* release the extra allocated nvme buffers */ + for (i = 0; i < nvme_xri_cnt; i++) { + list_remove_head(&nvme_sgl_list, lpfc_ncmd, + struct lpfc_nvme_buf, list); + if (lpfc_ncmd) { + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + } + } + spin_lock_irq(&phba->nvme_buf_list_get_lock); + phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt; + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + } + + /* update xris associated to remaining allocated nvme buffers */ + lpfc_ncmd = NULL; + lpfc_ncmd_next = NULL; + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &nvme_sgl_list, list) { + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6075 Failed to allocate xri for " + "nvme buffer\n"); + rc = -ENOMEM; + goto out_free_mem; + } + lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; + lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + } + spin_lock_irq(&phba->nvme_buf_list_get_lock); + spin_lock(&phba->nvme_buf_list_put_lock); + list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); + spin_unlock(&phba->nvme_buf_list_put_lock); + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + return 0; + +out_free_mem: + lpfc_nvme_free(phba); + return rc; +} + +/** * lpfc_create_port - Create an FC port * @phba: pointer to lpfc hba data structure. * @instance: a unique integer ID to this FC port. 
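The SGL update routines in the hunks above all follow the same budgeting pattern: the port's max_xri pool is first reduced by the ELS allocation, then split between FCP and NVME by the cfg_xri_split percentage, while target mode clamps cfg_nvmet_mrq * cfg_nvmet_mrq_post to whatever remains. A minimal standalone sketch of that arithmetic, using plain integers instead of the driver's phba fields (the xri_split() helper below is hypothetical, not part of the patch):

/*
 * Simplified illustration of the XRI budgeting done by
 * lpfc_sli4_scsi_sgl_update(), lpfc_sli4_nvme_sgl_update() and
 * lpfc_sli4_nvmet_sgl_update(). Field names mirror the diff; the
 * helper itself is only an illustration.
 */
#include <stdio.h>

struct xri_budget {
	unsigned int scsi_xri_max;
	unsigned int nvme_xri_max;
	unsigned int nvmet_xri_cnt;
};

static struct xri_budget xri_split(unsigned int max_xri,
				   unsigned int els_xri_cnt,
				   unsigned int xri_split_pct,   /* cfg_xri_split */
				   unsigned int nvmet_mrq,       /* cfg_nvmet_mrq */
				   unsigned int nvmet_mrq_post)  /* cfg_nvmet_mrq_post */
{
	struct xri_budget b;
	unsigned int avail = max_xri - els_xri_cnt;

	/* Initiator mode: split what is left between SCSI and NVME. */
	b.scsi_xri_max = (avail * xri_split_pct) / 100;
	b.nvme_xri_max = avail - b.scsi_xri_max;

	/* Target mode: cap the NVMET posting count to the available pool,
	 * the same clamp lpfc_sli4_nvmet_sgl_update() applies by lowering
	 * cfg_nvmet_mrq_post.
	 */
	b.nvmet_xri_cnt = nvmet_mrq * nvmet_mrq_post;
	if (b.nvmet_xri_cnt > avail)
		b.nvmet_xri_cnt = (avail / nvmet_mrq) * nvmet_mrq;

	return b;
}

int main(void)
{
	struct xri_budget b = xri_split(2048, 256, 50, 4, 256);

	printf("scsi=%u nvme=%u nvmet=%u\n",
	       b.scsi_xri_max, b.nvme_xri_max, b.nvmet_xri_cnt);
	return 0;
}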
@@ -3343,18 +3674,23 @@ struct lpfc_vport * lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) { struct lpfc_vport *vport; - struct Scsi_Host *shost; + struct Scsi_Host *shost = NULL; int error = 0; - if (dev != &phba->pcidev->dev) { - shost = scsi_host_alloc(&lpfc_vport_template, - sizeof(struct lpfc_vport)); - } else { - if (phba->sli_rev == LPFC_SLI_REV4) - shost = scsi_host_alloc(&lpfc_template, - sizeof(struct lpfc_vport)); - else - shost = scsi_host_alloc(&lpfc_template_s3, + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + if (dev != &phba->pcidev->dev) { + shost = scsi_host_alloc(&lpfc_vport_template, + sizeof(struct lpfc_vport)); + } else { + if (phba->sli_rev == LPFC_SLI_REV4) + shost = scsi_host_alloc(&lpfc_template, + sizeof(struct lpfc_vport)); + else + shost = scsi_host_alloc(&lpfc_template_s3, + sizeof(struct lpfc_vport)); + } + } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + shost = scsi_host_alloc(&lpfc_template_nvme, sizeof(struct lpfc_vport)); } if (!shost) @@ -3365,8 +3701,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) vport->load_flag |= FC_LOADING; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; vport->fc_rscn_flush = 0; - lpfc_get_vport_cfgparam(vport); + shost->unique_id = instance; shost->max_id = LPFC_MAX_TARGET; shost->max_lun = vport->cfg_max_luns; @@ -3944,7 +4280,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, lpfc_els_flush_all_cmd(phba); /* Block ELS IOCBs until we have done process link event */ - phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; + phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; /* Update link event statistics */ phba->sli.slistat.link_event++; @@ -4103,7 +4439,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) lpfc_els_flush_all_cmd(phba); /* Block ELS IOCBs until we have done process link event */ - phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; + phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; /* Update link event statistics */ phba->sli.slistat.link_event++; @@ -4272,13 +4608,13 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) sprintf(message, "Unqualified optics - Replace with " "Avago optics for Warranty and Technical " "Support - Link is%s operational", - (operational) ? "" : " not"); + (operational) ? " not" : ""); break; case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: sprintf(message, "Uncertified optics - Replace with " "Avago-certified optics to enable link " "operation - Link is%s operational", - (operational) ? "" : " not"); + (operational) ? " not" : ""); break; default: /* firmware is reporting a status we don't know about */ @@ -5000,40 +5336,79 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) } /** - * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. + * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to set up the driver internal resources specific to - * support the SLI-3 HBA device it attached to. + * This routine is invoked to set up the driver internal resources before the + * device specific resource setup to support the HBA device it attached to. 
* * Return codes - * 0 - successful - * other values - error + * 0 - successful + * other values - error **/ static int -lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) +lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) { - struct lpfc_sli *psli; - int rc; + struct lpfc_sli *psli = &phba->sli; /* - * Initialize timers used by driver + * Driver resources common to all SLI revisions */ + atomic_set(&phba->fast_event_count, 0); + spin_lock_init(&phba->hbalock); - /* Heartbeat timer */ - init_timer(&phba->hb_tmofunc); - phba->hb_tmofunc.function = lpfc_hb_timeout; - phba->hb_tmofunc.data = (unsigned long)phba; + /* Initialize ndlp management spinlock */ + spin_lock_init(&phba->ndlp_lock); + + INIT_LIST_HEAD(&phba->port_list); + INIT_LIST_HEAD(&phba->work_list); + init_waitqueue_head(&phba->wait_4_mlo_m_q); + + /* Initialize the wait queue head for the kernel thread */ + init_waitqueue_head(&phba->work_waitq); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1403 Protocols supported %s %s %s\n", + ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? + "SCSI" : " "), + ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? + "NVME" : " "), + (phba->nvmet_support ? "NVMET" : " ")); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + /* Initialize the scsi buffer list used by driver for scsi IO */ + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + } + + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && + (phba->nvmet_support == 0)) { + /* Initialize the NVME buffer list used by driver for NVME IO */ + spin_lock_init(&phba->nvme_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get); + spin_lock_init(&phba->nvme_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); + } + + /* Initialize the fabric iocb list */ + INIT_LIST_HEAD(&phba->fabric_iocb_list); + + /* Initialize list to save ELS buffers */ + INIT_LIST_HEAD(&phba->elsbuf); + + /* Initialize FCF connection rec list */ + INIT_LIST_HEAD(&phba->fcf_conn_rec_list); + + /* Initialize OAS configuration list */ + spin_lock_init(&phba->devicelock); + INIT_LIST_HEAD(&phba->luns); - psli = &phba->sli; /* MBOX heartbeat timer */ init_timer(&psli->mbox_tmo); psli->mbox_tmo.function = lpfc_mbox_timeout; psli->mbox_tmo.data = (unsigned long) phba; - /* FCP polling mode timer */ - init_timer(&phba->fcp_poll_timer); - phba->fcp_poll_timer.function = lpfc_poll_timeout; - phba->fcp_poll_timer.data = (unsigned long) phba; /* Fabric block timer */ init_timer(&phba->fabric_block_timer); phba->fabric_block_timer.function = lpfc_fabric_block_timeout; @@ -5042,6 +5417,38 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) init_timer(&phba->eratt_poll); phba->eratt_poll.function = lpfc_poll_eratt; phba->eratt_poll.data = (unsigned long) phba; + /* Heartbeat timer */ + init_timer(&phba->hb_tmofunc); + phba->hb_tmofunc.function = lpfc_hb_timeout; + phba->hb_tmofunc.data = (unsigned long)phba; + + return 0; +} + +/** + * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources specific to + * support the SLI-3 HBA device it attached to. 
+ * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) +{ + int rc; + + /* + * Initialize timers used by driver + */ + + /* FCP polling mode timer */ + init_timer(&phba->fcp_poll_timer); + phba->fcp_poll_timer.function = lpfc_poll_timeout; + phba->fcp_poll_timer.data = (unsigned long) phba; /* Host attention work mask setup */ phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); @@ -5049,6 +5456,12 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) /* Get all the module params for configuring this host */ lpfc_get_cfgparam(phba); + /* Set up phase-1 common device driver resources */ + + rc = lpfc_setup_driver_resource_phase1(phba); + if (rc) + return -ENODEV; + if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { phba->menlo_flag |= HBA_MENLO_SUPPORT; /* check for menlo minimum sg count */ @@ -5056,10 +5469,10 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; } - if (!phba->sli.ring) - phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING * + if (!phba->sli.sli3_ring) + phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING * sizeof(struct lpfc_sli_ring), GFP_KERNEL); - if (!phba->sli.ring) + if (!phba->sli.sli3_ring) return -ENOMEM; /* @@ -5118,7 +5531,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) * Initialize the SLI Layer to run with lpfc HBAs. */ lpfc_sli_setup(phba); - lpfc_sli_queue_setup(phba); + lpfc_sli_queue_init(phba); /* Allocate device driver memory */ if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) @@ -5174,18 +5587,27 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { - struct lpfc_vector_map_info *cpup; - struct lpfc_sli *psli; LPFC_MBOXQ_t *mboxq; - int rc, i, hbq_count, max_buf_size; + MAILBOX_t *mb; + int rc, i, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; int longs; int fof_vectors = 0; + uint64_t wwn; + + phba->sli4_hba.num_online_cpu = num_online_cpus(); + phba->sli4_hba.num_present_cpu = lpfc_present_cpu; + phba->sli4_hba.curr_disp_cpu = 0; /* Get all the module params for configuring this host */ lpfc_get_cfgparam(phba); + /* Set up phase-1 common device driver resources */ + rc = lpfc_setup_driver_resource_phase1(phba); + if (rc) + return -ENODEV; + /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); if (rc) @@ -5195,27 +5617,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) * Initialize timers used by driver */ - /* Heartbeat timer */ - init_timer(&phba->hb_tmofunc); - phba->hb_tmofunc.function = lpfc_hb_timeout; - phba->hb_tmofunc.data = (unsigned long)phba; init_timer(&phba->rrq_tmr); phba->rrq_tmr.function = lpfc_rrq_timeout; phba->rrq_tmr.data = (unsigned long)phba; - psli = &phba->sli; - /* MBOX heartbeat timer */ - init_timer(&psli->mbox_tmo); - psli->mbox_tmo.function = lpfc_mbox_timeout; - psli->mbox_tmo.data = (unsigned long) phba; - /* Fabric block timer */ - init_timer(&phba->fabric_block_timer); - phba->fabric_block_timer.function = lpfc_fabric_block_timeout; - phba->fabric_block_timer.data = (unsigned long) phba; - /* EA polling mode timer */ - init_timer(&phba->eratt_poll); - phba->eratt_poll.function = lpfc_poll_eratt; - phba->eratt_poll.data = (unsigned long) phba; /* FCF rediscover timer */ init_timer(&phba->fcf.redisc_wait); phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; @@ -5242,14 +5647,9 @@ 
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands - * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. + * we will associate a new ring, for each EQ/CQ/WQ tuple. + * The WQ create will allocate the ring. */ - if (!phba->sli.ring) - phba->sli.ring = kzalloc( - (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * - sizeof(struct lpfc_sli_ring), GFP_KERNEL); - if (!phba->sli.ring) - return -ENOMEM; /* * It doesn't matter what family our adapter is in, we are @@ -5261,43 +5661,45 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; /* - * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size - * used to create the sg_dma_buf_pool must be dynamically calculated. + * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size + * used to create the sg_dma_buf_pool must be calculated. */ - if (phba->cfg_enable_bg) { /* - * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, - * the FCP rsp, and a SGE for each. Sice we have no control - * over how many protection data segments the SCSI Layer + * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, + * the FCP rsp, and a SGE. Sice we have no control + * over how many protection segments the SCSI Layer * will hand us (ie: there could be one for every block - * in the IO), we just allocate enough SGEs to accomidate - * our max amount and we need to limit lpfc_sg_seg_cnt to - * minimize the risk of running out. + * in the IO), just allocate enough SGEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt + * to minimize the risk of running out. */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + max_buf_size; + sizeof(struct fcp_rsp) + max_buf_size; /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; + phba->cfg_sg_seg_cnt = + LPFC_MAX_SG_SLI4_SEG_CNT_DIF; } else { /* - * The scsi_buf for a regular I/O will hold the FCP cmnd, + * The scsi_buf for a regular I/O holds the FCP cmnd, * the FCP rsp, a SGE for each, and a SGE for up to * cfg_sg_seg_cnt data segments. */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * + sizeof(struct sli4_sge)); /* Total SGEs for scsi_sg_list */ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; + /* - * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need - * to post 1 page for the SGL. + * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only + * need to post 1 page for the SGL. */ } @@ -5317,21 +5719,28 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->cfg_total_seg_cnt); /* Initialize buffer queue management fields */ - hbq_count = lpfc_sli_hbq_count(); - for (i = 0; i < hbq_count; ++i) - INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); - INIT_LIST_HEAD(&phba->rb_pend_list); + INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; /* * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
*/ - /* Initialize the Abort scsi buffer list used by driver */ - spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + /* Initialize the Abort scsi buffer list used by driver */ + spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + } + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Initialize the Abort nvme buffer list used by driver */ + spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); + } + /* This abort list used by worker thread */ - spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); + spin_lock_init(&phba->sli4_hba.sgl_list_lock); + spin_lock_init(&phba->sli4_hba.nvmet_io_lock); /* * Initialize driver internal slow-path work queues @@ -5359,10 +5768,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* initialize optic_state to 0xFF */ phba->sli4_hba.lnk_info.optic_state = 0xff; - /* Initialize the driver internal SLI layer lists. */ - lpfc_sli_setup(phba); - lpfc_sli_queue_setup(phba); - /* Allocate device driver memory */ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); if (rc) @@ -5372,8 +5777,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_2) { rc = lpfc_pci_function_reset(phba); - if (unlikely(rc)) - return -ENODEV; + if (unlikely(rc)) { + rc = -ENODEV; + goto out_free_mem; + } phba->temp_sensor_support = 1; } @@ -5410,6 +5817,46 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_bsmbx; } + /* Check for NVMET being configured */ + phba->nvmet_support = 0; + if (lpfc_enable_nvmet_cnt) { + + /* First get WWN of HBA instance */ + lpfc_read_nv(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6016 Mailbox failed , mbxCmd x%x " + "READ_NV, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + rc = -EIO; + goto out_free_bsmbx; + } + mb = &mboxq->u.mb; + memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, + sizeof(uint64_t)); + wwn = cpu_to_be64(wwn); + phba->sli4_hba.wwnn.u.name = wwn; + memcpy(&wwn, (char *)mb->un.varRDnvp.portname, + sizeof(uint64_t)); + /* wwn is WWPN of HBA instance */ + wwn = cpu_to_be64(wwn); + phba->sli4_hba.wwpn.u.name = wwn; + + /* Check to see if it matches any module parameter */ + for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { + if (wwn == lpfc_enable_nvmet[i]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6017 NVME Target %016llx\n", + wwn); + phba->nvmet_support = 1; /* a match */ + } + } + } + + lpfc_nvme_mod_param_dep(phba); + /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. 
*/ lpfc_supported_pages(mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); @@ -5448,9 +5895,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2999 Unsupported SLI4 Parameters " "Extents and RPI headers enabled.\n"); - goto out_free_bsmbx; } + mempool_free(mboxq, phba->mbox_mem_pool); + goto out_free_bsmbx; } + mempool_free(mboxq, phba->mbox_mem_pool); /* Verify OAS is supported */ @@ -5497,11 +5946,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_remove_rpi_hdrs; } - phba->sli4_hba.fcp_eq_hdl = - kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * - (fof_vectors + phba->cfg_fcp_io_channel)), - GFP_KERNEL); - if (!phba->sli4_hba.fcp_eq_hdl) { + phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs, + sizeof(struct lpfc_hba_eq_hdl), + GFP_KERNEL); + if (!phba->sli4_hba.hba_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2572 Failed allocate memory for " "fast-path per-EQ handle array\n"); @@ -5509,52 +5957,31 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcf_rr_bmask; } - phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * - (fof_vectors + - phba->cfg_fcp_io_channel)), GFP_KERNEL); - if (!phba->sli4_hba.msix_entries) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2573 Failed allocate memory for msi-x " - "interrupt vector entries\n"); - rc = -ENOMEM; - goto out_free_fcp_eq_hdl; - } - - phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * - phba->sli4_hba.num_present_cpu), - GFP_KERNEL); + phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu, + sizeof(struct lpfc_vector_map_info), + GFP_KERNEL); if (!phba->sli4_hba.cpu_map) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3327 Failed allocate memory for msi-x " "interrupt vector mapping\n"); rc = -ENOMEM; - goto out_free_msix; + goto out_free_hba_eq_hdl; } if (lpfc_used_cpu == NULL) { - lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu), - GFP_KERNEL); + lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t), + GFP_KERNEL); if (!lpfc_used_cpu) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3335 Failed allocate memory for msi-x " "interrupt vector mapping\n"); kfree(phba->sli4_hba.cpu_map); rc = -ENOMEM; - goto out_free_msix; + goto out_free_hba_eq_hdl; } for (i = 0; i < lpfc_present_cpu; i++) lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; } - /* Initialize io channels for round robin */ - cpup = phba->sli4_hba.cpu_map; - rc = 0; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - cpup->channel_id = rc; - rc++; - if (rc >= phba->cfg_fcp_io_channel) - rc = 0; - } - /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. @@ -5574,10 +6001,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; -out_free_msix: - kfree(phba->sli4_hba.msix_entries); -out_free_fcp_eq_hdl: - kfree(phba->sli4_hba.fcp_eq_hdl); +out_free_hba_eq_hdl: + kfree(phba->sli4_hba.hba_eq_hdl); out_free_fcf_rr_bmask: kfree(phba->fcf.fcf_rr_bmask); out_remove_rpi_hdrs: @@ -5611,11 +6036,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) phba->sli4_hba.num_online_cpu = 0; phba->sli4_hba.curr_disp_cpu = 0; - /* Free memory allocated for msi-x interrupt vector entries */ - kfree(phba->sli4_hba.msix_entries); - /* Free memory allocated for fast-path work queue handles */ - kfree(phba->sli4_hba.fcp_eq_hdl); + kfree(phba->sli4_hba.hba_eq_hdl); /* Free the allocated rpi headers. 
*/ lpfc_sli4_remove_rpi_hdrs(phba); @@ -5627,6 +6049,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) /* Free the ELS sgl list */ lpfc_free_active_sgl(phba); lpfc_free_els_sgl_list(phba); + lpfc_free_nvmet_sgl_list(phba); /* Free the completion queue EQ event pool */ lpfc_sli4_cq_event_release_all(phba); @@ -5689,58 +6112,6 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) } /** - * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up the driver internal resources before the - * device specific resource setup to support the HBA device it attached to. - * - * Return codes - * 0 - successful - * other values - error - **/ -static int -lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) -{ - /* - * Driver resources common to all SLI revisions - */ - atomic_set(&phba->fast_event_count, 0); - spin_lock_init(&phba->hbalock); - - /* Initialize ndlp management spinlock */ - spin_lock_init(&phba->ndlp_lock); - - INIT_LIST_HEAD(&phba->port_list); - INIT_LIST_HEAD(&phba->work_list); - init_waitqueue_head(&phba->wait_4_mlo_m_q); - - /* Initialize the wait queue head for the kernel thread */ - init_waitqueue_head(&phba->work_waitq); - - /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_get_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); - spin_lock_init(&phba->scsi_buf_list_put_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - - /* Initialize the fabric iocb list */ - INIT_LIST_HEAD(&phba->fabric_iocb_list); - - /* Initialize list to save ELS buffers */ - INIT_LIST_HEAD(&phba->elsbuf); - - /* Initialize FCF connection rec list */ - INIT_LIST_HEAD(&phba->fcf_conn_rec_list); - - /* Initialize OAS configuration list */ - spin_lock_init(&phba->devicelock); - INIT_LIST_HEAD(&phba->luns); - - return 0; -} - -/** * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. * @phba: pointer to lpfc hba data structure. * @@ -5887,13 +6258,12 @@ static void lpfc_free_els_sgl_list(struct lpfc_hba *phba) { LIST_HEAD(sglq_list); - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; /* Retrieve all els sgls from driver list */ spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); - spin_unlock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); /* Now free the sgl list */ @@ -5901,6 +6271,33 @@ lpfc_free_els_sgl_list(struct lpfc_hba *phba) } /** + * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's nvmet sgl list and memory. 
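Most of the lpfc_hw4.h additions above are SHIFT/MASK/WORD macro triplets describing bit-fields inside 32-bit mailbox and WQE words. A self-contained sketch of how such a triplet is consumed; the local bf_set()/bf_get() helpers below follow the pattern the driver's own helpers use, but are redefined here as assumptions so the example compiles on its own:

#include <stdint.h>
#include <stdio.h>

/* Triplet taken from the REG_FCFI_MRQ hunk above (word9, policy field). */
#define lpfc_reg_fcfi_mrq_policy_SHIFT	12
#define lpfc_reg_fcfi_mrq_policy_MASK	0x0000000F
#define lpfc_reg_fcfi_mrq_policy_WORD	word9

/* Local stand-ins for the driver's bit-field accessors. */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/* Only the one word touched by the example is modelled here. */
struct reg_fcfi_mrq_sketch {
	uint32_t word9;
};

int main(void)
{
	struct reg_fcfi_mrq_sketch mb = { 0 };

	bf_set(lpfc_reg_fcfi_mrq_policy, &mb, 0x3);	/* write the 4-bit field */
	printf("word9=0x%08x policy=%u\n",
	       mb.word9, bf_get(lpfc_reg_fcfi_mrq_policy, &mb));
	return 0;
}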
+ **/ +static void +lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + LIST_HEAD(sglq_list); + + /* Retrieve all nvmet sgls from driver list */ + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + + /* Now free the sgl list */ + list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { + list_del(&sglq_entry->list); + lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); + kfree(sglq_entry); + } +} + +/** * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. * @phba: pointer to lpfc hba data structure. * @@ -5947,14 +6344,19 @@ static void lpfc_init_sgl_list(struct lpfc_hba *phba) { /* Initialize and populate the sglq list per host/VF. */ - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list); /* els xri-sgl book keeping */ phba->sli4_hba.els_xri_cnt = 0; /* scsi xri-buffer book keeping */ phba->sli4_hba.scsi_xri_cnt = 0; + + /* nvme xri-buffer book keeping */ + phba->sli4_hba.nvme_xri_cnt = 0; } /** @@ -6185,9 +6587,9 @@ lpfc_hba_free(struct lpfc_hba *phba) /* Release the driver assigned board number */ idr_remove(&lpfc_hba_index, phba->brd_no); - /* Free memory allocated with sli rings */ - kfree(phba->sli.ring); - phba->sli.ring = NULL; + /* Free memory allocated with sli3 rings */ + kfree(phba->sli.sli3_ring); + phba->sli.sli3_ring = NULL; kfree(phba); return; @@ -6223,6 +6625,23 @@ lpfc_create_shost(struct lpfc_hba *phba) shost = lpfc_shost_from_vport(vport); phba->pport = vport; + + if (phba->nvmet_support) { + /* Only 1 vport (pport) will support NVME target */ + if (phba->txrdy_payload_pool == NULL) { + phba->txrdy_payload_pool = pci_pool_create( + "txrdy_pool", phba->pcidev, + TXRDY_PAYLOAD_LEN, 16, 0); + if (phba->txrdy_payload_pool) { + phba->targetport = NULL; + phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; + lpfc_printf_log(phba, KERN_INFO, + LOG_INIT | LOG_NVME_DISC, + "6076 NVME Target Found\n"); + } + } + } + lpfc_debugfs_initialize(vport); /* Put reference to SCSI host to driver's device private data */ pci_set_drvdata(phba->pcidev, shost); @@ -6504,8 +6923,6 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); - INIT_LIST_HEAD(&phba->rb_pend_list); - phba->MBslimaddr = phba->slim_memmap_p; phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; @@ -7009,7 +7426,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) "VPI(B:%d M:%d) " "VFI(B:%d M:%d) " "RPI(B:%d M:%d) " - "FCFI(Count:%d)\n", + "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", phba->sli4_hba.extents_in_use, phba->sli4_hba.max_cfg_param.xri_base, phba->sli4_hba.max_cfg_param.max_xri, @@ -7019,7 +7436,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->sli4_hba.max_cfg_param.max_vfi, phba->sli4_hba.max_cfg_param.rpi_base, phba->sli4_hba.max_cfg_param.max_rpi, - phba->sli4_hba.max_cfg_param.max_fcfi); + phba->sli4_hba.max_cfg_param.max_fcfi, + phba->sli4_hba.max_cfg_param.max_eq, + phba->sli4_hba.max_cfg_param.max_cq, + phba->sli4_hba.max_cfg_param.max_wq, + phba->sli4_hba.max_cfg_param.max_rq); + } if (rc) @@ -7210,11 +7632,11 @@ 
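The new lpfc_free_nvmet_sgl_list() above walks its list with list_for_each_entry_safe(), which caches the next entry before the current one is deleted and freed. A small standalone C sketch of the same idea on a hand-rolled list; the node layout and the xri field are invented for the example:

#include <stdio.h>
#include <stdlib.h>

struct node { int xri; struct node *next; };

/* Free every node whose xri matches, while walking the list.  The next
 * pointer is sampled before the current node is freed, the same guarantee
 * list_for_each_entry_safe() provides. */
static void delete_xri(struct node **head, int xri)
{
	struct node **pp = head;

	while (*pp) {
		struct node *cur = *pp;
		struct node *next = cur->next;	/* cache before freeing */

		if (cur->xri == xri) {
			*pp = next;
			free(cur);
		} else {
			pp = &cur->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 6; i++) {
		struct node *n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->xri = i % 2;
		n->next = head;
		head = n;
	}
	delete_xri(&head, 1);
	for (struct node *n = head; n; n = n->next)
		printf("kept xri %d\n", n->xri);
	delete_xri(&head, 0);
	return 0;
}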
lpfc_setup_endian_order(struct lpfc_hba *phba) } /** - * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts + * lpfc_sli4_queue_verify - Verify and update EQ counts * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to check the user settable queue counts for EQs and - * CQs. after this routine is called the counts will be set to valid values that + * This routine is invoked to check the user settable queue counts for EQs. + * After this routine is called the counts will be set to valid values that * adhere to the constraints of the system's interrupt vectors and the port's * queue resources. * @@ -7225,9 +7647,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) static int lpfc_sli4_queue_verify(struct lpfc_hba *phba) { - int cfg_fcp_io_channel; - uint32_t cpu; - uint32_t i = 0; + int io_channel; int fof_vectors = phba->cfg_fof ? 1 : 0; /* @@ -7236,49 +7656,40 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) */ /* Sanity check on HBA EQ parameters */ - cfg_fcp_io_channel = phba->cfg_fcp_io_channel; - - /* It doesn't make sense to have more io channels then online CPUs */ - for_each_present_cpu(cpu) { - if (cpu_online(cpu)) - i++; - } - phba->sli4_hba.num_online_cpu = i; - phba->sli4_hba.num_present_cpu = lpfc_present_cpu; - phba->sli4_hba.curr_disp_cpu = 0; + io_channel = phba->io_channel_irqs; - if (i < cfg_fcp_io_channel) { + if (phba->sli4_hba.num_online_cpu < io_channel) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3188 Reducing IO channels to match number of " "online CPUs: from %d to %d\n", - cfg_fcp_io_channel, i); - cfg_fcp_io_channel = i; + io_channel, phba->sli4_hba.num_online_cpu); + io_channel = phba->sli4_hba.num_online_cpu; } - if (cfg_fcp_io_channel + fof_vectors > - phba->sli4_hba.max_cfg_param.max_eq) { - if (phba->sli4_hba.max_cfg_param.max_eq < - LPFC_FCP_IO_CHAN_MIN) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2574 Not enough EQs (%d) from the " - "pci function for supporting FCP " - "EQs (%d)\n", - phba->sli4_hba.max_cfg_param.max_eq, - phba->cfg_fcp_io_channel); - goto out_error; - } + if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2575 Reducing IO channels to match number of " "available EQs: from %d to %d\n", - cfg_fcp_io_channel, + io_channel, phba->sli4_hba.max_cfg_param.max_eq); - cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq - - fof_vectors; + io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors; } - /* The actual number of FCP event queues adopted */ - phba->cfg_fcp_io_channel = cfg_fcp_io_channel; + /* The actual number of FCP / NVME event queues adopted */ + if (io_channel != phba->io_channel_irqs) + phba->io_channel_irqs = io_channel; + if (phba->cfg_fcp_io_channel > io_channel) + phba->cfg_fcp_io_channel = io_channel; + if (phba->cfg_nvme_io_channel > io_channel) + phba->cfg_nvme_io_channel = io_channel; + if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq) + phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n", + phba->io_channel_irqs, phba->cfg_fcp_io_channel, + phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq); /* Get EQ depth from module parameter, fake the default for now */ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; @@ -7287,10 +7698,67 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) /* Get CQ depth from module parameter, fake the default for now */ phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; phba->sli4_hba.cq_ecount = 
LPFC_CQE_DEF_COUNT; + return 0; +} +static int +lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) +{ + struct lpfc_queue *qdesc; + int cnt; + + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0508 Failed allocate fast-path NVME CQ (%d)\n", + wqidx); + return 1; + } + phba->sli4_hba.nvme_cq[wqidx] = qdesc; + + cnt = LPFC_NVME_WQSIZE; + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0509 Failed allocate fast-path NVME WQ (%d)\n", + wqidx); + return 1; + } + phba->sli4_hba.nvme_wq[wqidx] = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); + return 0; +} + +static int +lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) +{ + struct lpfc_queue *qdesc; + uint32_t wqesize; + + /* Create Fast Path FCP CQs */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); + return 1; + } + phba->sli4_hba.fcp_cq[wqidx] = qdesc; + + /* Create Fast Path FCP WQs */ + wqesize = (phba->fcp_embed_io) ? + LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; + qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0503 Failed allocate fast-path FCP WQ (%d)\n", + wqidx); + return 1; + } + phba->sli4_hba.fcp_wq[wqidx] = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; -out_error: - return -ENOMEM; } /** @@ -7311,13 +7779,14 @@ int lpfc_sli4_queue_create(struct lpfc_hba *phba) { struct lpfc_queue *qdesc; - uint32_t wqesize; - int idx; + int idx, io_channel, max; /* * Create HBA Record arrays. 
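The two new helpers above, lpfc_alloc_nvme_wq_cq() and lpfc_alloc_fcp_wq_cq(), allocate the CQ/WQ pair for a single io channel and return non-zero on failure so the caller's loop can stop and unwind everything in one place. A compact userspace sketch of that shape; the structures, counts and helper names are placeholders, not driver code:

#include <stdio.h>
#include <stdlib.h>

struct queue { int id; };			/* stand-in for a queue object */
struct channel { struct queue *cq; struct queue *wq; };

/* Allocate the CQ/WQ pair for one channel; non-zero means failure and the
 * caller is expected to tear down whatever has been built so far. */
static int alloc_channel(struct channel *ch, int idx)
{
	ch->cq = calloc(1, sizeof(*ch->cq));
	if (!ch->cq)
		return 1;
	ch->wq = calloc(1, sizeof(*ch->wq));
	if (!ch->wq)
		return 1;		/* the caller's unwind frees ch->cq */
	ch->cq->id = ch->wq->id = idx;
	return 0;
}

static void free_channels(struct channel *ch, int n)
{
	for (int i = 0; i < n; i++) {
		free(ch[i].cq);		/* free(NULL) is a no-op */
		free(ch[i].wq);
	}
}

int main(void)
{
	enum { NCHAN = 4 };
	struct channel ch[NCHAN] = { { 0 } };

	for (int i = 0; i < NCHAN; i++) {
		if (alloc_channel(&ch[i], i)) {
			free_channels(ch, NCHAN);
			return 1;
		}
	}
	printf("%d channels ready\n", NCHAN);
	free_channels(ch, NCHAN);
	return 0;
}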
+ * Both NVME and FCP will share that same vectors / EQs */ - if (!phba->cfg_fcp_io_channel) + io_channel = phba->io_channel_irqs; + if (!io_channel) return -ERANGE; phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; @@ -7326,9 +7795,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; + phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; + phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; + phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; + phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; - phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_io_channel), GFP_KERNEL); + phba->sli4_hba.hba_eq = kcalloc(io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); if (!phba->sli4_hba.hba_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2576 Failed allocate memory for " @@ -7336,44 +7810,115 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } - phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_io_channel), GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2577 Failed allocate memory for fast-path " - "CQ record array\n"); - goto out_error; + if (phba->cfg_fcp_io_channel) { + phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.fcp_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2577 Failed allocate memory for " + "fast-path CQ record array\n"); + goto out_error; + } + phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.fcp_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2578 Failed allocate memory for " + "fast-path FCP WQ record array\n"); + goto out_error; + } + /* + * Since the first EQ can have multiple CQs associated with it, + * this array is used to quickly see if we have a FCP fast-path + * CQ match. + */ + phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel, + sizeof(uint16_t), + GFP_KERNEL); + if (!phba->sli4_hba.fcp_cq_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2545 Failed allocate memory for " + "fast-path CQ map\n"); + goto out_error; + } } - phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_io_channel), GFP_KERNEL); - if (!phba->sli4_hba.fcp_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2578 Failed allocate memory for fast-path " - "WQ record array\n"); - goto out_error; - } + if (phba->cfg_nvme_io_channel) { + phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvme_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6077 Failed allocate memory for " + "fast-path CQ record array\n"); + goto out_error; + } - /* - * Since the first EQ can have multiple CQs associated with it, - * this array is used to quickly see if we have a FCP fast-path - * CQ match. 
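The fcp_cq_map and nvme_cq_map arrays allocated in this function record, per io channel, the id of that channel's fast-path CQ, so the completion path can decide with a short array scan whether a given CQ id belongs to a fast-path channel. A tiny illustration of that lookup; the ids and array size below are invented:

#include <stdio.h>
#include <stdint.h>

/* Per-channel CQ id map, filled in once at setup time (ids are made up). */
static const uint16_t cq_map[4] = { 0x101, 0x102, 0x103, 0x104 };

/* Return the channel index owning qid, or -1 if it is not a fast-path CQ;
 * a flat array keeps this lookup cheap on the completion path. */
static int fast_path_channel(uint16_t qid)
{
	for (int i = 0; i < 4; i++)
		if (cq_map[i] == qid)
			return i;
	return -1;
}

int main(void)
{
	printf("qid 0x103 -> channel %d\n", fast_path_channel(0x103));
	printf("qid 0x200 -> channel %d\n", fast_path_channel(0x200));
	return 0;
}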
- */ - phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * - phba->cfg_fcp_io_channel), GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq_map) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2545 Failed allocate memory for fast-path " - "CQ map\n"); - goto out_error; + phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvme_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2581 Failed allocate memory for " + "fast-path NVME WQ record array\n"); + goto out_error; + } + + /* + * Since the first EQ can have multiple CQs associated with it, + * this array is used to quickly see if we have a NVME fast-path + * CQ match. + */ + phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel, + sizeof(uint16_t), + GFP_KERNEL); + if (!phba->sli4_hba.nvme_cq_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6078 Failed allocate memory for " + "fast-path CQ map\n"); + goto out_error; + } + + if (phba->nvmet_support) { + phba->sli4_hba.nvmet_cqset = kcalloc( + phba->cfg_nvmet_mrq, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvmet_cqset) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3121 Fail allocate memory for " + "fast-path CQ set array\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_hdr = kcalloc( + phba->cfg_nvmet_mrq, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvmet_mrq_hdr) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3122 Fail allocate memory for " + "fast-path RQ set hdr array\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_data = kcalloc( + phba->cfg_nvmet_mrq, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvmet_mrq_data) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3124 Fail allocate memory for " + "fast-path RQ set data array\n"); + goto out_error; + } + } } - /* - * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies - * how many EQs to create. - */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); + /* Create HBA Event Queues (EQs) */ + for (idx = 0; idx < io_channel; idx++) { /* Create EQs */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, phba->sli4_hba.eq_ecount); @@ -7383,33 +7928,42 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } phba->sli4_hba.hba_eq[idx] = qdesc; + } - /* Create Fast Path FCP CQs */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, - phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0499 Failed allocate fast-path FCP " - "CQ (%d)\n", idx); + /* FCP and NVME io channels are not required to be balanced */ + + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + if (lpfc_alloc_fcp_wq_cq(phba, idx)) goto out_error; - } - phba->sli4_hba.fcp_cq[idx] = qdesc; - /* Create Fast Path FCP WQs */ - wqesize = (phba->fcp_embed_io) ? 
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; - qdesc = lpfc_sli4_queue_alloc(phba, wqesize, - phba->sli4_hba.wq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0503 Failed allocate fast-path FCP " - "WQ (%d)\n", idx); + for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) + if (lpfc_alloc_nvme_wq_cq(phba, idx)) + goto out_error; + + /* allocate MRQ CQs */ + max = phba->cfg_nvme_io_channel; + if (max < phba->cfg_nvmet_mrq) + max = phba->cfg_nvmet_mrq; + + for (idx = 0; idx < max; idx++) + if (lpfc_alloc_nvme_wq_cq(phba, idx)) goto out_error; + + if (phba->nvmet_support) { + for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { + qdesc = lpfc_sli4_queue_alloc(phba, + phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3142 Failed allocate NVME " + "CQ Set (%d)\n", idx); + goto out_error; + } + phba->sli4_hba.nvmet_cqset[idx] = qdesc; } - phba->sli4_hba.fcp_wq[idx] = qdesc; } - /* * Create Slow Path Completion Queues (CQs) */ @@ -7463,6 +8017,30 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } phba->sli4_hba.els_wq = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Create NVME LS Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6079 Failed allocate NVME LS CQ\n"); + goto out_error; + } + phba->sli4_hba.nvmels_cq = qdesc; + + /* Create NVME LS Work Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6080 Failed allocate NVME LS WQ\n"); + goto out_error; + } + phba->sli4_hba.nvmels_wq = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); + } /* * Create Receive Queue (RQ) @@ -7488,6 +8066,44 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } phba->sli4_hba.dat_rq = qdesc; + if (phba->nvmet_support) { + for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { + /* Create NVMET Receive Queue for header */ + qdesc = lpfc_sli4_queue_alloc(phba, + phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3146 Failed allocate " + "receive HRQ\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; + + /* Only needed for header of RQ pair */ + qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb), + GFP_KERNEL); + if (qdesc->rqbp == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6131 Failed allocate " + "Header RQBP\n"); + goto out_error; + } + + /* Create NVMET Receive Queue for data */ + qdesc = lpfc_sli4_queue_alloc(phba, + phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3156 Failed allocate " + "receive DRQ\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; + } + } + /* Create the Queues needed for Flash Optimized Fabric operations */ if (phba->cfg_fof) lpfc_fof_queue_create(phba); @@ -7498,6 +8114,39 @@ out_error: return -ENOMEM; } +static inline void +__lpfc_sli4_release_queue(struct lpfc_queue **qp) +{ + if (*qp != NULL) { + lpfc_sli4_queue_free(*qp); + *qp = NULL; + } +} + +static inline void +lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) +{ + int idx; + + if (*qs == NULL) + return; + + for (idx = 0; idx < max; idx++) + __lpfc_sli4_release_queue(&(*qs)[idx]); + + kfree(*qs); + *qs = NULL; 
+} + +static inline void +lpfc_sli4_release_queue_map(uint16_t **qmap) +{ + if (*qmap != NULL) { + kfree(*qmap); + *qmap = NULL; + } +} + /** * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues * @phba: pointer to lpfc hba data structure. @@ -7513,91 +8162,196 @@ out_error: void lpfc_sli4_queue_destroy(struct lpfc_hba *phba) { - int idx; - if (phba->cfg_fof) lpfc_fof_queue_destroy(phba); - if (phba->sli4_hba.hba_eq != NULL) { - /* Release HBA event queue */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { - if (phba->sli4_hba.hba_eq[idx] != NULL) { - lpfc_sli4_queue_free( - phba->sli4_hba.hba_eq[idx]); - phba->sli4_hba.hba_eq[idx] = NULL; - } - } - kfree(phba->sli4_hba.hba_eq); - phba->sli4_hba.hba_eq = NULL; - } + /* Release HBA eqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs); - if (phba->sli4_hba.fcp_cq != NULL) { - /* Release FCP completion queue */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { - if (phba->sli4_hba.fcp_cq[idx] != NULL) { - lpfc_sli4_queue_free( - phba->sli4_hba.fcp_cq[idx]); - phba->sli4_hba.fcp_cq[idx] = NULL; - } - } - kfree(phba->sli4_hba.fcp_cq); - phba->sli4_hba.fcp_cq = NULL; - } + /* Release FCP cqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, + phba->cfg_fcp_io_channel); - if (phba->sli4_hba.fcp_wq != NULL) { - /* Release FCP work queue */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { - if (phba->sli4_hba.fcp_wq[idx] != NULL) { - lpfc_sli4_queue_free( - phba->sli4_hba.fcp_wq[idx]); - phba->sli4_hba.fcp_wq[idx] = NULL; - } - } - kfree(phba->sli4_hba.fcp_wq); - phba->sli4_hba.fcp_wq = NULL; - } + /* Release FCP wqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, + phba->cfg_fcp_io_channel); /* Release FCP CQ mapping array */ - if (phba->sli4_hba.fcp_cq_map != NULL) { - kfree(phba->sli4_hba.fcp_cq_map); - phba->sli4_hba.fcp_cq_map = NULL; - } + lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); + + /* Release NVME cqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq, + phba->cfg_nvme_io_channel); + + /* Release NVME wqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq, + phba->cfg_nvme_io_channel); + + /* Release NVME CQ mapping array */ + lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map); + + lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, + phba->cfg_nvmet_mrq); + + lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, + phba->cfg_nvmet_mrq); + lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, + phba->cfg_nvmet_mrq); /* Release mailbox command work queue */ - if (phba->sli4_hba.mbx_wq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); - phba->sli4_hba.mbx_wq = NULL; - } + __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); /* Release ELS work queue */ - if (phba->sli4_hba.els_wq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.els_wq); - phba->sli4_hba.els_wq = NULL; - } + __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); + + /* Release ELS work queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); /* Release unsolicited receive queue */ - if (phba->sli4_hba.hdr_rq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); - phba->sli4_hba.hdr_rq = NULL; + __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); + __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); + + /* Release ELS complete queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); + + /* Release NVME LS complete queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); + + /* Release mailbox command complete queue */ + 
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); + + /* Everything on this list has been freed */ + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); +} + +int +lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq, int count) +{ + int rc, i; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + struct lpfc_rqb *rqbp; + struct rqb_dmabuf *rqb_buffer; + LIST_HEAD(rqb_buf_list); + + rqbp = hrq->rqbp; + for (i = 0; i < count; i++) { + rqb_buffer = (rqbp->rqb_alloc_buffer)(phba); + if (!rqb_buffer) + break; + rqb_buffer->hrq = hrq; + rqb_buffer->drq = drq; + list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); + } + while (!list_empty(&rqb_buf_list)) { + list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, + hbuf.list); + + hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); + hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); + drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); + drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); + rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); + if (rc < 0) { + (rqbp->rqb_free_buffer)(phba, rqb_buffer); + } else { + list_add_tail(&rqb_buffer->hbuf.list, + &rqbp->rqb_buffer_list); + rqbp->buffer_count++; + } } - if (phba->sli4_hba.dat_rq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); - phba->sli4_hba.dat_rq = NULL; + return 1; +} + +int +lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) +{ + struct lpfc_rqb *rqbp; + struct lpfc_dmabuf *h_buf; + struct rqb_dmabuf *rqb_buffer; + + rqbp = rq->rqbp; + while (!list_empty(&rqbp->rqb_buffer_list)) { + list_remove_head(&rqbp->rqb_buffer_list, h_buf, + struct lpfc_dmabuf, list); + + rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); + (rqbp->rqb_free_buffer)(phba, rqb_buffer); + rqbp->buffer_count--; } + return 1; +} - /* Release ELS complete queue */ - if (phba->sli4_hba.els_cq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.els_cq); - phba->sli4_hba.els_cq = NULL; +static int +lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, + struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, + int qidx, uint32_t qtype) +{ + struct lpfc_sli_ring *pring; + int rc; + + if (!eq || !cq || !wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6085 Fast-path %s (%d) not allocated\n", + ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); + return -ENOMEM; } - /* Release mailbox command complete queue */ - if (phba->sli4_hba.mbx_cq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); - phba->sli4_hba.mbx_cq = NULL; + /* create the Cq first */ + rc = lpfc_cq_create(phba, cq, eq, + (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6086 Failed setup of CQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + return rc; } - return; + if (qtype != LPFC_MBOX) { + /* Setup nvme_cq_map for fast lookup */ + if (cq_map) + *cq_map = cq->queue_id; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", + qidx, cq->queue_id, qidx, eq->queue_id); + + /* create the wq */ + rc = lpfc_wq_create(phba, wq, cq, qtype); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + /* no need to tear down cq - caller will do so */ + return rc; + } + + /* Bind this CQ/WQ to the NVME ring */ + pring = wq->pring; + pring->sli.sli4.wqp = (void *)wq; + cq->pring = pring; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", + qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); + } else { + rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0539 Failed setup of slow-path MQ: " + "rc = 0x%x\n", rc); + /* no need to tear down cq - caller will do so */ + return rc; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_cq->queue_id); + } + + return 0; } /** @@ -7615,15 +8369,12 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) int lpfc_sli4_queue_setup(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - int rc = -ENOMEM; - int fcp_eqidx, fcp_cqidx, fcp_wqidx; - int fcp_cq_index = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; LPFC_MBOXQ_t *mboxq; - uint32_t length; + int qidx; + uint32_t length, io_channel; + int rc = -ENOMEM; /* Check for dual-ULP support */ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -7673,220 +8424,263 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) /* * Set up HBA Event Queues (EQs) */ + io_channel = phba->io_channel_irqs; /* Set up HBA event queue */ - if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { + if (io_channel && !phba->sli4_hba.hba_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3147 Fast-path EQs not allocated\n"); rc = -ENOMEM; goto out_error; } - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { - if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { + for (qidx = 0; qidx < io_channel; qidx++) { + if (!phba->sli4_hba.hba_eq[qidx]) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0522 Fast-path EQ (%d) not " - "allocated\n", fcp_eqidx); + "allocated\n", qidx); rc = -ENOMEM; - goto out_destroy_hba_eq; + goto out_destroy; } - rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], - (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel)); + rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx], + phba->cfg_fcp_imax); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0523 Failed setup of fast-path EQ " - "(%d), rc = 0x%x\n", fcp_eqidx, + "(%d), rc = 0x%x\n", qidx, (uint32_t)rc); - goto out_destroy_hba_eq; + goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2584 HBA EQ setup: " - "queue[%d]-id=%d\n", fcp_eqidx, - phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); - } - - /* Set up fast-path FCP Response Complete Queue */ - if (!phba->sli4_hba.fcp_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3148 Fast-path FCP CQ array not " - "allocated\n"); - rc = -ENOMEM; - goto 
out_destroy_hba_eq; + "2584 HBA EQ setup: queue[%d]-id=%d\n", + qidx, phba->sli4_hba.hba_eq[qidx]->queue_id); } - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { - if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { + if (phba->cfg_nvme_io_channel) { + if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0526 Fast-path FCP CQ (%d) not " - "allocated\n", fcp_cqidx); + "6084 Fast-path NVME %s array not allocated\n", + (phba->sli4_hba.nvme_cq) ? "CQ" : "WQ"); rc = -ENOMEM; - goto out_destroy_fcp_cq; - } - rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], - phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0527 Failed setup of fast-path FCP " - "CQ (%d), rc = 0x%x\n", fcp_cqidx, - (uint32_t)rc); - goto out_destroy_fcp_cq; + goto out_destroy; } - /* Setup fcp_cq_map for fast lookup */ - phba->sli4_hba.fcp_cq_map[fcp_cqidx] = - phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2588 FCP CQ setup: cq[%d]-id=%d, " - "parent seq[%d]-id=%d\n", - fcp_cqidx, - phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, - fcp_cqidx, - phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); - } - - /* Set up fast-path FCP Work Queue */ - if (!phba->sli4_hba.fcp_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3149 Fast-path FCP WQ array not " - "allocated\n"); - rc = -ENOMEM; - goto out_destroy_fcp_cq; + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { + rc = lpfc_create_wq_cq(phba, + phba->sli4_hba.hba_eq[ + qidx % io_channel], + phba->sli4_hba.nvme_cq[qidx], + phba->sli4_hba.nvme_wq[qidx], + &phba->sli4_hba.nvme_cq_map[qidx], + qidx, LPFC_NVME); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6123 Failed to setup fastpath " + "NVME WQ/CQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + goto out_destroy; + } + } } - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { - if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { + if (phba->cfg_fcp_io_channel) { + /* Set up fast-path FCP Response Complete Queue */ + if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0534 Fast-path FCP WQ (%d) not " - "allocated\n", fcp_wqidx); + "3148 Fast-path FCP %s array not allocated\n", + phba->sli4_hba.fcp_cq ? 
"WQ" : "CQ"); rc = -ENOMEM; - goto out_destroy_fcp_wq; - } - rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], - phba->sli4_hba.fcp_cq[fcp_wqidx], - LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0535 Failed setup of fast-path FCP " - "WQ (%d), rc = 0x%x\n", fcp_wqidx, - (uint32_t)rc); - goto out_destroy_fcp_wq; + goto out_destroy; } - /* Bind this WQ to the next FCP ring */ - pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; - pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; - phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2591 FCP WQ setup: wq[%d]-id=%d, " - "parent cq[%d]-id=%d\n", - fcp_wqidx, - phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, - fcp_cq_index, - phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { + rc = lpfc_create_wq_cq(phba, + phba->sli4_hba.hba_eq[ + qidx % io_channel], + phba->sli4_hba.fcp_cq[qidx], + phba->sli4_hba.fcp_wq[qidx], + &phba->sli4_hba.fcp_cq_map[qidx], + qidx, LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0535 Failed to setup fastpath " + "FCP WQ/CQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + goto out_destroy; + } + } } + /* - * Set up Complete Queues (CQs) + * Set up Slow Path Complete Queues (CQs) */ - /* Set up slow-path MBOX Complete Queue as the first CQ */ - if (!phba->sli4_hba.mbx_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0528 Mailbox CQ not allocated\n"); - rc = -ENOMEM; - goto out_destroy_fcp_wq; - } - rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, - phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0529 Failed setup of slow-path mailbox CQ: " - "rc = 0x%x\n", (uint32_t)rc); - goto out_destroy_fcp_wq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", - phba->sli4_hba.mbx_cq->queue_id, - phba->sli4_hba.hba_eq[0]->queue_id); + /* Set up slow-path MBOX CQ/MQ */ - /* Set up slow-path ELS Complete Queue */ - if (!phba->sli4_hba.els_cq) { + if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0530 ELS CQ not allocated\n"); + "0528 %s not allocated\n", + phba->sli4_hba.mbx_cq ? 
+ "Mailbox WQ" : "Mailbox CQ"); rc = -ENOMEM; - goto out_destroy_mbx_cq; - } - rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, - phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0531 Failed setup of slow-path ELS CQ: " - "rc = 0x%x\n", (uint32_t)rc); - goto out_destroy_mbx_cq; + goto out_destroy; } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", - phba->sli4_hba.els_cq->queue_id, - phba->sli4_hba.hba_eq[0]->queue_id); - - /* - * Set up all the Work Queues (WQs) - */ - /* Set up Mailbox Command Queue */ - if (!phba->sli4_hba.mbx_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0538 Slow-path MQ not allocated\n"); - rc = -ENOMEM; - goto out_destroy_els_cq; - } - rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, - phba->sli4_hba.mbx_cq, LPFC_MBOX); + rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], + phba->sli4_hba.mbx_cq, + phba->sli4_hba.mbx_wq, + NULL, 0, LPFC_MBOX); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0539 Failed setup of slow-path MQ: " - "rc = 0x%x\n", rc); - goto out_destroy_els_cq; + "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + if (phba->nvmet_support) { + if (!phba->sli4_hba.nvmet_cqset) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3165 Fast-path NVME CQ Set " + "array not allocated\n"); + rc = -ENOMEM; + goto out_destroy; + } + if (phba->cfg_nvmet_mrq > 1) { + rc = lpfc_cq_create_set(phba, + phba->sli4_hba.nvmet_cqset, + phba->sli4_hba.hba_eq, + LPFC_WCQ, LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3164 Failed setup of NVME CQ " + "Set, rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + } else { + /* Set up NVMET Receive Complete Queue */ + rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], + phba->sli4_hba.hba_eq[0], + LPFC_WCQ, LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6089 Failed setup NVMET CQ: " + "rc = 0x%x\n", (uint32_t)rc); + goto out_destroy; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6090 NVMET CQ setup: cq-id=%d, " + "parent eq-id=%d\n", + phba->sli4_hba.nvmet_cqset[0]->queue_id, + phba->sli4_hba.hba_eq[0]->queue_id); + } } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", - phba->sli4_hba.mbx_wq->queue_id, - phba->sli4_hba.mbx_cq->queue_id); - /* Set up slow-path ELS Work Queue */ - if (!phba->sli4_hba.els_wq) { + /* Set up slow-path ELS WQ/CQ */ + if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0536 Slow-path ELS WQ not allocated\n"); + "0530 ELS %s not allocated\n", + phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); rc = -ENOMEM; - goto out_destroy_mbx_wq; + goto out_destroy; } - rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, - phba->sli4_hba.els_cq, LPFC_ELS); + rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], + phba->sli4_hba.els_cq, + phba->sli4_hba.els_wq, + NULL, 0, LPFC_ELS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0537 Failed setup of slow-path ELS WQ: " - "rc = 0x%x\n", (uint32_t)rc); - goto out_destroy_mbx_wq; + "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; } - - /* Bind this WQ to the ELS ring */ - pring = &psli->ring[LPFC_ELS_RING]; - pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; - phba->sli4_hba.els_cq->pring = pring; - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", phba->sli4_hba.els_wq->queue_id, phba->sli4_hba.els_cq->queue_id); + if (phba->cfg_nvme_io_channel) { + /* Set up NVME LS Complete Queue */ + if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6091 LS %s not allocated\n", + phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); + rc = -ENOMEM; + goto out_destroy; + } + rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], + phba->sli4_hba.nvmels_cq, + phba->sli4_hba.nvmels_wq, + NULL, 0, LPFC_NVME_LS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0529 Failed setup of NVVME LS WQ/CQ: " + "rc = 0x%x\n", (uint32_t)rc); + goto out_destroy; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6096 ELS WQ setup: wq-id=%d, " + "parent cq-id=%d\n", + phba->sli4_hba.nvmels_wq->queue_id, + phba->sli4_hba.nvmels_cq->queue_id); + } + /* - * Create Receive Queue (RQ) + * Create NVMET Receive Queue (RQ) */ + if (phba->nvmet_support) { + if ((!phba->sli4_hba.nvmet_cqset) || + (!phba->sli4_hba.nvmet_mrq_hdr) || + (!phba->sli4_hba.nvmet_mrq_data)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6130 MRQ CQ Queues not " + "allocated\n"); + rc = -ENOMEM; + goto out_destroy; + } + if (phba->cfg_nvmet_mrq > 1) { + rc = lpfc_mrq_create(phba, + phba->sli4_hba.nvmet_mrq_hdr, + phba->sli4_hba.nvmet_mrq_data, + phba->sli4_hba.nvmet_cqset, + LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6098 Failed setup of NVMET " + "MRQ: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + + } else { + rc = lpfc_rq_create(phba, + phba->sli4_hba.nvmet_mrq_hdr[0], + phba->sli4_hba.nvmet_mrq_data[0], + phba->sli4_hba.nvmet_cqset[0], + LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6057 Failed setup of NVMET " + "Receive Queue: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + + lpfc_printf_log( + phba, KERN_INFO, LOG_INIT, + "6099 NVMET RQ setup: hdr-rq-id=%d, " + "dat-rq-id=%d parent cq-id=%d\n", + phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, + phba->sli4_hba.nvmet_mrq_data[0]->queue_id, + phba->sli4_hba.nvmet_cqset[0]->queue_id); + + } + } + if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0540 Receive Queue not allocated\n"); rc = -ENOMEM; - goto out_destroy_els_wq; + goto out_destroy; } lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); @@ -7898,7 +8692,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0541 Failed setup of Receive Queue: " "rc = 0x%x\n", (uint32_t)rc); - goto out_destroy_fcp_wq; + goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -7914,7 +8708,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 
"0549 Failed setup of FOF Queues: " "rc = 0x%x\n", rc); - goto out_destroy_els_rq; + goto out_destroy; } } @@ -7922,30 +8716,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) * Configure EQ delay multipier for interrupt coalescing using * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time. */ - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; - fcp_eqidx += LPFC_MAX_EQ_DELAY) - lpfc_modify_fcp_eq_delay(phba, fcp_eqidx); + for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY) + lpfc_modify_hba_eq_delay(phba, qidx); return 0; -out_destroy_els_rq: - lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); -out_destroy_els_wq: - lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); -out_destroy_mbx_wq: - lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); -out_destroy_els_cq: - lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); -out_destroy_mbx_cq: - lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); -out_destroy_fcp_wq: - for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); -out_destroy_fcp_cq: - for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) - lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); -out_destroy_hba_eq: - for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) - lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]); +out_destroy: + lpfc_sli4_queue_unset(phba); out_error: return rc; } @@ -7965,39 +8741,81 @@ out_error: void lpfc_sli4_queue_unset(struct lpfc_hba *phba) { - int fcp_qidx; + int qidx; /* Unset the queues created for Flash Optimized Fabric operations */ if (phba->cfg_fof) lpfc_fof_queue_destroy(phba); + /* Unset mailbox command work queue */ - lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); + if (phba->sli4_hba.mbx_wq) + lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); + + /* Unset NVME LS work queue */ + if (phba->sli4_hba.nvmels_wq) + lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); + /* Unset ELS work queue */ - lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); + if (phba->sli4_hba.els_cq) + lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); + /* Unset unsolicited receive queue */ - lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); + if (phba->sli4_hba.hdr_rq) + lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, + phba->sli4_hba.dat_rq); + /* Unset FCP work queue */ - if (phba->sli4_hba.fcp_wq) { - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; - fcp_qidx++) - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); + if (phba->sli4_hba.fcp_wq) + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) + lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]); + + /* Unset NVME work queue */ + if (phba->sli4_hba.nvme_wq) { + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) + lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]); } + /* Unset mailbox command complete queue */ - lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); + if (phba->sli4_hba.mbx_cq) + lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); + /* Unset ELS complete queue */ - lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); - /* Unset FCP response complete queue */ - if (phba->sli4_hba.fcp_cq) { - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; - fcp_qidx++) - lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); + if (phba->sli4_hba.els_cq) + lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); + + /* Unset NVME LS complete queue */ + if (phba->sli4_hba.nvmels_cq) + lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); + + /* Unset NVME response complete queue */ + if (phba->sli4_hba.nvme_cq) + for (qidx = 0; qidx < 
phba->cfg_nvme_io_channel; qidx++) + lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]); + + /* Unset NVMET MRQ queue */ + if (phba->sli4_hba.nvmet_mrq_hdr) { + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) + lpfc_rq_destroy(phba, + phba->sli4_hba.nvmet_mrq_hdr[qidx], + phba->sli4_hba.nvmet_mrq_data[qidx]); } - /* Unset fast-path event queue */ - if (phba->sli4_hba.hba_eq) { - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; - fcp_qidx++) - lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]); + + /* Unset NVMET CQ Set complete queue */ + if (phba->sli4_hba.nvmet_cqset) { + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) + lpfc_cq_destroy(phba, + phba->sli4_hba.nvmet_cqset[qidx]); } + + /* Unset FCP response complete queue */ + if (phba->sli4_hba.fcp_cq) + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) + lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]); + + /* Unset fast-path event queue */ + if (phba->sli4_hba.hba_eq) + for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) + lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]); } /** @@ -8484,16 +9302,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device - * with SLI-3 interface specs. The kernel function pci_enable_msix_exact() - * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(), - * once invoked, enables either all or nothing, depending on the current - * availability of PCI vector resources. The device driver is responsible - * for calling the individual request_irq() to register each MSI-X vector - * with a interrupt handler, which is done in this function. Note that - * later when device is unloading, the driver should always call free_irq() - * on all MSI-X vectors it has done request_irq() on before calling - * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device - * will be left with MSI-X enabled and leaks its vectors. + * with SLI-3 interface specs. 
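The reworked lpfc_sli4_queue_unset() above checks every queue pointer before destroying it, which is what lets the setup path funnel all failures to the single out_destroy label instead of the old cascade of ordered labels. A self-contained sketch of that idempotent-teardown shape; the queue names and allocation calls are stand-ins, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct queue { int id; };

static struct queue *mbx_cq, *els_cq, *els_wq;	/* illustrative globals */

/* Teardown that tolerates partial setup: each pointer is checked before it
 * is destroyed and then cleared, so one error label can call this no matter
 * how far setup got. */
static void queues_unset(void)
{
	if (els_wq) { free(els_wq); els_wq = NULL; }
	if (els_cq) { free(els_cq); els_cq = NULL; }
	if (mbx_cq) { free(mbx_cq); mbx_cq = NULL; }
}

static int queues_setup(void)
{
	mbx_cq = calloc(1, sizeof(*mbx_cq));
	if (!mbx_cq)
		goto out_destroy;
	els_cq = calloc(1, sizeof(*els_cq));
	if (!els_cq)
		goto out_destroy;
	els_wq = calloc(1, sizeof(*els_wq));
	if (!els_wq)
		goto out_destroy;
	return 0;

out_destroy:
	queues_unset();		/* safe even though setup only got part way */
	return -1;
}

int main(void)
{
	if (queues_setup())
		return 1;
	printf("queues up\n");
	queues_unset();
	return 0;
}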
* * Return codes * 0 - successful @@ -8502,33 +9311,24 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) static int lpfc_sli_enable_msix(struct lpfc_hba *phba) { - int rc, i; + int rc; LPFC_MBOXQ_t *pmb; /* Set up MSI-X multi-message vectors */ - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - phba->msix_entries[i].entry = i; - - /* Configure MSI-X capability structure */ - rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries, - LPFC_MSIX_VECTORS); - if (rc) { + rc = pci_alloc_irq_vectors(phba->pcidev, + LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); + if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0420 PCI enable MSI-X failed (%d)\n", rc); goto vec_fail_out; } - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0477 MSI-X entry[%d]: vector=x%x " - "message=%d\n", i, - phba->msix_entries[i].vector, - phba->msix_entries[i].entry); + /* * Assign MSI-X vectors to interrupt handlers */ /* vector-0 is associated to slow-path handler */ - rc = request_irq(phba->msix_entries[0].vector, + rc = request_irq(pci_irq_vector(phba->pcidev, 0), &lpfc_sli_sp_intr_handler, 0, LPFC_SP_DRIVER_HANDLER_NAME, phba); if (rc) { @@ -8539,7 +9339,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba) } /* vector-1 is associated to fast-path handler */ - rc = request_irq(phba->msix_entries[1].vector, + rc = request_irq(pci_irq_vector(phba->pcidev, 1), &lpfc_sli_fp_intr_handler, 0, LPFC_FP_DRIVER_HANDLER_NAME, phba); @@ -8584,42 +9384,21 @@ mbx_fail_out: mem_fail_out: /* free the irq already requested */ - free_irq(phba->msix_entries[1].vector, phba); + free_irq(pci_irq_vector(phba->pcidev, 1), phba); irq_fail_out: /* free the irq already requested */ - free_irq(phba->msix_entries[0].vector, phba); + free_irq(pci_irq_vector(phba->pcidev, 0), phba); msi_fail_out: /* Unconfigure MSI-X capability structure */ - pci_disable_msix(phba->pcidev); + pci_free_irq_vectors(phba->pcidev); vec_fail_out: return rc; } /** - * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to release the MSI-X vectors and then disable the - * MSI-X interrupt mode to device with SLI-3 interface spec. - **/ -static void -lpfc_sli_disable_msix(struct lpfc_hba *phba) -{ - int i; - - /* Free up MSI-X multi-message vectors */ - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - free_irq(phba->msix_entries[i].vector, phba); - /* Disable MSI-X */ - pci_disable_msix(phba->pcidev); - - return; -} - -/** * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. * @phba: pointer to lpfc hba data structure. * @@ -8659,24 +9438,6 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba) } /** - * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to disable the MSI interrupt mode to device with - * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has - * done request_irq() on before calling pci_disable_msi(). Failure to do so - * results in a BUG_ON() and a device will be left with MSI enabled and leaks - * its vector. - */ -static void -lpfc_sli_disable_msi(struct lpfc_hba *phba) -{ - free_irq(phba->pcidev->irq, phba); - pci_disable_msi(phba->pcidev); - return; -} - -/** * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. * @phba: pointer to lpfc hba data structure. 
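The SLI-3 MSI-X hunk above drops the driver-maintained msix_entries[] table: pci_alloc_irq_vectors() now owns the vector bookkeeping, and pci_irq_vector() translates a vector index into the Linux IRQ number for request_irq() and free_irq(). A schematic kernel-style fragment of that pattern for an imaginary two-vector device; the demo_* names are placeholders and this is not the driver's actual code:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t demo_sp_handler(int irq, void *dev_id) { return IRQ_HANDLED; }
static irqreturn_t demo_fp_handler(int irq, void *dev_id) { return IRQ_HANDLED; }

/* Request exactly two MSI-X vectors and attach one handler to each, looking
 * the IRQ numbers up through pci_irq_vector() instead of a private table. */
static int demo_enable_msix(struct pci_dev *pdev, void *drvdata)
{
	int rc;

	rc = pci_alloc_irq_vectors(pdev, 2, 2, PCI_IRQ_MSIX);
	if (rc < 0)
		return rc;

	rc = request_irq(pci_irq_vector(pdev, 0), demo_sp_handler, 0,
			 "demo-sp", drvdata);
	if (rc)
		goto free_vectors;

	rc = request_irq(pci_irq_vector(pdev, 1), demo_fp_handler, 0,
			 "demo-fp", drvdata);
	if (rc)
		goto free_irq0;

	return 0;

free_irq0:
	free_irq(pci_irq_vector(pdev, 0), drvdata);
free_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}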
* @@ -8747,107 +9508,50 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) static void lpfc_sli_disable_intr(struct lpfc_hba *phba) { - /* Disable the currently initialized interrupt mode */ + int nr_irqs, i; + if (phba->intr_type == MSIX) - lpfc_sli_disable_msix(phba); - else if (phba->intr_type == MSI) - lpfc_sli_disable_msi(phba); - else if (phba->intr_type == INTx) - free_irq(phba->pcidev->irq, phba); + nr_irqs = LPFC_MSIX_VECTORS; + else + nr_irqs = 1; + + for (i = 0; i < nr_irqs; i++) + free_irq(pci_irq_vector(phba->pcidev, i), phba); + pci_free_irq_vectors(phba->pcidev); /* Reset interrupt management states */ phba->intr_type = NONE; phba->sli.slistat.sli_intr = 0; - - return; } /** - * lpfc_find_next_cpu - Find next available CPU that matches the phys_id + * lpfc_cpu_affinity_check - Check vector CPU affinity mappings * @phba: pointer to lpfc hba data structure. + * @vectors: number of msix vectors allocated. * - * Find next available CPU to use for IRQ to CPU affinity. + * The routine will figure out the CPU affinity assignment for every + * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated + * with a pointer to the CPU mask that defines ALL the CPUs this vector + * can be associated with. If the vector can be unquely associated with + * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu. + * In addition, the CPU to IO channel mapping will be calculated + * and the phba->sli4_hba.cpu_map array will reflect this. */ -static int -lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) +static void +lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) { struct lpfc_vector_map_info *cpup; + int index = 0; + int vec = 0; int cpu; - - cpup = phba->sli4_hba.cpu_map; - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { - /* CPU must be online */ - if (cpu_online(cpu)) { - if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && - (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && - (cpup->phys_id == phys_id)) { - return cpu; - } - } - cpup++; - } - - /* - * If we get here, we have used ALL CPUs for the specific - * phys_id. Now we need to clear out lpfc_used_cpu and start - * reusing CPUs. - */ - - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { - if (lpfc_used_cpu[cpu] == phys_id) - lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; - } - - cpup = phba->sli4_hba.cpu_map; - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { - /* CPU must be online */ - if (cpu_online(cpu)) { - if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && - (cpup->phys_id == phys_id)) { - return cpu; - } - } - cpup++; - } - return LPFC_VECTOR_MAP_EMPTY; -} - -/** - * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors - * @phba: pointer to lpfc hba data structure. - * @vectors: number of HBA vectors - * - * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector - * affinization across multple physical CPUs (numa nodes). - * In addition, this routine will assign an IO channel for each CPU - * to use when issuing I/Os. 
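lpfc_sli_disable_intr() above becomes a single teardown path for every interrupt mode, freeing each requested IRQ by vector index and then releasing the vectors; pci_free_irq_vectors() copes with MSI-X, MSI and legacy INTx alike. A minimal kernel-style fragment of that shape, again with placeholder names rather than the driver's own:

#include <linux/pci.h>
#include <linux/interrupt.h>

/* One teardown path regardless of interrupt mode: free each IRQ that was
 * requested, then release the vectors.  pci_free_irq_vectors() handles the
 * MSI-X, MSI and legacy INTx cases alike. */
static void demo_disable_intr(struct pci_dev *pdev, void *drvdata, int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++)
		free_irq(pci_irq_vector(pdev, i), drvdata);
	pci_free_irq_vectors(pdev);
}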
- */ -static int -lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) -{ - int i, idx, saved_chann, used_chann, cpu, phys_id; - int max_phys_id, min_phys_id; - int num_io_channel, first_cpu, chan; - struct lpfc_vector_map_info *cpup; #ifdef CONFIG_X86 struct cpuinfo_x86 *cpuinfo; #endif - uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; - - /* If there is no mapping, just return */ - if (!phba->cfg_fcp_cpu_map) - return 1; /* Init cpu_map array */ memset(phba->sli4_hba.cpu_map, 0xff, (sizeof(struct lpfc_vector_map_info) * - phba->sli4_hba.num_present_cpu)); - - max_phys_id = 0; - min_phys_id = 0xff; - phys_id = 0; - num_io_channel = 0; - first_cpu = LPFC_VECTOR_MAP_EMPTY; + phba->sli4_hba.num_present_cpu)); /* Update CPU map with physical id and core id of each CPU */ cpup = phba->sli4_hba.cpu_map; @@ -8861,184 +9565,16 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) cpup->phys_id = 0; cpup->core_id = 0; #endif - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3328 CPU physid %d coreid %d\n", - cpup->phys_id, cpup->core_id); - - if (cpup->phys_id > max_phys_id) - max_phys_id = cpup->phys_id; - if (cpup->phys_id < min_phys_id) - min_phys_id = cpup->phys_id; + cpup->channel_id = index; /* For now round robin */ + cpup->irq = pci_irq_vector(phba->pcidev, vec); + vec++; + if (vec >= vectors) + vec = 0; + index++; + if (index >= phba->cfg_fcp_io_channel) + index = 0; cpup++; } - - phys_id = min_phys_id; - /* Now associate the HBA vectors with specific CPUs */ - for (idx = 0; idx < vectors; idx++) { - cpup = phba->sli4_hba.cpu_map; - cpu = lpfc_find_next_cpu(phba, phys_id); - if (cpu == LPFC_VECTOR_MAP_EMPTY) { - - /* Try for all phys_id's */ - for (i = 1; i < max_phys_id; i++) { - phys_id++; - if (phys_id > max_phys_id) - phys_id = min_phys_id; - cpu = lpfc_find_next_cpu(phba, phys_id); - if (cpu == LPFC_VECTOR_MAP_EMPTY) - continue; - goto found; - } - - /* Use round robin for scheduling */ - phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; - chan = 0; - cpup = phba->sli4_hba.cpu_map; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - cpup->channel_id = chan; - cpup++; - chan++; - if (chan >= phba->cfg_fcp_io_channel) - chan = 0; - } - - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3329 Cannot set affinity:" - "Error mapping vector %d (%d)\n", - idx, vectors); - return 0; - } -found: - cpup += cpu; - if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) - lpfc_used_cpu[cpu] = phys_id; - - /* Associate vector with selected CPU */ - cpup->irq = phba->sli4_hba.msix_entries[idx].vector; - - /* Associate IO channel with selected CPU */ - cpup->channel_id = idx; - num_io_channel++; - - if (first_cpu == LPFC_VECTOR_MAP_EMPTY) - first_cpu = cpu; - - /* Now affinitize to the selected CPU */ - i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. - vector, get_cpu_mask(cpu)); - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3330 Set Affinity: CPU %d channel %d " - "irq %d (%x)\n", - cpu, cpup->channel_id, - phba->sli4_hba.msix_entries[idx].vector, i); - - /* Spread vector mapping across multple physical CPU nodes */ - phys_id++; - if (phys_id > max_phys_id) - phys_id = min_phys_id; - } - - /* - * Finally fill in the IO channel for any remaining CPUs. - * At this point, all IO channels have been assigned to a specific - * MSIx vector, mapped to a specific CPU. - * Base the remaining IO channel assigned, to IO channels already - * assigned to other CPUs on the same phys_id. 
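The new lpfc_cpu_affinity_check() above fills cpu_map by walking the CPUs once and handing out vectors and io channels round robin, each counter wrapping independently. A standalone sketch of that loop; the CPU count, vector count and topology values below are made up:

#include <stdio.h>

struct cpu_map_entry { int phys_id; int core_id; int channel; int vector; };

int main(void)
{
	enum { NCPUS = 8, VECTORS = 3, CHANNELS = 4 };
	struct cpu_map_entry map[NCPUS];
	int vec = 0, chan = 0;

	/* Walk the CPUs once, handing out vectors and io channels round robin
	 * with each counter wrapping independently. */
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		map[cpu].phys_id = cpu / 4;	/* placeholder topology */
		map[cpu].core_id = cpu % 4;
		map[cpu].vector = vec;
		map[cpu].channel = chan;
		if (++vec >= VECTORS)
			vec = 0;
		if (++chan >= CHANNELS)
			chan = 0;
	}

	for (int cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu %d -> vector %d channel %d\n",
		       cpu, map[cpu].vector, map[cpu].channel);
	return 0;
}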
- */ - for (i = min_phys_id; i <= max_phys_id; i++) { - /* - * If there are no io channels already mapped to - * this phys_id, just round robin thru the io_channels. - * Setup chann[] for round robin. - */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) - chann[idx] = idx; - - saved_chann = 0; - used_chann = 0; - - /* - * First build a list of IO channels already assigned - * to this phys_id before reassigning the same IO - * channels to the remaining CPUs. - */ - cpup = phba->sli4_hba.cpu_map; - cpu = first_cpu; - cpup += cpu; - for (idx = 0; idx < phba->sli4_hba.num_present_cpu; - idx++) { - if (cpup->phys_id == i) { - /* - * Save any IO channels that are - * already mapped to this phys_id. - */ - if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { - if (saved_chann <= - LPFC_FCP_IO_CHAN_MAX) { - chann[saved_chann] = - cpup->channel_id; - saved_chann++; - } - goto out; - } - - /* See if we are using round-robin */ - if (saved_chann == 0) - saved_chann = - phba->cfg_fcp_io_channel; - - /* Associate next IO channel with CPU */ - cpup->channel_id = chann[used_chann]; - num_io_channel++; - used_chann++; - if (used_chann == saved_chann) - used_chann = 0; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3331 Set IO_CHANN " - "CPU %d channel %d\n", - idx, cpup->channel_id); - } -out: - cpu++; - if (cpu >= phba->sli4_hba.num_present_cpu) { - cpup = phba->sli4_hba.cpu_map; - cpu = 0; - } else { - cpup++; - } - } - } - - if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { - cpup = phba->sli4_hba.cpu_map; - for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { - if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { - cpup->channel_id = 0; - num_io_channel++; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3332 Assign IO_CHANN " - "CPU %d channel %d\n", - idx, cpup->channel_id); - } - cpup++; - } - } - - /* Sanity check */ - if (num_io_channel != phba->sli4_hba.num_present_cpu) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3333 Set affinity mismatch:" - "%d chann != %d cpus: %d vectors\n", - num_io_channel, phba->sli4_hba.num_present_cpu, - vectors); - - /* Enable using cpu affinity for scheduling */ - phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; - return 1; } @@ -9047,14 +9583,7 @@ out: * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device - * with SLI-4 interface spec. The kernel function pci_enable_msix_range() - * is called to enable the MSI-X vectors. The device driver is responsible - * for calling the individual request_irq() to register each MSI-X vector - * with a interrupt handler, which is done in this function. Note that - * later when device is unloading, the driver should always call free_irq() - * on all MSI-X vectors it has done request_irq() on before calling - * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device - * will be left with MSI-X enabled and leaks its vectors. + * with SLI-4 interface spec. 
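The lpfc_sli4_enable_msix() hunk that follows asks pci_alloc_irq_vectors() for a range of vectors with PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, accepts however many are granted, and then clamps the FCP and NVME channel counts to that number. A small plain-C sketch of the accept-what-you-get logic; the allocator and the limits below are invented for illustration:

#include <stdio.h>

/* Pretend vector allocator: grants anything between min and its own limit. */
static int alloc_vectors(int min, int max)
{
	const int available = 6;		/* made-up platform ceiling */

	if (available < min)
		return -1;
	return max < available ? max : available;
}

int main(void)
{
	int fcp_channels = 8, nvme_channels = 8;
	int want = fcp_channels > nvme_channels ? fcp_channels : nvme_channels;

	/* Ask for up to 'want' vectors, accept fewer, then clamp every
	 * consumer to what was actually granted. */
	int got = alloc_vectors(2, want);
	if (got < 0)
		return 1;
	if (fcp_channels > got)
		fcp_channels = got;
	if (nvme_channels > got)
		nvme_channels = got;

	printf("vectors %d, fcp %d, nvme %d\n", got, fcp_channels, nvme_channels);
	return 0;
}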
* * Return codes * 0 - successful @@ -9066,17 +9595,13 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) int vectors, rc, index; /* Set up MSI-X multi-message vectors */ - for (index = 0; index < phba->cfg_fcp_io_channel; index++) - phba->sli4_hba.msix_entries[index].entry = index; - - /* Configure MSI-X capability structure */ - vectors = phba->cfg_fcp_io_channel; - if (phba->cfg_fof) { - phba->sli4_hba.msix_entries[index].entry = index; + vectors = phba->io_channel_irqs; + if (phba->cfg_fof) vectors++; - } - rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries, - 2, vectors); + + rc = pci_alloc_irq_vectors(phba->pcidev, + (phba->nvmet_support) ? 1 : 2, + vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0484 PCI enable MSI-X failed (%d)\n", rc); @@ -9084,14 +9609,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) } vectors = rc; - /* Log MSI-X vector assignment */ - for (index = 0; index < vectors; index++) - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0489 MSI-X entry[%d]: vector=x%x " - "message=%d\n", index, - phba->sli4_hba.msix_entries[index].vector, - phba->sli4_hba.msix_entries[index].entry); - /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { memset(&phba->sli4_hba.handler_name[index], 0, 16); @@ -9099,21 +9616,19 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) LPFC_SLI4_HANDLER_NAME_SZ, LPFC_DRIVER_HANDLER_NAME"%d", index); - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; - atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); + phba->sli4_hba.hba_eq_hdl[index].idx = index; + phba->sli4_hba.hba_eq_hdl[index].phba = phba; + atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1); if (phba->cfg_fof && (index == (vectors - 1))) - rc = request_irq( - phba->sli4_hba.msix_entries[index].vector, + rc = request_irq(pci_irq_vector(phba->pcidev, index), &lpfc_sli4_fof_intr_handler, 0, (char *)&phba->sli4_hba.handler_name[index], - &phba->sli4_hba.fcp_eq_hdl[index]); + &phba->sli4_hba.hba_eq_hdl[index]); else - rc = request_irq( - phba->sli4_hba.msix_entries[index].vector, + rc = request_irq(pci_irq_vector(phba->pcidev, index), &lpfc_sli4_hba_intr_handler, 0, (char *)&phba->sli4_hba.handler_name[index], - &phba->sli4_hba.fcp_eq_hdl[index]); + &phba->sli4_hba.hba_eq_hdl[index]); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " @@ -9125,64 +9640,38 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) if (phba->cfg_fof) vectors--; - if (vectors != phba->cfg_fcp_io_channel) { + if (vectors != phba->io_channel_irqs) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3238 Reducing IO channels to match number of " "MSI-X vectors, requested %d got %d\n", - phba->cfg_fcp_io_channel, vectors); - phba->cfg_fcp_io_channel = vectors; + phba->io_channel_irqs, vectors); + if (phba->cfg_fcp_io_channel > vectors) + phba->cfg_fcp_io_channel = vectors; + if (phba->cfg_nvme_io_channel > vectors) + phba->cfg_nvme_io_channel = vectors; + if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) + phba->io_channel_irqs = phba->cfg_fcp_io_channel; + else + phba->io_channel_irqs = phba->cfg_nvme_io_channel; } + lpfc_cpu_affinity_check(phba, vectors); - if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport))) - lpfc_sli4_set_affinity(phba, vectors); return rc; cfg_fail_out: /* free the irq already requested */ - for (--index; index >= 0; index--) { - irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 
- vector, NULL); - free_irq(phba->sli4_hba.msix_entries[index].vector, - &phba->sli4_hba.fcp_eq_hdl[index]); - } + for (--index; index >= 0; index--) + free_irq(pci_irq_vector(phba->pcidev, index), + &phba->sli4_hba.hba_eq_hdl[index]); /* Unconfigure MSI-X capability structure */ - pci_disable_msix(phba->pcidev); + pci_free_irq_vectors(phba->pcidev); vec_fail_out: return rc; } /** - * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to release the MSI-X vectors and then disable the - * MSI-X interrupt mode to device with SLI-4 interface spec. - **/ -static void -lpfc_sli4_disable_msix(struct lpfc_hba *phba) -{ - int index; - - /* Free up MSI-X multi-message vectors */ - for (index = 0; index < phba->cfg_fcp_io_channel; index++) { - irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. - vector, NULL); - free_irq(phba->sli4_hba.msix_entries[index].vector, - &phba->sli4_hba.fcp_eq_hdl[index]); - } - if (phba->cfg_fof) { - free_irq(phba->sli4_hba.msix_entries[index].vector, - &phba->sli4_hba.fcp_eq_hdl[index]); - } - /* Disable MSI-X */ - pci_disable_msix(phba->pcidev); - - return; -} - -/** * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * @@ -9220,37 +9709,19 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) return rc; } - for (index = 0; index < phba->cfg_fcp_io_channel; index++) { - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + for (index = 0; index < phba->io_channel_irqs; index++) { + phba->sli4_hba.hba_eq_hdl[index].idx = index; + phba->sli4_hba.hba_eq_hdl[index].phba = phba; } if (phba->cfg_fof) { - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + phba->sli4_hba.hba_eq_hdl[index].idx = index; + phba->sli4_hba.hba_eq_hdl[index].phba = phba; } return 0; } /** - * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to disable the MSI interrupt mode to device with - * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has - * done request_irq() on before calling pci_disable_msi(). Failure to do so - * results in a BUG_ON() and a device will be left with MSI enabled and leaks - * its vector. - **/ -static void -lpfc_sli4_disable_msi(struct lpfc_hba *phba) -{ - free_irq(phba->pcidev->irq, phba); - pci_disable_msi(phba->pcidev); - return; -} - -/** * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device * @phba: pointer to lpfc hba data structure. * @@ -9270,7 +9741,7 @@ static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) { uint32_t intr_mode = LPFC_INTR_ERROR; - int retval, index; + int retval, idx; if (cfg_mode == 2) { /* Preparation before conf_msi mbox cmd */ @@ -9301,21 +9772,23 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (!retval) { + struct lpfc_hba_eq_hdl *eqhdl; + /* Indicate initialization to INTx mode */ phba->intr_type = INTx; intr_mode = 0; - for (index = 0; index < phba->cfg_fcp_io_channel; - index++) { - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; - atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 
- fcp_eq_in_use, 1); + + for (idx = 0; idx < phba->io_channel_irqs; idx++) { + eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; + eqhdl->idx = idx; + eqhdl->phba = phba; + atomic_set(&eqhdl->hba_eq_in_use, 1); } if (phba->cfg_fof) { - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; - atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. - fcp_eq_in_use, 1); + eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; + eqhdl->idx = idx; + eqhdl->phba = phba; + atomic_set(&eqhdl->hba_eq_in_use, 1); } } } @@ -9335,18 +9808,26 @@ static void lpfc_sli4_disable_intr(struct lpfc_hba *phba) { /* Disable the currently initialized interrupt mode */ - if (phba->intr_type == MSIX) - lpfc_sli4_disable_msix(phba); - else if (phba->intr_type == MSI) - lpfc_sli4_disable_msi(phba); - else if (phba->intr_type == INTx) + if (phba->intr_type == MSIX) { + int index; + + /* Free up MSI-X multi-message vectors */ + for (index = 0; index < phba->io_channel_irqs; index++) + free_irq(pci_irq_vector(phba->pcidev, index), + &phba->sli4_hba.hba_eq_hdl[index]); + + if (phba->cfg_fof) + free_irq(pci_irq_vector(phba->pcidev, index), + &phba->sli4_hba.hba_eq_hdl[index]); + } else { free_irq(phba->pcidev->irq, phba); + } + + pci_free_irq_vectors(phba->pcidev); /* Reset interrupt management states */ phba->intr_type = NONE; phba->sli.slistat.sli_intr = 0; - - return; } /** @@ -9399,11 +9880,27 @@ static void lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) { int wait_time = 0; - int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + int nvme_xri_cmpl = 1; + int fcp_xri_cmpl = 1; int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); + int nvmet_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) + fcp_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + nvme_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); - while (!fcp_xri_cmpl || !els_xri_cmpl) { + while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || + !nvmet_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { + if (!nvme_xri_cmpl) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6100 NVME XRI exchange busy " + "wait time: %d seconds.\n", + wait_time/1000); if (!fcp_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2877 FCP XRI exchange busy " @@ -9420,10 +9917,19 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; } - fcp_xri_cmpl = - list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + nvme_xri_cmpl = list_empty( + &phba->sli4_hba.lpfc_abts_nvme_buf_list); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) + fcp_xri_cmpl = list_empty( + &phba->sli4_hba.lpfc_abts_scsi_buf_list); + els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); + + nvmet_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list); } } @@ -9635,10 +10141,35 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, mbx_sli4_parameters); + sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, mbx_sli4_parameters); phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, 
mbx_sli4_parameters); + phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) && + bf_get(cfg_xib, mbx_sli4_parameters)); + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) || + !phba->nvme_support) { + phba->nvme_support = 0; + phba->nvmet_support = 0; + phba->cfg_nvmet_mrq = 0; + phba->cfg_nvme_io_channel = 0; + phba->io_channel_irqs = phba->cfg_fcp_io_channel; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, + "6101 Disabling NVME support: " + "Not supported by firmware: %d %d\n", + bf_get(cfg_nvme, mbx_sli4_parameters), + bf_get(cfg_xib, mbx_sli4_parameters)); + + /* If firmware doesn't support NVME, just use SCSI support */ + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return -ENODEV; + phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; + } + + if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp) + phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; /* Make sure that sge_supp_len can be handled by the driver */ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) @@ -9713,14 +10244,6 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_disable_pci_dev; } - /* Set up phase-1 common device driver resources */ - error = lpfc_setup_driver_resource_phase1(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1403 Failed to set up driver resource.\n"); - goto out_unset_pci_mem_s3; - } - /* Set up SLI-3 specific device driver resources */ error = lpfc_sli_driver_resource_setup(phba); if (error) { @@ -9876,7 +10399,13 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) /* Remove FC host and then SCSI host with the physical port */ fc_remove_host(shost); scsi_remove_host(shost); + + /* Perform ndlp cleanup on the physical port. The nvme and nvmet + * localports are destroyed after to cleanup all transport memory. + */ lpfc_cleanup(vport); + lpfc_nvmet_destroy_targetport(phba); + lpfc_nvme_destroy_localport(vport); /* * Bring down the SLI Layer. This step disable all interrupts, @@ -10296,6 +10825,23 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) } /** + * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve + * @phba: pointer to lpfc hba data structure. + * + * returns the number of ELS/CT + NVMET IOCBs to reserve + **/ +int +lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) +{ + int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); + + if (phba->nvmet_support) + max_xri += LPFC_NVMET_BUF_POST; + return max_xri; +} + + +/** * lpfc_write_firmware - attempt to write a firmware image to the port * @fw: pointer to firmware image returned from request_firmware. * @phba: pointer to lpfc hba data structure. 
@@ -10459,7 +11005,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) struct Scsi_Host *shost = NULL; int error; uint32_t cfg_mode, intr_mode; - int adjusted_fcp_io_channel; /* Allocate memory for HBA structure */ phba = lpfc_hba_alloc(pdev); @@ -10484,14 +11029,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_disable_pci_dev; } - /* Set up phase-1 common device driver resources */ - error = lpfc_setup_driver_resource_phase1(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1411 Failed to set up driver resource.\n"); - goto out_unset_pci_mem_s4; - } - /* Set up SLI-4 Specific device driver resources */ error = lpfc_sli4_driver_resource_setup(phba); if (error) { @@ -10550,6 +11087,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Put device to a known state before enabling interrupt */ lpfc_stop_port(phba); + /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { @@ -10559,11 +11097,17 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_sysfs_attr; } /* Default to single EQ for non-MSI-X */ - if (phba->intr_type != MSIX) - adjusted_fcp_io_channel = 1; - else - adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; - phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; + if (phba->intr_type != MSIX) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) + phba->cfg_fcp_io_channel = 1; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + phba->cfg_nvme_io_channel = 1; + if (phba->nvmet_support) + phba->cfg_nvmet_mrq = 1; + } + phba->io_channel_irqs = 1; + } + /* Set up SLI-4 HBA */ if (lpfc_sli4_hba_setup(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -10579,6 +11123,24 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Perform post initialization setup */ lpfc_post_init_setup(phba); + /* NVME support in FW earlier in the driver load corrects the + * FC4 type making a check for nvme_support unnecessary. + */ + if ((phba->nvmet_support == 0) && + (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { + /* Create NVME binding with nvme_fc_transport. This + * ensures the vport is initialized. + */ + error = lpfc_nvme_create_localport(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6004 NVME registration failed, " + "error x%x\n", + error); + goto out_disable_intr; + } + } + /* check for firmware upgrade or downgrade */ if (phba->cfg_request_firmware_upgrade) lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); @@ -10650,8 +11212,12 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) fc_remove_host(shost); scsi_remove_host(shost); - /* Perform cleanup on the physical port */ + /* Perform ndlp cleanup on the physical port. The nvme and nvmet + * localports are destroyed after to cleanup all transport memory. + */ lpfc_cleanup(vport); + lpfc_nvmet_destroy_targetport(phba); + lpfc_nvme_destroy_localport(vport); /* * Bring down the SLI Layer. This step disables all interrupts, @@ -10669,6 +11235,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) * buffers are released to their corresponding pools here. 
*/ lpfc_scsi_free(phba); + lpfc_nvme_free(phba); + lpfc_free_iocb_list(phba); lpfc_sli4_driver_resource_unset(phba); @@ -11314,7 +11882,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba) int lpfc_fof_queue_setup(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; int rc; rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); @@ -11333,8 +11901,11 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba) if (rc) goto out_oas_wq; - phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING]; - phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING]; + /* Bind this CQ/WQ to the NVME ring */ + pring = phba->sli4_hba.oas_wq->pring; + pring->sli.sli4.wqp = + (void *)phba->sli4_hba.oas_wq; + phba->sli4_hba.oas_cq->pring = pring; } return 0; @@ -11391,6 +11962,7 @@ lpfc_fof_queue_create(struct lpfc_hba *phba) goto out_error; phba->sli4_hba.oas_wq = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); } return 0; @@ -11476,7 +12048,6 @@ static struct miscdevice lpfc_mgmt_dev = { static int __init lpfc_init(void) { - int cpu; int error = 0; printk(LPFC_MODULE_DESC "\n"); @@ -11502,9 +12073,7 @@ lpfc_init(void) /* Initialize in case vector mapping is needed */ lpfc_used_cpu = NULL; - lpfc_present_cpu = 0; - for_each_present_cpu(cpu) - lpfc_present_cpu++; + lpfc_present_cpu = num_present_cpus(); error = pci_register_driver(&lpfc_driver); if (error) { @@ -11550,5 +12119,5 @@ module_init(lpfc_init); module_exit(lpfc_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(LPFC_MODULE_DESC); -MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); +MODULE_AUTHOR("Broadcom"); MODULE_VERSION("0:" LPFC_DRIVER_VERSION); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 2a4e5d21eab2..3b654ad08d1f 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -38,6 +40,10 @@ #define LOG_FIP 0x00020000 /* FIP events */ #define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ #define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */ +#define LOG_NVME 0x00100000 /* NVME general events. */ +#define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */ +#define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */ +#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index b234c50c255f..a928f5187fa4 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. 
* * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -954,7 +956,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) pcbp->maxRing = (psli->num_rings - 1); for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + pring = &psli->sli3_ring[i]; pring->sli.sli3.sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE : @@ -1217,7 +1219,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) mb->un.varCfgRing.recvNotify = 1; psli = &phba->sli; - pring = &psli->ring[ring]; + pring = &psli->sli3_ring[ring]; mb->un.varCfgRing.numMask = pring->num_mask; mb->mbxCommand = MBX_CONFIG_RING; mb->mbxOwner = OWN_HOST; @@ -2081,6 +2083,9 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) if (phba->max_vpi && phba->cfg_enable_npiv) bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); + if (phba->nvmet_support) + bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1); + return; } @@ -2434,14 +2439,45 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) memset(mbox, 0, sizeof(*mbox)); reg_fcfi = &mbox->u.mqe.un.reg_fcfi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); - bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); - bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); + if (phba->nvmet_support == 0) { + bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, + phba->sli4_hba.hdr_rq->queue_id); + /* Match everything - rq_id0 */ + bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0); + + bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); + + /* addr mode is bit wise inverted value of fcf addr_mode */ + bf_set(lpfc_reg_fcfi_mam, reg_fcfi, + (~phba->fcf.addr_mode) & 0x3); + } else { + /* This is ONLY for NVMET MRQ == 1 */ + if (phba->cfg_nvmet_mrq != 1) + return; + + bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, + phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id); + /* Match type FCP - rq_id0 */ + bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP); + bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff); + bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, + FC_RCTL_DD_UNSOL_CMD); + + bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, + phba->sli4_hba.hdr_rq->queue_id); + /* Match everything else - rq_id1 */ + bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0); + } bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.current_rec.fcf_indx); - /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ - bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3); if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, @@ -2450,6 +2486,70 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) } /** + * lpfc_reg_fcfi_mrq - Initialize the REG_FCFI_MRQ mailbox command + * @phba: pointer to the hba structure containing the FCF index and RQ ID. + * @mbox: pointer to lpfc mbox command to initialize. 
+ * @mode: 0 to register FCFI, 1 to register MRQs + * + * The REG_FCFI_MRQ mailbox command supports Fibre Channel Forwarders (FCFs). + * The SLI Host uses the command to activate an FCF after it has acquired FCF + * information via a READ_FCF mailbox command. This mailbox command also is used + * to indicate where received unsolicited frames from this FCF will be sent. By + * default this routine will set up the FCF to forward all unsolicited frames + * the the RQ ID passed in the @phba. This can be overridden by the caller for + * more complicated setups. + **/ +void +lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode) +{ + struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi; + + /* This is ONLY for MRQ */ + if (phba->cfg_nvmet_mrq <= 1) + return; + + memset(mbox, 0, sizeof(*mbox)); + reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ); + if (mode == 0) { + bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi, + phba->fcf.current_rec.fcf_indx); + if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { + bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi, + phba->fcf.current_rec.vlan_id); + } + return; + } + + bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi, + phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id); + /* Match NVME frames of type FCP (protocol NVME) - rq_id0 */ + bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP); + bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff); + bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD); + bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff); + bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1); + + bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3); /* NVME connection id */ + bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1); /* rq_id0 */ + bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq); + + bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi, + phba->sli4_hba.hdr_rq->queue_id); + /* Match everything - rq_id1 */ + bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0); + + bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); +} + +/** * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @fcfi: FCFI to be unregistered. diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 3fa65338d3f5..c61d8d692ede 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -24,10 +26,12 @@ #include <linux/pci.h> #include <linux/interrupt.h> +#include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fs.h> -#include <scsi/scsi.h> +#include <linux/nvme-fc-driver.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -35,8 +39,10 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" #include "lpfc_crtn.h" #include "lpfc_logmsg.h" @@ -66,7 +72,7 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { * lpfc_mem_alloc - create and allocate all PCI and memory pools * @phba: HBA to allocate pools for * - * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, + * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool, * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. * @@ -90,21 +96,23 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) else i = SLI4_PAGE_SIZE; - phba->lpfc_scsi_dma_buf_pool = - pci_pool_create("lpfc_scsi_dma_buf_pool", - phba->pcidev, - phba->cfg_sg_dma_buf_size, - i, - 0); + phba->lpfc_sg_dma_buf_pool = + pci_pool_create("lpfc_sg_dma_buf_pool", + phba->pcidev, + phba->cfg_sg_dma_buf_size, + i, 0); + if (!phba->lpfc_sg_dma_buf_pool) + goto fail; + } else { - phba->lpfc_scsi_dma_buf_pool = - pci_pool_create("lpfc_scsi_dma_buf_pool", - phba->pcidev, phba->cfg_sg_dma_buf_size, - align, 0); - } + phba->lpfc_sg_dma_buf_pool = + pci_pool_create("lpfc_sg_dma_buf_pool", + phba->pcidev, phba->cfg_sg_dma_buf_size, + align, 0); - if (!phba->lpfc_scsi_dma_buf_pool) - goto fail; + if (!phba->lpfc_sg_dma_buf_pool) + goto fail; + } phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, LPFC_BPL_SIZE, @@ -170,12 +178,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) LPFC_DEVICE_DATA_POOL_SIZE, sizeof(struct lpfc_device_data)); if (!phba->device_data_mem_pool) - goto fail_free_hrb_pool; + goto fail_free_drb_pool; } else { phba->device_data_mem_pool = NULL; } return 0; +fail_free_drb_pool: + pci_pool_destroy(phba->lpfc_drb_pool); + phba->lpfc_drb_pool = NULL; fail_free_hrb_pool: pci_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; @@ -197,8 +208,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) pci_pool_destroy(phba->lpfc_mbuf_pool); phba->lpfc_mbuf_pool = NULL; fail_free_dma_buf_pool: - pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); - phba->lpfc_scsi_dma_buf_pool = NULL; + pci_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; fail: return -ENOMEM; } @@ -227,6 +238,9 @@ lpfc_mem_free(struct lpfc_hba *phba) if (phba->lpfc_hrb_pool) pci_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; + if (phba->txrdy_payload_pool) + pci_pool_destroy(phba->txrdy_payload_pool); + phba->txrdy_payload_pool = NULL; if (phba->lpfc_hbq_pool) pci_pool_destroy(phba->lpfc_hbq_pool); @@ -258,8 +272,8 @@ lpfc_mem_free(struct lpfc_hba *phba) phba->lpfc_mbuf_pool = NULL; /* Free DMA buffer memory pool */ - pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); - phba->lpfc_scsi_dma_buf_pool = NULL; + pci_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; /* Free Device Data memory pool */ if (phba->device_data_mem_pool) 
{ @@ -282,7 +296,7 @@ lpfc_mem_free(struct lpfc_hba *phba) * @phba: HBA to free memory for * * Description: Free memory from PCI and driver memory pools and also those - * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees + * used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees * the VPI bitmask. * @@ -431,6 +445,44 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) } /** + * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the + * lpfc_sg_dma_buf_pool PCI pool + * @phba: HBA which owns the pool to allocate from + * @mem_flags: indicates if this is a priority (MEM_PRI) allocation + * @handle: used to return the DMA-mapped address of the nvmet_buf + * + * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool + * PCI pool. Allocates from generic pci_pool_alloc function. + * + * Returns: + * pointer to the allocated nvmet_buf on success + * NULL on failure + **/ +void * +lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) +{ + void *ret; + + ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle); + return ret; +} + +/** + * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool + * PCI pool + * @phba: HBA which owns the pool to return to + * @virt: nvmet_buf to free + * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed + * + * Returns: None + **/ +void +lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma) +{ + pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma); +} + +/** * lpfc_els_hbq_alloc - Allocate an HBQ buffer * @phba: HBA to allocate HBQ buffer for * @@ -458,7 +510,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) kfree(hbqbp); return NULL; } - hbqbp->size = LPFC_BPL_SIZE; + hbqbp->total_size = LPFC_BPL_SIZE; return hbqbp; } @@ -518,7 +570,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba) kfree(dma_buf); return NULL; } - dma_buf->size = LPFC_BPL_SIZE; + dma_buf->total_size = LPFC_DATA_BUF_SIZE; return dma_buf; } @@ -540,7 +592,134 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); kfree(dmab); - return; +} + +/** + * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer + * @phba: HBA to allocate a receive buffer for + * + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI + * pool along a non-DMA-mapped container for it. + * + * Notes: Not interrupt-safe. Must be called with no locks held. 
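The new lpfc_nvmet_buf_alloc()/lpfc_nvmet_buf_free() helpers above are thin wrappers around the renamed lpfc_sg_dma_buf_pool. A hypothetical caller, sketched only to show the intended alloc/free pairing (it assumes the lpfc driver headers and a fully initialized phba; the hardware post and completion handling are omitted):

    /* Sketch: allocate one DMA-mapped NVMET buffer and give it back. */
    static int example_nvmet_buf_roundtrip(struct lpfc_hba *phba)
    {
            dma_addr_t phys;
            void *virt;

            virt = lpfc_nvmet_buf_alloc(phba, 0, &phys);
            if (!virt)
                    return -ENOMEM;

            /* ... hand virt/phys to the hardware, wait for completion ... */

            lpfc_nvmet_buf_free(phba, virt, phys);
            return 0;
    }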
+ * + * Returns: + * pointer to HBQ on success + * NULL on failure + **/ +struct rqb_dmabuf * +lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) +{ + struct rqb_dmabuf *dma_buf; + struct lpfc_iocbq *nvmewqe; + union lpfc_wqe128 *wqe; + + dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL); + if (!dma_buf) + return NULL; + + dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + &dma_buf->hbuf.phys); + if (!dma_buf->hbuf.virt) { + kfree(dma_buf); + return NULL; + } + dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + &dma_buf->dbuf.phys); + if (!dma_buf->dbuf.virt) { + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + return NULL; + } + dma_buf->total_size = LPFC_DATA_BUF_SIZE; + + dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), + GFP_KERNEL); + if (!dma_buf->context) { + pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, + dma_buf->dbuf.phys); + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + return NULL; + } + + dma_buf->iocbq = lpfc_sli_get_iocbq(phba); + dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET; + if (!dma_buf->iocbq) { + kfree(dma_buf->context); + pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, + dma_buf->dbuf.phys); + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "2621 Ran out of nvmet iocb/WQEs\n"); + return NULL; + } + nvmewqe = dma_buf->iocbq; + wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; + /* Initialize WQE */ + memset(wqe, 0, sizeof(union lpfc_wqe)); + /* Word 7 */ + bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI); + bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); + bf_set(wqe_pu, &wqe->generic.wqe_com, 1); + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); + bf_set(wqe_qosd, &wqe->generic.wqe_com, 0); + + dma_buf->iocbq->context1 = NULL; + spin_lock(&phba->sli4_hba.sgl_list_lock); + dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + if (!dma_buf->sglq) { + lpfc_sli_release_iocbq(phba, dma_buf->iocbq); + kfree(dma_buf->context); + pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, + dma_buf->dbuf.phys); + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6132 Ran out of nvmet XRIs\n"); + return NULL; + } + return dma_buf; +} + +/** + * lpfc_sli4_nvmet_free - Frees a receive buffer + * @phba: HBA buffer was allocated for + * @dmab: DMA Buffer container returned by lpfc_sli4_rbq_alloc + * + * Description: Frees both the container and the DMA-mapped buffers returned by + * lpfc_sli4_nvmet_alloc. + * + * Notes: Can be called with or without locks held. 
+ * + * Returns: None + **/ +void +lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) +{ + unsigned long flags; + + __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag); + dmab->sglq->state = SGL_FREED; + dmab->sglq->ndlp = NULL; + + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags); + list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags); + + lpfc_sli_release_iocbq(phba, dmab->iocbq); + kfree(dmab->context); + pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); + pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); + kfree(dmab); } /** @@ -565,13 +744,13 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) return; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); /* Check whether HBQ is still in use */ spin_lock_irqsave(&phba->hbalock, flags); if (!phba->hbq_in_use) { spin_unlock_irqrestore(&phba->hbalock, flags); return; } - hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); list_del(&hbq_entry->dbuf.list); if (hbq_entry->tag == -1) { (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) @@ -586,3 +765,48 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) } return; } + +/** + * lpfc_rq_buf_free - Free a RQ DMA buffer + * @phba: HBA buffer is associated with + * @mp: Buffer to free + * + * Description: Frees the given DMA buffer in the appropriate way given by + * reposting it to its associated RQ so it can be reused. + * + * Notes: Takes phba->hbalock. Can be called with or without other locks held. + * + * Returns: None + **/ +void +lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) +{ + struct lpfc_rqb *rqbp; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + struct rqb_dmabuf *rqb_entry; + unsigned long flags; + int rc; + + if (!mp) + return; + + rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf); + rqbp = rqb_entry->hrq->rqbp; + + spin_lock_irqsave(&phba->hbalock, flags); + list_del(&rqb_entry->hbuf.list); + hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys); + hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys); + drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys); + drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); + rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); + if (rc < 0) { + (rqbp->rqb_free_buffer)(phba, rqb_entry); + } else { + list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); + rqbp->buffer_count++; + } + + spin_unlock_irqrestore(&phba->hbalock, flags); +} diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h index f2b1bbcb196f..b93e78f671fb 100644 --- a/drivers/scsi/lpfc/lpfc_nl.h +++ b/drivers/scsi/lpfc/lpfc_nl.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2010 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 56a3df4fddb0..061626bdf701 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -28,6 +30,9 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fs.h> + +#include <linux/nvme-fc-driver.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -35,8 +40,9 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -204,10 +210,11 @@ int lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(abort_list); - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; + pring = lpfc_phba_elsring(phba); + /* Abort outstanding I/O on NPort <nlp_DID> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, "2819 Abort outstanding I/O on NPort x%x " @@ -283,6 +290,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t ed_tov; LPFC_MBOXQ_t *mbox; struct ls_rjt stat; + uint32_t vid, flag; int rc; memset(&stat, 0, sizeof (struct ls_rjt)); @@ -418,6 +426,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_can_disctmo(vport); } + ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && + sp->cmn.valid_vendor_ver_level) { + vid = be32_to_cpu(sp->un.vv.vid); + flag = be32_to_cpu(sp->un.vv.flags); + if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) + ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) goto out; @@ -707,6 +724,7 @@ static void lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) { + struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint32_t *lp; PRLI *npr; @@ -720,16 +738,32 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_flag &= ~NLP_FIRSTBURST; - if (npr->prliType == PRLI_FCP_TYPE) { - if (npr->initiatorFunc) - ndlp->nlp_type |= NLP_FCP_INITIATOR; + if ((npr->prliType == PRLI_FCP_TYPE) || + (npr->prliType == PRLI_NVME_TYPE)) { + if (npr->initiatorFunc) { + if (npr->prliType == PRLI_FCP_TYPE) + ndlp->nlp_type |= NLP_FCP_INITIATOR; + if (npr->prliType == PRLI_NVME_TYPE) + ndlp->nlp_type |= NLP_NVME_INITIATOR; + } if (npr->targetFunc) { - ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->prliType == PRLI_FCP_TYPE) + 
ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->prliType == PRLI_NVME_TYPE) + ndlp->nlp_type |= NLP_NVME_TARGET; if (npr->writeXferRdyDis) ndlp->nlp_flag |= NLP_FIRSTBURST; } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; + + /* If this driver is in nvme target mode, set the ndlp's fc4 + * type to NVME provided the PRLI response claims NVME FC4 + * type. Target mode does not issue gft_id so doesn't get + * the fc4 type set until now. + */ + if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE)) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; } if (rport) { /* We need to update the rport role values */ @@ -743,7 +777,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, "rport rolechg: role:x%x did:x%x flg:x%x", roles, ndlp->nlp_DID, ndlp->nlp_flag); - fc_remote_port_rolechg(rport, roles); + if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_remote_port_rolechg(rport, roles); } } @@ -1026,6 +1061,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, *rspiocb; struct lpfc_dmabuf *pcmd, *prsp, *mp; uint32_t *lp; + uint32_t vid, flag; IOCB_t *irsp; struct serv_parm *sp; uint32_t ed_tov; @@ -1094,6 +1130,16 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ed_tov = (phba->fc_edtov + 999999) / 1000000; } + ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && + sp->cmn.valid_vendor_ver_level) { + vid = be32_to_cpu(sp->un.vv.vid); + flag = be32_to_cpu(sp->un.vv.flags); + if ((vid == LPFC_VV_EMLX_ID) && + (flag & LPFC_VV_SUPPRESS_RSP)) + ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + } + /* * Use the larger EDTOV * RATOV = 2 * EDTOV for pt-to-pt @@ -1489,8 +1535,38 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + struct ls_rjt stat; + + if (vport->phba->nvmet_support) { + /* NVME Target mode. Handle and respond to the PRLI and + * transition to UNMAPPED provided the RPI has completed + * registration. + */ + if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + lpfc_rcv_prli(vport, ndlp, cmdiocb); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } else { + /* RPI registration has not completed. Reject the PRLI + * to prevent an illegal state transition when the + * rpi registration does complete. + */ + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC, + "6115 NVMET ndlp rpi %d state " + "unknown, state x%x flags x%08x\n", + ndlp->nlp_rpi, ndlp->nlp_state, + ndlp->nlp_flag); + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, + ndlp, NULL); + } + } else { + /* Initiator mode. */ + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + } - lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } @@ -1573,9 +1649,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; MAILBOX_t *mb = &pmb->u.mb; uint32_t did = mb->un.varWords[1]; + int rc = 0; if (mb->mbxStatus) { /* RegLogin failed */ @@ -1610,19 +1688,55 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } /* SLI4 ports have preallocated logical rpis. 
*/ - if (vport->phba->sli_rev < LPFC_SLI_REV4) + if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; /* Only if we are not a fabric nport do we issue PRLI */ - if (!(ndlp->nlp_type & NLP_FABRIC)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "3066 RegLogin Complete on x%x x%x x%x\n", + did, ndlp->nlp_type, ndlp->nlp_fc4_type); + if (!(ndlp->nlp_type & NLP_FABRIC) && + (phba->nvmet_support == 0)) { + /* The driver supports FCP and NVME concurrently. If the + * ndlp's nlp_fc4_type is still zero, the driver doesn't + * know what PRLI to send yet. Figure that out now and + * call PRLI depending on the outcome. + */ + if (vport->fc_flag & FC_PT2PT) { + /* If we are pt2pt, there is no Fabric to determine + * the FC4 type of the remote nport. So if NVME + * is configured try it. + */ + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + /* We need to update the localport also */ + lpfc_nvme_update_localport(vport); + } + + } else if (ndlp->nlp_fc4_type == 0) { + rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, + 0, ndlp->nlp_DID); + return ndlp->nlp_state; + } + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); lpfc_issue_els_prli(vport, ndlp, 0); } else { - ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support) + phba->targetport->port_id = vport->fc_myDID; + + /* Only Fabric ports should transition. NVME target + * must complete PRLI. + */ + if (ndlp->nlp_type & NLP_FABRIC) { + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } } return ndlp->nlp_state; } @@ -1663,7 +1777,14 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); - ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + + /* If we are a target we won't immediately transition into PRLI, + * so if REG_LOGIN already completed we don't need to ignore it. + */ + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || + !vport->phba->nvmet_support) + ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(shost->host_lock); lpfc_disc_set_adisc(vport, ndlp); @@ -1739,10 +1860,23 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_hba *phba = vport->phba; IOCB_t *irsp; PRLI *npr; + struct lpfc_nvme_prli *nvpr; + void *temp_ptr; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; - npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); + + /* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp + * format is different so NULL the two PRLI types so that the + * driver correctly gets the correct context. + */ + npr = NULL; + nvpr = NULL; + temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); + if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ) + npr = (PRLI *) temp_ptr; + else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ) + nvpr = (struct lpfc_nvme_prli *) temp_ptr; irsp = &rspiocb->iocb; if (irsp->ulpStatus) { @@ -1750,7 +1884,21 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, vport->cfg_restrict_login) { goto out; } + + /* The LS Req had some error. 
Don't let this be a + * target. + */ + if ((ndlp->fc4_prli_sent == 1) && + (ndlp->nlp_state == NLP_STE_PRLI_ISSUE) && + (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR))) + /* The FCP PRLI completed successfully but + * the NVME PRLI failed. Since they are sent in + * succession, allow the FCP to complete. + */ + goto out_err; + ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; + ndlp->nlp_type |= NLP_FCP_INITIATOR; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); return ndlp->nlp_state; } @@ -1758,9 +1906,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Check out PRLI rsp */ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + + /* NVME or FCP first burst must be negotiated for each PRLI. */ ndlp->nlp_flag &= ~NLP_FIRSTBURST; - if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && + ndlp->nvme_fb_size = 0; + if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) && (npr->prliType == PRLI_FCP_TYPE)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6028 FCP NPR PRLI Cmpl Init %d Target %d\n", + npr->initiatorFunc, + npr->targetFunc); if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; if (npr->targetFunc) { @@ -1770,6 +1925,49 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; + + /* PRLI completed. Decrement count. */ + ndlp->fc4_prli_sent--; + } else if (nvpr && + (bf_get_be32(prli_acc_rsp_code, nvpr) == + PRLI_REQ_EXECUTED) && + (bf_get_be32(prli_type_code, nvpr) == + PRLI_NVME_TYPE)) { + + /* Complete setting up the remote ndlp personality. */ + if (bf_get_be32(prli_init, nvpr)) + ndlp->nlp_type |= NLP_NVME_INITIATOR; + + /* Target driver cannot solicit NVME FB. */ + if (bf_get_be32(prli_tgt, nvpr)) { + ndlp->nlp_type |= NLP_NVME_TARGET; + if ((bf_get_be32(prli_fba, nvpr) == 1) && + (bf_get_be32(prli_fb_sz, nvpr) > 0) && + (phba->cfg_nvme_enable_fb) && + (!phba->nvmet_support)) { + /* Both sides support FB. The target's first + * burst size is a 512 byte encoded value. + */ + ndlp->nlp_flag |= NLP_FIRSTBURST; + ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, + nvpr); + } + } + + if (bf_get_be32(prli_recov, nvpr)) + ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6029 NVME PRLI Cmpl w1 x%08x " + "w4 x%08x w5 x%08x flag x%x, " + "fcp_info x%x nlp_type x%x\n", + be32_to_cpu(nvpr->word1), + be32_to_cpu(nvpr->word4), + be32_to_cpu(nvpr->word5), + ndlp->nlp_flag, ndlp->nlp_fcp_info, + ndlp->nlp_type); + /* PRLI completed. Decrement count. */ + ndlp->fc4_prli_sent--; } if (!(ndlp->nlp_type & NLP_FCP_TARGET) && (vport->port_type == LPFC_NPIV_PORT) && @@ -1785,11 +1983,24 @@ out: return ndlp->nlp_state; } - ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; - if (ndlp->nlp_type & NLP_FCP_TARGET) - lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); - else - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); +out_err: + /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs + * are complete. 
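Concretely: when the driver has solicited both an FCP and an NVME PRLI for the same node, fc4_prli_sent stays above zero until each completion branch above decrements it, so only the final completion, the one that brings the count to zero, performs the MAPPED/UNMAPPED state transition below; earlier completions just log that PRLIs are still outstanding.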
+ */ + if (ndlp->fc4_prli_sent == 0) { + ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) + lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); + else + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } else + lpfc_printf_vlog(vport, + KERN_INFO, LOG_ELS, + "3067 PRLI's still outstanding " + "on x%06x - count %d, Pend Node Mode " + "transition...\n", + ndlp->nlp_DID, ndlp->fc4_prli_sent); + return ndlp->nlp_state; } @@ -2104,7 +2315,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* flush the target */ - lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], + lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING], ndlp->nlp_sid, 0, LPFC_CTX_TGT); /* Treat like rcv logo */ diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c new file mode 100644 index 000000000000..609a908ea9db --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -0,0 +1,2464 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + ********************************************************************/ +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <asm/unaligned.h> +#include <linux/crc-t10dif.h> +#include <net/checksum.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fs.h> + +#include <linux/nvme.h> +#include <linux/nvme-fc-driver.h> +#include <linux/nvme-fc.h> +#include "lpfc_version.h" +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_nvme.h" +#include "lpfc_scsi.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +/* NVME initiator-based functions */ + +static struct lpfc_nvme_buf * +lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp); + +static void +lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *); + + +/** + * lpfc_nvme_create_queue - + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. 
+ * @handle: An opaque driver handle used in follow-up calls. + * + * Driver registers this routine to preallocate and initialize any + * internal data structures to bind the @qidx to its internal IO queues. + * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ. + * + * Return value : + * 0 - Success + * -EINVAL - Unsupported input value. + * -ENOMEM - Could not alloc necessary memory + **/ +static int +lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, + unsigned int qidx, u16 qsize, + void **handle) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_nvme_qhandle *qhandle; + char *str; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); + if (qhandle == NULL) + return -ENOMEM; + + qhandle->cpu_id = smp_processor_id(); + qhandle->qidx = qidx; + /* + * NVME qidx == 0 is the admin queue, so both admin queue + * and first IO queue will use MSI-X vector and associated + * EQ/CQ/WQ at index 0. After that they are sequentially assigned. + */ + if (qidx) { + str = "IO "; /* IO queue */ + qhandle->index = ((qidx - 1) % + vport->phba->cfg_nvme_io_channel); + } else { + str = "ADM"; /* Admin queue */ + qhandle->index = qidx; + } + + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6073 Binding %s HdwQueue %d (cpu %d) to " + "io_channel %d qhandle %p\n", str, + qidx, qhandle->cpu_id, qhandle->index, qhandle); + *handle = (void *)qhandle; + return 0; +} + +/** + * lpfc_nvme_delete_queue - + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. + * @handle: An opaque driver handle from lpfc_nvme_create_queue + * + * Driver registers this routine to free + * any internal data structures to bind the @qidx to its internal + * IO queues. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static void +lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, + unsigned int qidx, + void *handle) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n", + lport, qidx, handle); + kfree(handle); +} + +static void +lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) +{ + struct lpfc_nvme_lport *lport = localport->private; + + /* release any threads waiting for the unreg to complete */ + complete(&lport->lport_unreg_done); +} + +/* lpfc_nvme_remoteport_delete + * + * @remoteport: Pointer to an nvme transport remoteport instance. + * + * This is a template downcall. NVME transport calls this function + * when it has completed the unregistration of a previously + * registered remoteport. + * + * Return value : + * None + */ +void +lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) +{ + struct lpfc_nvme_rport *rport = remoteport->private; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + + ndlp = rport->ndlp; + if (!ndlp) + goto rport_err; + + vport = ndlp->vport; + if (!vport) + goto rport_err; + + /* Remove this rport from the lport's list - memory is owned by the + * transport. Remove the ndlp reference for the NVME transport before + * calling state machine to remove the node, this is devloss = 0 + * semantics. 
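As a worked example of the qhandle mapping in lpfc_nvme_create_queue() above: with cfg_nvme_io_channel = 4, the admin queue (qidx 0) and the first IO queue (qidx 1) both land on io_channel index 0, qidx 2 through 4 map to indexes 1 through 3, and qidx 5 wraps back to 0, since IO queues use (qidx - 1) % cfg_nvme_io_channel.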
+ */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6146 remoteport delete complete %p\n", + remoteport); + list_del(&rport->list); + lpfc_nlp_put(ndlp); + + rport_err: + /* This call has to execute as long as the rport is valid. + * Release any threads waiting for the unreg to complete. + */ + complete(&rport->rport_unreg_done); +} + +static void +lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_vport *vport = cmdwqe->vport; + uint32_t status; + struct nvmefc_ls_req *pnvme_lsreq; + struct lpfc_dmabuf *buf_ptr; + struct lpfc_nodelist *ndlp; + + vport->phba->fc4NvmeLsCmpls++; + + pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2; + status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + ndlp = (struct lpfc_nodelist *)cmdwqe->context1; + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6047 nvme cmpl Enter " + "Data %p DID %x Xri: %x status %x cmd:%p lsreg:%p " + "bmp:%p ndlp:%p\n", + pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, + cmdwqe->sli4_xritag, status, + cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp); + + lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n", + cmdwqe->sli4_xritag, status, wcqe->parameter); + + if (cmdwqe->context3) { + buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3; + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + cmdwqe->context3 = NULL; + } + if (pnvme_lsreq->done) + pnvme_lsreq->done(pnvme_lsreq, status); + else + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6046 nvme cmpl without done call back? " + "Data %p DID %x Xri: %x status %x\n", + pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, + cmdwqe->sli4_xritag, status); + if (ndlp) { + lpfc_nlp_put(ndlp); + cmdwqe->context1 = NULL; + } + lpfc_sli_release_iocbq(phba, cmdwqe); +} + +static int +lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, + struct lpfc_dmabuf *inp, + struct nvmefc_ls_req *pnvme_lsreq, + void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_wcqe_complete *), + struct lpfc_nodelist *ndlp, uint32_t num_entry, + uint32_t tmo, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + union lpfc_wqe *wqe; + struct lpfc_iocbq *genwqe; + struct ulp_bde64 *bpl; + struct ulp_bde64 bde; + int i, rc, xmit_len, first_len; + + /* Allocate buffer for command WQE */ + genwqe = lpfc_sli_get_iocbq(phba); + if (genwqe == NULL) + return 1; + + wqe = &genwqe->wqe; + memset(wqe, 0, sizeof(union lpfc_wqe)); + + genwqe->context3 = (uint8_t *)bmp; + genwqe->iocb_flag |= LPFC_IO_NVME_LS; + + /* Save for completion so we can release these resources */ + genwqe->context1 = lpfc_nlp_get(ndlp); + genwqe->context2 = (uint8_t *)pnvme_lsreq; + /* Fill in payload, bp points to frame payload */ + + if (!tmo) + /* FC spec states we need 3 * ratov for CT requests */ + tmo = (3 * phba->fc_ratov); + + /* For this command calculate the xmit length of the request bde. 
*/ + xmit_len = 0; + first_len = 0; + bpl = (struct ulp_bde64 *)bmp->virt; + for (i = 0; i < num_entry; i++) { + bde.tus.w = bpl[i].tus.w; + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) + break; + xmit_len += bde.tus.f.bdeSize; + if (i == 0) + first_len = xmit_len; + } + + genwqe->rsvd2 = num_entry; + genwqe->hba_wqidx = 0; + + /* Words 0 - 2 */ + wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->generic.bde.tus.f.bdeSize = first_len; + wqe->generic.bde.addrLow = bpl[0].addrLow; + wqe->generic.bde.addrHigh = bpl[0].addrHigh; + + /* Word 3 */ + wqe->gen_req.request_payload_len = first_len; + + /* Word 4 */ + + /* Word 5 */ + bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); + bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); + bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1); + bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL); + bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1)); + bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3); + bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE); + bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI); + + /* Word 8 */ + wqe->gen_req.wqe_com.abort_tag = genwqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag); + + /* Word 10 */ + bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); + bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND); + + + /* Issue GEN REQ WQE for NPORT <did> */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6050 Issue GEN REQ WQE to NPORT x%x " + "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n", + ndlp->nlp_DID, genwqe->iotag, + vport->port_state, + genwqe, pnvme_lsreq, bmp, xmit_len, first_len); + genwqe->wqe_cmpl = cmpl; + genwqe->iocb_cmpl = NULL; + genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; + genwqe->vport = vport; + genwqe->retry = retry; + + lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", + genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); + + rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe); + if (rc == WQE_ERROR) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "6045 Issue GEN REQ WQE to NPORT x%x " + "Data: x%x x%x\n", + ndlp->nlp_DID, genwqe->iotag, + vport->port_state); + lpfc_sli_release_iocbq(phba, genwqe); + return 1; + } + return 0; +} + +/** + * lpfc_nvme_ls_req - Issue an Link Service request + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * + * Driver registers this routine to handle any link service request + * from the nvme_fc transport to a remote nvme-aware port. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. 
+ **/ +static int +lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + struct nvmefc_ls_req *pnvme_lsreq) +{ + int ret = 0; + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + struct ulp_bde64 *bpl; + struct lpfc_dmabuf *bmp; + + /* there are two dma buf in the request, actually there is one and + * the second one is just the start address + cmd size. + * Before calling lpfc_nvme_gen_req these buffers need to be wrapped + * in a lpfc_dmabuf struct. When freeing we just free the wrapper + * because the nvem layer owns the data bufs. + * We do not have to break these packets open, we don't care what is in + * them. And we do not have to look at the resonse data, we only care + * that we got a response. All of the caring is going to happen in the + * nvme-fc layer. + */ + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + + ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6043 Could not find node for DID %x\n", + pnvme_rport->port_id); + return 1; + } + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6044 Could not find node for DID %x\n", + pnvme_rport->port_id); + return 2; + } + INIT_LIST_HEAD(&bmp->list); + bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); + if (!bmp->virt) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6042 Could not find node for DID %x\n", + pnvme_rport->port_id); + kfree(bmp); + return 3; + } + bpl = (struct ulp_bde64 *)bmp->virt; + bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); + bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); + bpl->tus.f.bdeFlags = 0; + bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + + bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma)); + bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma)); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + + /* Expand print to include key fields. */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6051 ENTER. lport %p, rport %p lsreq%p rqstlen:%d " + "rsplen:%d %pad %pad\n", + pnvme_lport, pnvme_rport, + pnvme_lsreq, pnvme_lsreq->rqstlen, + pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, + &pnvme_lsreq->rspdma); + + vport->phba->fc4NvmeLsRequests++; + + /* Hardcode the wait to 30 seconds. Connections are failing otherwise. + * This code allows it all to work. + */ + ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, + pnvme_lsreq, lpfc_nvme_cmpl_gen_req, + ndlp, 2, 30, 0); + if (ret != WQE_SUCCESS) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6052 EXIT. issue ls wqe failed lport %p, " + "rport %p lsreq%p Status %x DID %x\n", + pnvme_lport, pnvme_rport, pnvme_lsreq, + ret, ndlp->nlp_DID); + lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); + kfree(bmp); + return ret; + } + + /* Stub in routine and return 0 for now. */ + return ret; +} + +/** + * lpfc_nvme_ls_abort - Issue an Link Service request + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * + * Driver registers this routine to handle any link service request + * from the nvme_fc transport to a remote nvme-aware port. 
+ * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static void +lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + struct nvmefc_ls_req *pnvme_lsreq) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + LIST_HEAD(abort_list); + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *wqe, *next_wqe; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + phba = vport->phba; + + ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + "6049 Could not find node for DID %x\n", + pnvme_rport->port_id); + return; + } + + /* Expand print to include key fields. */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, + "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d " + "rsplen:%d %pad %pad\n", + pnvme_lport, pnvme_rport, + pnvme_lsreq, pnvme_lsreq->rqstlen, + pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, + &pnvme_lsreq->rspdma); + + /* + * Lock the ELS ring txcmplq and build a local list of all ELS IOs + * that need an ABTS. The IOs need to stay on the txcmplq so that + * the abort operation completes them successfully. + */ + pring = phba->sli4_hba.nvmels_wq->pring; + spin_lock_irq(&phba->hbalock); + spin_lock(&pring->ring_lock); + list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) { + /* Add to abort_list on on NDLP match. */ + if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) { + wqe->iocb_flag |= LPFC_DRIVER_ABORTED; + list_add_tail(&wqe->dlist, &abort_list); + } + } + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + + /* Abort the targeted IOs and remove them from the abort list. */ + list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) { + spin_lock_irq(&phba->hbalock); + list_del_init(&wqe->dlist); + lpfc_sli_issue_abort_iotag(phba, pring, wqe); + spin_unlock_irq(&phba->hbalock); + } +} + +/* Fix up the existing sgls for NVME IO. */ +static void +lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, + struct lpfc_nvme_buf *lpfc_ncmd, + struct nvmefc_fcp_req *nCmd) +{ + struct sli4_sge *sgl; + union lpfc_wqe128 *wqe; + uint32_t *wptr, *dptr; + + /* + * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to + * match NVME. NVME sends 96 bytes. Also, use the + * nvme commands command and response dma addresses + * rather than the virtual memory to ease the restore + * operation. + */ + sgl = lpfc_ncmd->nvme_sgl; + sgl->sge_len = cpu_to_le32(nCmd->cmdlen); + + sgl++; + + /* Setup the physical region for the FCP RSP */ + sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma)); + sgl->word2 = le32_to_cpu(sgl->word2); + if (nCmd->sg_cnt) + bf_set(lpfc_sli4_sge_last, sgl, 0); + else + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(nCmd->rsplen); + + /* + * Get a local pointer to the built-in wqe and correct + * the cmd size to match NVME's 96 bytes and fix + * the dma address. 
+ */ + + /* 128 byte wqe support here */ + wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe; + + /* Word 0-2 - NVME CMND IU (embedded payload) */ + wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED; + wqe->generic.bde.tus.f.bdeSize = 60; + wqe->generic.bde.addrHigh = 0; + wqe->generic.bde.addrLow = 64; /* Word 16 */ + + /* Word 3 */ + bf_set(payload_offset_len, &wqe->fcp_icmd, + (nCmd->rsplen + nCmd->cmdlen)); + + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); + + /* + * Embed the payload in the last half of the WQE + * WQE words 16-30 get the NVME CMD IU payload + * + * WQE Word 16 is already setup with flags + * WQE words 17-19 get payload Words 2-4 + * WQE words 20-21 get payload Words 6-7 + * WQE words 22-29 get payload Words 16-23 + */ + wptr = &wqe->words[17]; /* WQE ptr */ + dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */ + dptr += 2; /* Skip Words 0-1 in payload */ + + *wptr++ = *dptr++; /* Word 2 */ + *wptr++ = *dptr++; /* Word 3 */ + *wptr++ = *dptr++; /* Word 4 */ + dptr++; /* Skip Word 5 in payload */ + *wptr++ = *dptr++; /* Word 6 */ + *wptr++ = *dptr++; /* Word 7 */ + dptr += 8; /* Skip Words 8-15 in payload */ + *wptr++ = *dptr++; /* Word 16 */ + *wptr++ = *dptr++; /* Word 17 */ + *wptr++ = *dptr++; /* Word 18 */ + *wptr++ = *dptr++; /* Word 19 */ + *wptr++ = *dptr++; /* Word 20 */ + *wptr++ = *dptr++; /* Word 21 */ + *wptr++ = *dptr++; /* Word 22 */ + *wptr = *dptr; /* Word 23 */ +} + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +static void +lpfc_nvme_ktime(struct lpfc_hba *phba, + struct lpfc_nvme_buf *lpfc_ncmd) +{ + uint64_t seg1, seg2, seg3, seg4; + + if (!phba->ktime_on) + return; + if (!lpfc_ncmd->ts_last_cmd || + !lpfc_ncmd->ts_cmd_start || + !lpfc_ncmd->ts_cmd_wqput || + !lpfc_ncmd->ts_isr_cmpl || + !lpfc_ncmd->ts_data_nvme) + return; + if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd) + return; + if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start) + return; + if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput) + return; + if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl) + return; + /* + * Segment 1 - Time from Last FCP command cmpl is handed + * off to NVME Layer to start of next command. + * Segment 2 - Time from Driver receives a IO cmd start + * from NVME Layer to WQ put is done on IO cmd. + * Segment 3 - Time from Driver WQ put is done on IO cmd + * to MSI-X ISR for IO cmpl. + * Segment 4 - Time from MSI-X ISR for IO cmpl to when + * cmpl is handled off to the NVME Layer. 
+ */ + seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd; + if (seg1 > 5000000) /* 5 ms - for sequential IOs */ + return; + + /* Calculate times relative to start of IO */ + seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start); + seg3 = (lpfc_ncmd->ts_isr_cmpl - + lpfc_ncmd->ts_cmd_start) - seg2; + seg4 = (lpfc_ncmd->ts_data_nvme - + lpfc_ncmd->ts_cmd_start) - seg2 - seg3; + phba->ktime_data_samples++; + phba->ktime_seg1_total += seg1; + if (seg1 < phba->ktime_seg1_min) + phba->ktime_seg1_min = seg1; + else if (seg1 > phba->ktime_seg1_max) + phba->ktime_seg1_max = seg1; + phba->ktime_seg2_total += seg2; + if (seg2 < phba->ktime_seg2_min) + phba->ktime_seg2_min = seg2; + else if (seg2 > phba->ktime_seg2_max) + phba->ktime_seg2_max = seg2; + phba->ktime_seg3_total += seg3; + if (seg3 < phba->ktime_seg3_min) + phba->ktime_seg3_min = seg3; + else if (seg3 > phba->ktime_seg3_max) + phba->ktime_seg3_max = seg3; + phba->ktime_seg4_total += seg4; + if (seg4 < phba->ktime_seg4_min) + phba->ktime_seg4_min = seg4; + else if (seg4 > phba->ktime_seg4_max) + phba->ktime_seg4_max = seg4; + + lpfc_ncmd->ts_last_cmd = 0; + lpfc_ncmd->ts_cmd_start = 0; + lpfc_ncmd->ts_cmd_wqput = 0; + lpfc_ncmd->ts_isr_cmpl = 0; + lpfc_ncmd->ts_data_nvme = 0; +} +#endif + +/** + * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static void +lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvme_buf *lpfc_ncmd = + (struct lpfc_nvme_buf *)pwqeIn->context1; + struct lpfc_vport *vport = pwqeIn->vport; + struct nvmefc_fcp_req *nCmd; + struct nvme_fc_ersp_iu *ep; + struct nvme_fc_cmd_iu *cp; + struct lpfc_nvme_rport *rport; + struct lpfc_nodelist *ndlp; + unsigned long flags; + uint32_t code; + uint16_t cid, sqhd, data; + uint32_t *ptr; + + /* Sanity check on return of outstanding command */ + if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + "6071 Completion pointers bad on wqe %p.\n", + wcqe); + return; + } + phba->fc4NvmeIoCmpls++; + + nCmd = lpfc_ncmd->nvmeCmd; + rport = lpfc_ncmd->nrport; + + lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter); + /* + * Catch race where our node has transitioned, but the + * transport is still transitioning. + */ + ndlp = rport->ndlp; + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + "6061 rport %p, ndlp %p, DID x%06x ndlp " + "not ready.\n", + rport, ndlp, rport->remoteport->port_id); + + ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6062 Ignoring NVME cmpl. 
No ndlp\n"); + goto out_err; + } + } + + code = bf_get(lpfc_wcqe_c_code, wcqe); + if (code == CQE_CODE_NVME_ERSP) { + /* For this type of CQE, we need to rebuild the rsp */ + ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; + + /* + * Get Command Id from cmd to plug into response. This + * code is not needed in the next NVME Transport drop. + */ + cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; + cid = cp->sqe.common.command_id; + + /* + * RSN is in CQE word 2 + * SQHD is in CQE Word 3 bits 15:0 + * Cmd Specific info is in CQE Word 1 + * and in CQE Word 0 bits 15:0 + */ + sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe); + + /* Now lets build the NVME ERSP IU */ + ep->iu_len = cpu_to_be16(8); + ep->rsn = wcqe->parameter; + ep->xfrd_len = cpu_to_be32(nCmd->payload_length); + ep->rsvd12 = 0; + ptr = (uint32_t *)&ep->cqe.result.u64; + *ptr++ = wcqe->total_data_placed; + data = bf_get(lpfc_wcqe_c_ersp0, wcqe); + *ptr = (uint32_t)data; + ep->cqe.sq_head = sqhd; + ep->cqe.sq_id = nCmd->sqid; + ep->cqe.command_id = cid; + ep->cqe.status = 0; + + lpfc_ncmd->status = IOSTAT_SUCCESS; + lpfc_ncmd->result = 0; + nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN; + nCmd->transferred_length = nCmd->payload_length; + } else { + lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) & + LPFC_IOCB_STATUS_MASK); + lpfc_ncmd->result = wcqe->parameter; + + /* For NVME, the only failure path that results in an + * IO error is when the adapter rejects it. All other + * conditions are a success case and resolved by the + * transport. + * IOSTAT_FCP_RSP_ERROR means: + * 1. Length of data received doesn't match total + * transfer length in WQE + * 2. If the RSP payload does NOT match these cases: + * a. RSP length 12/24 bytes and all zeros + * b. NVME ERSP + */ + switch (lpfc_ncmd->status) { + case IOSTAT_SUCCESS: + nCmd->transferred_length = wcqe->total_data_placed; + nCmd->rcv_rsplen = 0; + nCmd->status = 0; + break; + case IOSTAT_FCP_RSP_ERROR: + nCmd->transferred_length = wcqe->total_data_placed; + nCmd->rcv_rsplen = wcqe->parameter; + nCmd->status = 0; + /* Sanity check */ + if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) + break; + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6081 NVME Completion Protocol Error: " + "status x%x result x%x placed x%x\n", + lpfc_ncmd->status, lpfc_ncmd->result, + wcqe->total_data_placed); + break; + default: +out_err: + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6072 NVME Completion Error: " + "status x%x result x%x placed x%x\n", + lpfc_ncmd->status, lpfc_ncmd->result, + wcqe->total_data_placed); + nCmd->transferred_length = 0; + nCmd->rcv_rsplen = 0; + nCmd->status = NVME_SC_FC_TRANSPORT_ERROR; + } + } + + /* pick up SLI4 exhange busy condition */ + if (bf_get(lpfc_wcqe_c_xb, wcqe)) + lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; + else + lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; + + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) + atomic_dec(&ndlp->cmd_pending); + + /* Update stats and complete the IO. There is + * no need for dma unprep because the nvme_transport + * owns the dma address. 
+ */ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; + lpfc_ncmd->ts_data_nvme = ktime_get_ns(); + phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme; + lpfc_nvme_ktime(phba, lpfc_ncmd); + } + if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { + if (lpfc_ncmd->cpu != smp_processor_id()) + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6701 CPU Check cmpl: " + "cpu %d expect %d\n", + smp_processor_id(), lpfc_ncmd->cpu); + if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) + phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++; + } +#endif + nCmd->done(nCmd); + + spin_lock_irqsave(&phba->hbalock, flags); + lpfc_ncmd->nrport = NULL; + spin_unlock_irqrestore(&phba->hbalock, flags); + + lpfc_release_nvme_buf(phba, lpfc_ncmd); +} + + +/** + * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @lpfc_nvme_fcreq: IO request from nvme fc to driver. + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static int +lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, + struct lpfc_nvme_buf *lpfc_ncmd, + struct lpfc_nodelist *pnode) +{ + struct lpfc_hba *phba = vport->phba; + struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; + struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); + union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&pwqeq->wqe; + uint32_t req_len; + + if (!pnode || !NLP_CHK_NODE_ACT(pnode)) + return -EINVAL; + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. 
+ */ + wqe->fcp_iwrite.initial_xfer_len = 0; + if (nCmd->sg_cnt) { + if (nCmd->io_dir == NVMEFC_FCP_WRITE) { + /* Word 5 */ + if ((phba->cfg_nvme_enable_fb) && + (pnode->nlp_flag & NLP_FIRSTBURST)) { + req_len = lpfc_ncmd->nvmeCmd->payload_length; + if (req_len < pnode->nvme_fb_size) + wqe->fcp_iwrite.initial_xfer_len = + req_len; + else + wqe->fcp_iwrite.initial_xfer_len = + pnode->nvme_fb_size; + } + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->generic.wqe_com, + CMD_FCP_IWRITE64_WQE); + bf_set(wqe_pu, &wqe->generic.wqe_com, + PARM_READ_CHECK); + + /* Word 10 */ + bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0); + bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, + LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, + LPFC_WQE_LENLOC_WORD4); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, + NVME_WRITE_CMD); + + /* Word 16 */ + wqe->words[16] = LPFC_NVME_EMBED_WRITE; + + phba->fc4NvmeOutputRequests++; + } else { + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->generic.wqe_com, + CMD_FCP_IREAD64_WQE); + bf_set(wqe_pu, &wqe->generic.wqe_com, + PARM_READ_CHECK); + + /* Word 10 */ + bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0); + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, + LPFC_WQE_IOD_READ); + bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, + LPFC_WQE_LENLOC_WORD4); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, + NVME_READ_CMD); + + /* Word 16 */ + wqe->words[16] = LPFC_NVME_EMBED_READ; + + phba->fc4NvmeInputRequests++; + } + } else { + /* Word 4 */ + wqe->fcp_icmd.rsrvd4 = 0; + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_FCP_ICMND64_WQE); + bf_set(wqe_pu, &wqe->generic.wqe_com, 0); + + /* Word 10 */ + bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, + LPFC_WQE_LENLOC_NONE); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD); + + /* Word 16 */ + wqe->words[16] = LPFC_NVME_EMBED_CMD; + + phba->fc4NvmeControlRequests++; + } + /* + * Finish initializing those WQE fields that are independent + * of the nvme_cmnd request_buffer + */ + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); + + /* Word 7 */ + /* Preserve Class data in the ndlp. */ + bf_set(wqe_class, &wqe->generic.wqe_com, + (pnode->nlp_fcp_info & 0x0f)); + + /* Word 8 */ + wqe->generic.wqe_com.abort_tag = pwqeq->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + + pwqeq->vport = vport; + return 0; +} + + +/** + * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @lpfc_nvme_fcreq: IO request from nvme fc to driver. + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. 
+ * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static int +lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, + struct lpfc_nvme_buf *lpfc_ncmd) +{ + struct lpfc_hba *phba = vport->phba; + struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; + union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe; + struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl; + struct scatterlist *data_sg; + struct sli4_sge *first_data_sgl; + dma_addr_t physaddr; + uint32_t num_bde = 0; + uint32_t dma_len; + uint32_t dma_offset = 0; + int nseg, i; + + /* Fix up the command and response DMA stuff. */ + lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd); + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. + */ + if (nCmd->sg_cnt) { + /* + * Jump over the cmd and rsp SGEs. The fix routine + * has already adjusted for this. + */ + sgl += 2; + + first_data_sgl = sgl; + lpfc_ncmd->seg_cnt = nCmd->sg_cnt; + if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6058 Too many sg segments from " + "NVME Transport. Max %d, " + "nvmeIO sg_cnt %d\n", + phba->cfg_sg_seg_cnt, + lpfc_ncmd->seg_cnt); + lpfc_ncmd->seg_cnt = 0; + return 1; + } + + /* + * The driver established a maximum scatter-gather segment count + * during probe that limits the number of sg elements in any + * single nvme command. Just run through the seg_cnt and format + * the sge's. + */ + nseg = nCmd->sg_cnt; + data_sg = nCmd->first_sgl; + for (i = 0; i < nseg; i++) { + if (data_sg == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6059 dptr err %d, nseg %d\n", + i, nseg); + lpfc_ncmd->seg_cnt = 0; + return 1; + } + physaddr = data_sg->dma_address; + dma_len = data_sg->length; + sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); + sgl->word2 = le32_to_cpu(sgl->word2); + if ((num_bde + 1) == nseg) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(dma_len); + + dma_offset += dma_len; + data_sg = sg_next(data_sg); + sgl++; + } + } else { + /* For this clause to be valid, the payload_length + * and sg_cnt must zero. + */ + if (nCmd->payload_length != 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6063 NVME DMA Prep Err: sg_cnt %d " + "payload_length x%x\n", + nCmd->sg_cnt, nCmd->payload_length); + return 1; + } + } + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of WQE here + */ + wqe->fcp_iread.total_xfer_len = nCmd->payload_length; + return 0; +} + +/** + * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @lpfc_nvme_fcreq: IO request from nvme fc to driver. + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport + indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. 
+ **/ +static int +lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + void *hw_queue_handle, + struct nvmefc_fcp_req *pnvme_fcreq) +{ + int ret = 0; + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + struct lpfc_nvme_buf *lpfc_ncmd; + struct lpfc_nvme_rport *rport; + struct lpfc_nvme_qhandle *lpfc_queue_info; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint64_t start = 0; +#endif + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + phba = vport->phba; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) + start = ktime_get_ns(); +#endif + rport = (struct lpfc_nvme_rport *)pnvme_rport->private; + lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle; + + /* + * Catch race where our node has transitioned, but the + * transport is still transitioning. + */ + ndlp = rport->ndlp; + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + "6053 rport %p, ndlp %p, DID x%06x " + "ndlp not ready.\n", + rport, ndlp, pnvme_rport->port_id); + + ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6066 Missing node for DID %x\n", + pnvme_rport->port_id); + ret = -ENODEV; + goto out_fail; + } + } + + /* The remote node has to be a mapped target or it's an error. */ + if ((ndlp->nlp_type & NLP_NVME_TARGET) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + "6036 rport %p, DID x%06x not ready for " + "IO. State x%x, Type x%x\n", + rport, pnvme_rport->port_id, + ndlp->nlp_state, ndlp->nlp_type); + ret = -ENODEV; + goto out_fail; + + } + + /* The node is shared with FCP IO, make sure the IO pending count does + * not exceed the programmed depth. + */ + if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { + ret = -EAGAIN; + goto out_fail; + } + + lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp); + if (lpfc_ncmd == NULL) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6065 driver's buffer pool is empty, " + "IO failed\n"); + ret = -ENOMEM; + goto out_fail; + } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + lpfc_ncmd->ts_cmd_start = start; + lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd; + } +#endif + + /* + * Store the data needed by the driver to issue, abort, and complete + * an IO. + * Do not let the IO hang out forever. There is no midlayer issuing + * an abort so inform the FW of the maximum IO pending time. + */ + pnvme_fcreq->private = (void *)lpfc_ncmd; + lpfc_ncmd->nvmeCmd = pnvme_fcreq; + lpfc_ncmd->nrport = rport; + lpfc_ncmd->start_time = jiffies; + + lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp); + ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); + if (ret) { + ret = -ENOMEM; + goto out_free_nvme_buf; + } + + atomic_inc(&ndlp->cmd_pending); + + /* + * Issue the IO on the WQ indicated by index in the hw_queue_handle. + * This identfier was create in our hardware queue create callback + * routine. The driver now is dependent on the IO queue steering from + * the transport. We are trusting the upper NVME layers know which + * index to use and that they have affinitized a CPU to this hardware + * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. 
+ */ + lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index; + + lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + lpfc_queue_info->index, ndlp->nlp_DID); + + ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); + if (ret) { + atomic_dec(&ndlp->cmd_pending); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6113 FCP could not issue WQE err %x " + "sid: x%x did: x%x oxid: x%x\n", + ret, vport->fc_myDID, ndlp->nlp_DID, + lpfc_ncmd->cur_iocbq.sli4_xritag); + ret = -EINVAL; + goto out_free_nvme_buf; + } + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) + lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); + + if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { + lpfc_ncmd->cpu = smp_processor_id(); + if (lpfc_ncmd->cpu != lpfc_queue_info->index) { + /* Check for admin queue */ + if (lpfc_queue_info->qidx) { + lpfc_printf_vlog(vport, + KERN_ERR, LOG_NVME_IOERR, + "6702 CPU Check cmd: " + "cpu %d wq %d\n", + lpfc_ncmd->cpu, + lpfc_queue_info->index); + } + lpfc_ncmd->cpu = lpfc_queue_info->index; + } + if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) + phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++; + } +#endif + return 0; + + out_free_nvme_buf: + lpfc_release_nvme_buf(phba, lpfc_ncmd); + out_fail: + return ret; +} + +/** + * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. + * @phba: Pointer to HBA context object + * @cmdiocb: Pointer to command iocb object. + * @rspiocb: Pointer to response iocb object. + * + * This is the callback function for any NVME FCP IO that was aborted. + * + * Return value: + * None + **/ +void +lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_wcqe_complete *abts_cmpl) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6145 ABORT_XRI_CN completing on rpi x%x " + "original iotag x%x, abort cmd iotag x%x " + "req_tag x%x, status x%x, hwstatus x%x\n", + cmdiocb->iocb.un.acxri.abortContextTag, + cmdiocb->iocb.un.acxri.abortIoTag, + cmdiocb->iotag, + bf_get(lpfc_wcqe_c_request_tag, abts_cmpl), + bf_get(lpfc_wcqe_c_status, abts_cmpl), + bf_get(lpfc_wcqe_c_hw_status, abts_cmpl)); + lpfc_sli_release_iocbq(phba, cmdiocb); +} + +/** + * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @lpfc_nvme_fcreq: IO request from nvme fc to driver. + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * + * Driver registers this routine as its nvme request io abort handler. This + * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. This routine + * is executed asynchronously - one the target is validated as "MAPPED" and + * ready for IO, the driver issues the abort request and returns. 
+ * + * Return value: + * None + **/ +static void +lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + void *hw_queue_handle, + struct nvmefc_fcp_req *pnvme_fcreq) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + struct lpfc_nvme_rport *rport; + struct lpfc_nvme_buf *lpfc_nbuf; + struct lpfc_iocbq *abts_buf; + struct lpfc_iocbq *nvmereq_wqe; + union lpfc_wqe *abts_wqe; + unsigned long flags; + int ret_val; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + rport = (struct lpfc_nvme_rport *)pnvme_rport->private; + vport = lport->vport; + phba = vport->phba; + + /* Announce entry to new IO submit field. */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + "6002 Abort Request to rport DID x%06x " + "for nvme_fc_req %p\n", + pnvme_rport->port_id, + pnvme_fcreq); + + /* + * Catch race where our node has transitioned, but the + * transport is still transitioning. + */ + ndlp = rport->ndlp; + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS, + "6054 rport %p, ndlp %p, DID x%06x ndlp " + " not ready.\n", + rport, ndlp, pnvme_rport->port_id); + + ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + "6055 Could not find node for " + "DID %x\n", + pnvme_rport->port_id); + return; + } + } + + /* The remote node has to be ready to send an abort. */ + if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && + !(ndlp->nlp_type & NLP_NVME_TARGET)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS, + "6048 rport %p, DID x%06x not ready for " + "IO. State x%x, Type x%x\n", + rport, pnvme_rport->port_id, + ndlp->nlp_state, ndlp->nlp_type); + return; + } + + /* If the hba is getting reset, this flag is set. It is + * cleared when the reset is complete and rings reestablished. + */ + spin_lock_irqsave(&phba->hbalock, flags); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6139 Driver in reset cleanup - flushing " + "NVME Req now. hba_flag x%x\n", + phba->hba_flag); + return; + } + + lpfc_nbuf = (struct lpfc_nvme_buf *)pnvme_fcreq->private; + if (!lpfc_nbuf) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6140 NVME IO req has no matching lpfc nvme " + "io buffer. Skipping abort req.\n"); + return; + } else if (!lpfc_nbuf->nvmeCmd) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6141 lpfc NVME IO req has no nvme_fcreq " + "io buffer. Skipping abort req.\n"); + return; + } + + /* + * The lpfc_nbuf and the mapped nvme_fcreq in the driver's + * state must match the nvme_fcreq passed by the nvme + * transport. If they don't match, it is likely the driver + * has already completed the NVME IO and the nvme transport + * has not seen it yet. + */ + if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6143 NVME req mismatch: " + "lpfc_nbuf %p nvmeCmd %p, " + "pnvme_fcreq %p. Skipping Abort\n", + lpfc_nbuf, lpfc_nbuf->nvmeCmd, + pnvme_fcreq); + return; + } + + /* Don't abort IOs no longer on the pending queue. 
*/ + nvmereq_wqe = &lpfc_nbuf->cur_iocbq; + if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6142 NVME IO req %p not queued - skipping " + "abort req\n", + pnvme_fcreq); + return; + } + + lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n", + nvmereq_wqe->sli4_xritag, + nvmereq_wqe->hba_wqidx, ndlp->nlp_DID); + + /* Outstanding abort is in progress */ + if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6144 Outstanding NVME I/O Abort Request " + "still pending on nvme_fcreq %p, " + "lpfc_ncmd %p\n", + pnvme_fcreq, lpfc_nbuf); + return; + } + + abts_buf = __lpfc_sli_get_iocbq(phba); + if (!abts_buf) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6136 No available abort wqes. Skipping " + "Abts req for nvme_fcreq %p.\n", + pnvme_fcreq); + return; + } + + /* Ready - mark outstanding as aborted by driver. */ + nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED; + + /* Complete prepping the abort wqe and issue to the FW. */ + abts_wqe = &abts_buf->wqe; + + /* WQEs are reused. Clear stale data and set key fields to + * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. + */ + memset(abts_wqe, 0, sizeof(union lpfc_wqe)); + bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); + + /* word 7 */ + bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); + bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, + nvmereq_wqe->iocb.ulpClass); + + /* word 8 - tell the FW to abort the IO associated with this + * outstanding exchange ID. + */ + abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag; + + /* word 9 - this is the iotag for the abts_wqe completion. */ + bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, + abts_buf->iotag); + + /* word 10 */ + bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx); + bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); + + /* word 11 */ + bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); + bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abts_buf->iocb_flag |= LPFC_IO_NVME; + abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx; + abts_buf->vport = vport; + abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; + ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (ret_val == IOCB_ERROR) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6137 Failed abts issue_wqe with status x%x " + "for nvme_fcreq %p.\n", + ret_val, pnvme_fcreq); + lpfc_sli_release_iocbq(phba, abts_buf); + return; + } + + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6138 Transport Abort NVME Request Issued for\n" + "ox_id x%x on reqtag x%x\n", + nvmereq_wqe->sli4_xritag, + abts_buf->iotag); +} + +/* Declare and initialization an instance of the FC NVME template. 
*/ +static struct nvme_fc_port_template lpfc_nvme_template = { + /* initiator-based functions */ + .localport_delete = lpfc_nvme_localport_delete, + .remoteport_delete = lpfc_nvme_remoteport_delete, + .create_queue = lpfc_nvme_create_queue, + .delete_queue = lpfc_nvme_delete_queue, + .ls_req = lpfc_nvme_ls_req, + .fcp_io = lpfc_nvme_fcp_io_submit, + .ls_abort = lpfc_nvme_ls_abort, + .fcp_abort = lpfc_nvme_fcp_abort, + + .max_hw_queues = 1, + .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS, + .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS, + .dma_boundary = 0xFFFFFFFF, + + /* Sizes of additional private data for data structures. + * No use for the last two sizes at this time. + */ + .local_priv_sz = sizeof(struct lpfc_nvme_lport), + .remote_priv_sz = sizeof(struct lpfc_nvme_rport), + .lsrqst_priv_sz = 0, + .fcprqst_priv_sz = 0, +}; + +/** + * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware + * @phba: pointer to lpfc hba data structure. + * @nblist: pointer to nvme buffer list. + * @count: number of scsi buffers on the list. + * + * This routine is invoked to post a block of @count scsi sgl pages from a + * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. + * No Lock is held. + * + **/ +static int +lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba, + struct list_head *nblist, + int count) +{ + struct lpfc_nvme_buf *lpfc_ncmd; + struct lpfc_mbx_post_uembed_sgl_page1 *sgl; + struct sgl_page_pairs *sgl_pg_pairs; + void *viraddr; + LPFC_MBOXQ_t *mbox; + uint32_t reqlen, alloclen, pg_pairs; + uint32_t mbox_tmo; + uint16_t xritag_start = 0; + int rc = 0; + uint32_t shdr_status, shdr_add_status; + dma_addr_t pdma_phys_bpl1; + union lpfc_sli4_cfg_shdr *shdr; + + /* Calculate the requested length of the dma memory */ + reqlen = count * sizeof(struct sgl_page_pairs) + + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); + if (reqlen > SLI4_PAGE_SIZE) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "6118 Block sgl registration required DMA " + "size (%d) great than a page\n", reqlen); + return -ENOMEM; + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6119 Failed to allocate mbox cmd memory\n"); + return -ENOMEM; + } + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, + LPFC_SLI4_MBX_NEMBED); + + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6120 Allocated DMA memory size (%d) is " + "less than the requested DMA memory " + "size (%d)\n", alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + + /* Get the first SGE entry from the non-embedded DMA memory */ + viraddr = mbox->sge_array->addr[0]; + + /* Set up the SGL pages in the non-embedded DMA pages */ + sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; + sgl_pg_pairs = &sgl->sgl_pg_pairs; + + pg_pairs = 0; + list_for_each_entry(lpfc_ncmd, nblist, list) { + /* Set up the sge entry */ + sgl_pg_pairs->sgl_pg0_addr_lo = + cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); + sgl_pg_pairs->sgl_pg0_addr_hi = + cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); + if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + sgl_pg_pairs->sgl_pg1_addr_lo = + cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); + sgl_pg_pairs->sgl_pg1_addr_hi = + 
cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); + /* Keep the first xritag on the list */ + if (pg_pairs == 0) + xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; + sgl_pg_pairs++; + pg_pairs++; + } + bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); + /* Perform endian conversion if necessary */ + sgl->word0 = cpu_to_le32(sgl->word0); + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + lpfc_sli4_mbox_cmd_free(phba, mbox); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6125 POST_SGL_BLOCK mailbox command failed " + "status x%x add_status x%x mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list + * @phba: pointer to lpfc hba data structure. + * @post_nblist: pointer to the nvme buffer list. + * + * This routine walks a list of nvme buffers that was passed in. It attempts + * to construct blocks of nvme buffer sgls which contains contiguous xris and + * uses the non-embedded SGL block post mailbox commands to post to the port. + * For single NVME buffer sgl with non-contiguous xri, if any, it shall use + * embedded SGL post mailbox command for posting. The @post_nblist passed in + * must be local list, thus no lock is needed when manipulate the list. + * + * Returns: 0 = failure, non-zero number of successfully posted buffers. 
+ **/ +static int +lpfc_post_nvme_sgl_list(struct lpfc_hba *phba, + struct list_head *post_nblist, int sb_count) +{ + struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; + int status, sgl_size; + int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; + dma_addr_t pdma_phys_sgl1; + int last_xritag = NO_XRI; + int cur_xritag; + LIST_HEAD(prep_nblist); + LIST_HEAD(blck_nblist); + LIST_HEAD(nvme_nblist); + + /* sanity check */ + if (sb_count <= 0) + return -EINVAL; + + sgl_size = phba->cfg_sg_dma_buf_size; + + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { + list_del_init(&lpfc_ncmd->list); + block_cnt++; + if ((last_xritag != NO_XRI) && + (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { + /* a hole in xri block, form a sgl posting block */ + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt - 1; + /* prepare list for next posting block */ + list_add_tail(&lpfc_ncmd->list, &prep_nblist); + block_cnt = 1; + } else { + /* prepare list for next posting block */ + list_add_tail(&lpfc_ncmd->list, &prep_nblist); + /* enough sgls for non-embed sgl mbox command */ + if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt; + block_cnt = 0; + } + } + num_posting++; + last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; + + /* end of repost sgl list condition for NVME buffers */ + if (num_posting == sb_count) { + if (post_cnt == 0) { + /* last sgl posting block */ + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt; + } else if (block_cnt == 1) { + /* last single sgl with non-contiguous xri */ + if (sgl_size > SGL_PAGE_SIZE) + pdma_phys_sgl1 = + lpfc_ncmd->dma_phys_sgl + + SGL_PAGE_SIZE; + else + pdma_phys_sgl1 = 0; + cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; + status = lpfc_sli4_post_sgl(phba, + lpfc_ncmd->dma_phys_sgl, + pdma_phys_sgl1, cur_xritag); + if (status) { + /* failure, put on abort nvme list */ + lpfc_ncmd->exch_busy = 1; + } else { + /* success, put on NVME buffer list */ + lpfc_ncmd->exch_busy = 0; + lpfc_ncmd->status = IOSTAT_SUCCESS; + num_posted++; + } + /* success, put on NVME buffer sgl list */ + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); + } + } + + /* continue until a nembed page worth of sgls */ + if (post_cnt == 0) + continue; + + /* post block of NVME buffer list sgls */ + status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist, + post_cnt); + + /* don't reset xirtag due to hole in xri block */ + if (block_cnt == 0) + last_xritag = NO_XRI; + + /* reset NVME buffer post count for next round of posting */ + post_cnt = 0; + + /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ + while (!list_empty(&blck_nblist)) { + list_remove_head(&blck_nblist, lpfc_ncmd, + struct lpfc_nvme_buf, list); + if (status) { + /* failure, put on abort nvme list */ + lpfc_ncmd->exch_busy = 1; + } else { + /* success, put on NVME buffer list */ + lpfc_ncmd->exch_busy = 0; + lpfc_ncmd->status = IOSTAT_SUCCESS; + num_posted++; + } + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); + } + } + /* Push NVME buffers with sgl posted to the available list */ + while (!list_empty(&nvme_nblist)) { + list_remove_head(&nvme_nblist, lpfc_ncmd, + struct lpfc_nvme_buf, list); + lpfc_release_nvme_buf(phba, lpfc_ncmd); + } + return num_posted; +} + +/** + * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls + * @phba: pointer to lpfc hba data structure. 
+ * + * This routine walks the list of nvme buffers that have been allocated and + * repost them to the port by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine + * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list + * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers. + * + * Returns: 0 = success, non-zero failure. + **/ +int +lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba) +{ + LIST_HEAD(post_nblist); + int num_posted, rc = 0; + + /* get all NVME buffers need to repost to a local list */ + spin_lock_irq(&phba->nvme_buf_list_get_lock); + spin_lock(&phba->nvme_buf_list_put_lock); + list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist); + list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist); + spin_unlock(&phba->nvme_buf_list_put_lock); + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + + /* post the list of nvme buffer sgls to port if available */ + if (!list_empty(&post_nblist)) { + num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist, + phba->sli4_hba.nvme_xri_cnt); + /* failed to post any nvme buffer, return error */ + if (num_posted == 0) + rc = -EIO; + } + return rc; +} + +/** + * lpfc_new_nvme_buf - Scsi buffer allocator for HBA with SLI4 IF spec + * @vport: The virtual port for which this call being executed. + * @num_to_allocate: The requested number of buffers to allocate. + * + * This routine allocates nvme buffers for device with SLI-4 interface spec, + * the nvme buffer contains all the necessary information needed to initiate + * a NVME I/O. After allocating up to @num_to_allocate NVME buffers and put + * them on a list, it post them to the port by using SGL block post. + * + * Return codes: + * int - number of nvme buffers that were allocated and posted. + * 0 = failure, less than num_to_alloc is a partial failure. + **/ +static int +lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvme_buf *lpfc_ncmd; + struct lpfc_iocbq *pwqeq; + union lpfc_wqe128 *wqe; + struct sli4_sge *sgl; + dma_addr_t pdma_phys_sgl; + uint16_t iotag, lxri = 0; + int bcnt, num_posted, sgl_size; + LIST_HEAD(prep_nblist); + LIST_HEAD(post_nblist); + LIST_HEAD(nvme_nblist); + + sgl_size = phba->cfg_sg_dma_buf_size; + + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL); + if (!lpfc_ncmd) + break; + /* + * Get memory from the pci pool to map the virt space to + * pci bus space for an I/O. The DMA buffer includes the + * number of SGE's necessary to support the sg_tablesize. + */ + lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, + GFP_KERNEL, + &lpfc_ncmd->dma_handle); + if (!lpfc_ncmd->data) { + kfree(lpfc_ncmd); + break; + } + memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size); + + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + break; + } + pwqeq = &(lpfc_ncmd->cur_iocbq); + wqe = (union lpfc_wqe128 *)&pwqeq->wqe; + + /* Allocate iotag for lpfc_ncmd->cur_iocbq. 
*/ + iotag = lpfc_sli_next_iotag(phba, pwqeq); + if (iotag == 0) { + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6121 Failed to allocated IOTAG for" + " XRI:0x%x\n", lxri); + lpfc_sli4_free_xri(phba, lxri); + break; + } + pwqeq->sli4_lxritag = lxri; + pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + pwqeq->iocb_flag |= LPFC_IO_NVME; + pwqeq->context1 = lpfc_ncmd; + pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl; + + /* Initialize local short-hand pointers. */ + lpfc_ncmd->nvme_sgl = lpfc_ncmd->data; + sgl = lpfc_ncmd->nvme_sgl; + pdma_phys_sgl = lpfc_ncmd->dma_handle; + lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl; + + /* Rsp SGE will be filled in when we rcv an IO + * from the NVME Layer to be sent. + * The cmd is going to be embedded so we need a SKIP SGE. + */ + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + /* Fill in word 3 / sgl_len during cmd submission */ + + lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; + + /* Word 7 */ + bf_set(wqe_erp, &wqe->generic.wqe_com, 0); + /* NVME upper layers will time things out, if needed */ + bf_set(wqe_tmo, &wqe->generic.wqe_com, 0); + + /* Word 10 */ + bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); + bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); + + /* add the nvme buffer to a post list */ + list_add_tail(&lpfc_ncmd->list, &post_nblist); + spin_lock_irq(&phba->nvme_buf_list_get_lock); + phba->sli4_hba.nvme_xri_cnt++; + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + } + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6114 Allocate %d out of %d requested new NVME " + "buffers\n", bcnt, num_to_alloc); + + /* post the list of nvme buffer sgls to port if available */ + if (!list_empty(&post_nblist)) + num_posted = lpfc_post_nvme_sgl_list(phba, + &post_nblist, bcnt); + else + num_posted = 0; + + return num_posted; +} + +/** + * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA + * @phba: The HBA for which this call is being executed. + * + * This routine removes a nvme buffer from head of @phba lpfc_nvme_buf_list list + * and returns to caller. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_nvme_buf - Success + **/ +static struct lpfc_nvme_buf * +lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +{ + struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; + unsigned long iflag = 0; + int found = 0; + + spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &phba->lpfc_nvme_buf_list_get, list) { + if (lpfc_test_rrq_active(phba, ndlp, + lpfc_ncmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_ncmd->list); + found = 1; + break; + } + if (!found) { + spin_lock(&phba->nvme_buf_list_put_lock); + list_splice(&phba->lpfc_nvme_buf_list_put, + &phba->lpfc_nvme_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); + spin_unlock(&phba->nvme_buf_list_put_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &phba->lpfc_nvme_buf_list_get, list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_ncmd->list); + found = 1; + break; + } + } + spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag); + if (!found) + return NULL; + return lpfc_ncmd; +} + +/** + * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. + * @phba: The Hba for which this call is being executed. 
+ * @lpfc_ncmd: The nvme buffer which is being released. + * + * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba + * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer + * and cannot be reused for at least RA_TOV amount of time if it was + * aborted. + **/ +static void +lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) +{ + unsigned long iflag = 0; + + lpfc_ncmd->nonsg_phys = 0; + if (lpfc_ncmd->exch_busy) { + spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, + iflag); + lpfc_ncmd->nvmeCmd = NULL; + list_add_tail(&lpfc_ncmd->list, + &phba->sli4_hba.lpfc_abts_nvme_buf_list); + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, + iflag); + } else { + lpfc_ncmd->nvmeCmd = NULL; + lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME; + spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); + list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put); + spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); + } +} + +/** + * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. + * @pvport - the lpfc_vport instance requesting a localport. + * + * This routine is invoked to create an nvme localport instance to bind + * to the nvme_fc_transport. It is called once during driver load + * like lpfc_create_shost after all other services are initialized. + * It requires a vport, vpi, and wwns at call time. Other localport + * parameters are modified as the driver's FCID and the Fabric WWN + * are established. + * + * Return codes + * 0 - successful + * -ENOMEM - no heap memory available + * other values - from nvme registration upcall + **/ +int +lpfc_nvme_create_localport(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct nvme_fc_port_info nfcp_info; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + int len, ret = 0; + + /* Initialize this localport instance. The vport wwn usage ensures + * that NPIV is accounted for. + */ + memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info)); + nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR; + nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); + nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); + + /* For now need + 1 to get around NVME transport logic */ + lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1; + lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; + + /* localport is allocated from the stack, but the registration + * call allocates heap memory as well as the private area. + */ + ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, + &vport->phba->pcidev->dev, &localport); + if (!ret) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, + "6005 Successfully registered local " + "NVME port num %d, localP %p, private %p, " + "sg_seg %d\n", + localport->port_num, localport, + localport->private, + lpfc_nvme_template.max_sgl_segments); + + /* Private is our lport size declared in the template. */ + lport = (struct lpfc_nvme_lport *)localport->private; + vport->localport = localport; + lport->vport = vport; + INIT_LIST_HEAD(&lport->rport_list); + vport->nvmei_support = 1; + } + + len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max); + vport->phba->total_nvme_bufs += len; + return ret; +} + +/** + * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. + * @pnvme: pointer to lpfc nvme data structure. + * + * This routine is invoked to destroy all lports bound to the phba. 
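lpfc_nvme_create_localport seeds the nvme_fc_port_info node and port names with wwn_to_u64() of the vport's WWNN/WWPN before registering with the transport. A self-contained sketch of that conversion, assuming the usual big-endian reading of the 8-byte WWN:

/* Sketch of the wwn_to_u64() conversion used when filling
 * nvme_fc_port_info: the 8-byte WWN is read as a big-endian u64. */
#include <stdint.h>
#include <stdio.h>

static uint64_t wwn_to_u64(const uint8_t wwn[8])
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x94, 0x2d, 0x16 };

	printf("port_name = 0x%016llx\n", (unsigned long long)wwn_to_u64(wwpn));
	return 0;
}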
+ * The lport memory was allocated by the nvme fc transport and is + * released there. This routine ensures all rports bound to the + * lport have been disconnected. + * + **/ +void +lpfc_nvme_destroy_localport(struct lpfc_vport *vport) +{ + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; + int ret; + + if (vport->nvmei_support == 0) + return; + + localport = vport->localport; + vport->localport = NULL; + lport = (struct lpfc_nvme_lport *)localport->private; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6011 Destroying NVME localport %p\n", + localport); + + list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) { + /* The last node ref has to get released now before the rport + * private memory area is released by the transport. + */ + list_del(&rport->list); + + init_completion(&rport->rport_unreg_done); + ret = nvme_fc_unregister_remoteport(rport->remoteport); + if (ret) + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6008 rport fail destroy %x\n", ret); + wait_for_completion_timeout(&rport->rport_unreg_done, 5); + } + /* lport's rport list is clear. Unregister + * lport and release resources. + */ + init_completion(&lport->lport_unreg_done); + ret = nvme_fc_unregister_localport(localport); + wait_for_completion_timeout(&lport->lport_unreg_done, 5); + + /* Regardless of the unregister upcall response, clear + * nvmei_support. All rports are unregistered and the + * driver will clean up. + */ + vport->nvmei_support = 0; + if (ret == 0) { + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_DISC, + "6009 Unregistered lport Success\n"); + } else { + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_DISC, + "6010 Unregistered lport " + "Failed, status x%x\n", + ret); + } +} + +void +lpfc_nvme_update_localport(struct lpfc_vport *vport) +{ + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + + localport = vport->localport; + lport = (struct lpfc_nvme_lport *)localport->private; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6012 Update NVME lport %p did x%x\n", + localport, vport->fc_myDID); + + localport->port_id = vport->fc_myDID; + if (localport->port_id == 0) + localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY; + else + localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6030 bound lport %p to DID x%06x\n", + lport, localport->port_id); + +} + +int +lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + int ret = 0; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct nvme_fc_remote_port *remote_port; + struct nvme_fc_port_info rpinfo; + + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, + "6006 Register NVME PORT. DID x%06x nlptype x%x\n", + ndlp->nlp_DID, ndlp->nlp_type); + + localport = vport->localport; + lport = (struct lpfc_nvme_lport *)localport->private; + + if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) { + + /* The driver isn't expecting the rport wwn to change + * but it might get a different DID on a different + * fabric. 
+ */ + list_for_each_entry(rport, &lport->rport_list, list) { + if (rport->remoteport->port_name != + wwn_to_u64(ndlp->nlp_portname.u.wwn)) + continue; + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, + "6035 lport %p, found matching rport " + "at wwpn 0x%llx, Data: x%x x%x x%x " + "x%06x\n", + lport, + rport->remoteport->port_name, + rport->remoteport->port_id, + rport->remoteport->port_role, + ndlp->nlp_type, + ndlp->nlp_DID); + remote_port = rport->remoteport; + if ((remote_port->port_id == 0) && + (remote_port->port_role == + FC_PORT_ROLE_NVME_DISCOVERY)) { + remote_port->port_id = ndlp->nlp_DID; + remote_port->port_role &= + ~FC_PORT_ROLE_NVME_DISCOVERY; + if (ndlp->nlp_type & NLP_NVME_TARGET) + remote_port->port_role |= + FC_PORT_ROLE_NVME_TARGET; + if (ndlp->nlp_type & NLP_NVME_INITIATOR) + remote_port->port_role |= + FC_PORT_ROLE_NVME_INITIATOR; + + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NVME_DISC, + "6014 Rebinding lport to " + "rport wwpn 0x%llx, " + "Data: x%x x%x x%x x%06x\n", + remote_port->port_name, + remote_port->port_id, + remote_port->port_role, + ndlp->nlp_type, + ndlp->nlp_DID); + } + return 0; + } + + /* NVME rports are not preserved across devloss. + * Just register this instance. + */ + rpinfo.port_id = ndlp->nlp_DID; + rpinfo.port_role = 0; + if (ndlp->nlp_type & NLP_NVME_TARGET) + rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; + if (ndlp->nlp_type & NLP_NVME_INITIATOR) + rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; + rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); + rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + + ret = nvme_fc_register_remoteport(localport, &rpinfo, + &remote_port); + if (!ret) { + rport = remote_port->private; + rport->remoteport = remote_port; + rport->lport = lport; + rport->ndlp = lpfc_nlp_get(ndlp); + if (!rport->ndlp) + return -1; + ndlp->nrport = rport; + INIT_LIST_HEAD(&rport->list); + list_add_tail(&rport->list, &lport->rport_list); + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NVME_DISC | LOG_NODE, + "6022 Binding new rport to lport %p " + "Rport WWNN 0x%llx, Rport WWPN 0x%llx " + "DID x%06x Role x%x\n", + lport, + rpinfo.node_name, rpinfo.port_name, + rpinfo.port_id, rpinfo.port_role); + } else { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_NVME_DISC | LOG_NODE, + "6031 RemotePort Registration failed " + "err: %d, DID x%06x\n", + ret, ndlp->nlp_DID); + } + } else { + ret = -EINVAL; + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6027 Unknown nlp_type x%x on DID x%06x " + "ndlp %p. Not Registering nvme rport\n", + ndlp->nlp_type, ndlp->nlp_DID, ndlp); + } + return ret; +} + +/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. + * + * There is no notion of Devloss or rport recovery from the current + * nvme_transport perspective. Loss of an rport just means IO cannot + * be sent and recovery is completely up to the initator. + * For now, the driver just unbinds the DID and port_role so that + * no further IO can be issued. Changes are planned for later. + * + * Notes - the ndlp reference count is not decremented here since + * since there is no nvme_transport api for devloss. Node ref count + * is only adjusted in driver unload. + */ +void +lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + int ret; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct nvme_fc_remote_port *remoteport; + + localport = vport->localport; + + /* This is fundamental error. 
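The registration path above either rebinds an already-known remote port (a discovery controller that now has a real DID) or registers a fresh one, translating the node's NLP_NVME_* type bits into FC_PORT_ROLE_NVME_* bits. Below is a standalone model of the rebind rule; the flag values are illustrative placeholders, not the kernel's constants.

/* Model of the remote-port rebind in lpfc_nvme_register_port: a port
 * first seen as a discovery controller (DID 0) gets its real DID and
 * target/initiator roles once the node reports them. */
#include <stdint.h>
#include <stdio.h>

#define ROLE_NVME_TARGET     0x1   /* placeholder flag values */
#define ROLE_NVME_INITIATOR  0x2
#define ROLE_NVME_DISCOVERY  0x4

#define NLP_NVME_TARGET      0x1
#define NLP_NVME_INITIATOR   0x2

struct remote_port { uint32_t port_id, port_role; };

static void rebind_remote_port(struct remote_port *rp,
			       uint32_t nlp_did, uint32_t nlp_type)
{
	if (rp->port_id == 0 && rp->port_role == ROLE_NVME_DISCOVERY) {
		rp->port_id = nlp_did;
		rp->port_role &= ~ROLE_NVME_DISCOVERY;
		if (nlp_type & NLP_NVME_TARGET)
			rp->port_role |= ROLE_NVME_TARGET;
		if (nlp_type & NLP_NVME_INITIATOR)
			rp->port_role |= ROLE_NVME_INITIATOR;
	}
}

int main(void)
{
	struct remote_port rp = { .port_id = 0, .port_role = ROLE_NVME_DISCOVERY };

	rebind_remote_port(&rp, 0x010203, NLP_NVME_TARGET);
	printf("DID x%06x role x%x\n", rp.port_id, rp.port_role);
	return 0;
}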
The localport is always + * available until driver unload. Just exit. + */ + if (!localport) + return; + + lport = (struct lpfc_nvme_lport *)localport->private; + if (!lport) + goto input_err; + + rport = ndlp->nrport; + if (!rport) + goto input_err; + + remoteport = rport->remoteport; + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6033 Unreg nvme remoteport %p, portname x%llx, " + "port_id x%06x, portstate x%x port type x%x\n", + remoteport, remoteport->port_name, + remoteport->port_id, remoteport->port_state, + ndlp->nlp_type); + + /* Sanity check ndlp type. Only call for NVME ports. Don't + * clear any rport state until the transport calls back. + */ + if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) { + init_completion(&rport->rport_unreg_done); + ret = nvme_fc_unregister_remoteport(remoteport); + if (ret != 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6167 NVME unregister failed %d " + "port_state x%x\n", + ret, remoteport->port_state); + } + + /* Wait for the driver's delete completion routine to finish + * before proceeding. This guarantees the transport and driver + * have completed the unreg process. + */ + ret = wait_for_completion_timeout(&rport->rport_unreg_done, 5); + if (ret == 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6169 Unreg nvme wait failed %d\n", + ret); + } + } + return; + + input_err: + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6168: State error: lport %p, rport%p FCID x%06x\n", + vport->localport, ndlp->rport, ndlp->nlp_DID); +} diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h new file mode 100644 index 000000000000..b2fae5e813f8 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -0,0 +1,103 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + ********************************************************************/ + +#define LPFC_NVME_MIN_SEGS 16 +#define LPFC_NVME_DEFAULT_SEGS 66 /* 256K IOs - 64 + 2 */ +#define LPFC_NVME_MAX_SEGS 510 +#define LPFC_NVMET_MIN_POSTBUF 16 +#define LPFC_NVMET_DEFAULT_POSTBUF 1024 +#define LPFC_NVMET_MAX_POSTBUF 4096 +#define LPFC_NVME_WQSIZE 256 + +#define LPFC_NVME_ERSP_LEN 0x20 + +struct lpfc_nvme_qhandle { + uint32_t index; /* WQ index to use */ + uint32_t qidx; /* queue index passed to create */ + uint32_t cpu_id; /* current cpu id at time of create */ +}; + +/* Declare nvme-based local and remote port definitions. 
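As an aside on the defaults above, the "256K IOs - 64 + 2" comment for LPFC_NVME_DEFAULT_SEGS reads as: 64 data SGEs of 4 KiB each cover a 256 KiB I/O, plus 2 extra SGEs reserved ahead of the data (the skip/command entry and the response), giving 64 + 2 = 66. That breakdown is an inference from the comment, not something the patch states explicitly.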
*/ +struct lpfc_nvme_lport { + struct lpfc_vport *vport; + struct list_head rport_list; + struct completion lport_unreg_done; + /* Add sttats counters here */ +}; + +struct lpfc_nvme_rport { + struct list_head list; + struct lpfc_nvme_lport *lport; + struct nvme_fc_remote_port *remoteport; + struct lpfc_nodelist *ndlp; + struct completion rport_unreg_done; +}; + +struct lpfc_nvme_buf { + struct list_head list; + struct nvmefc_fcp_req *nvmeCmd; + struct lpfc_nvme_rport *nrport; + + uint32_t timeout; + + uint16_t flags; /* TBD convert exch_busy to flags */ +#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ + uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ + uint16_t status; /* From IOCB Word 7- ulpStatus */ + uint16_t cpu; + uint16_t qidx; + uint16_t sqid; + uint32_t result; /* From IOCB Word 4. */ + + uint32_t seg_cnt; /* Number of scatter-gather segments returned by + * dma_map_sg. The driver needs this for calls + * to dma_unmap_sg. + */ + dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ + + /* + * data and dma_handle are the kernel virtual and bus address of the + * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter + * gather bde list that supports the sg_tablesize value. + */ + void *data; + dma_addr_t dma_handle; + + struct sli4_sge *nvme_sgl; + dma_addr_t dma_phys_sgl; + + /* cur_iocbq has phys of the dma-able buffer. + * Iotag is in here + */ + struct lpfc_iocbq cur_iocbq; + + wait_queue_head_t *waitq; + unsigned long start_time; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint64_t ts_cmd_start; + uint64_t ts_last_cmd; + uint64_t ts_cmd_wqput; + uint64_t ts_isr_cmpl; + uint64_t ts_data_nvme; +#endif +}; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c new file mode 100644 index 000000000000..c421e1738ee9 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -0,0 +1,1986 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channsel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + ********************************************************************/ +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <asm/unaligned.h> +#include <linux/crc-t10dif.h> +#include <net/checksum.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fs.h> + +#include <../drivers/nvme/host/nvme.h> +#include <linux/nvme-fc-driver.h> + +#include "lpfc_version.h" +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *, + dma_addr_t rspbuf, + uint16_t rspsize); +static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *); +static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *, + uint32_t, uint16_t); +static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *, + uint32_t, uint16_t); +static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *, + uint32_t, uint16_t); + +/** + * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME LS commands + * The function frees memory resources used for the NVME commands. + **/ +static void +lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct nvmefc_tgt_ls_req *rsp; + struct lpfc_nvmet_rcv_ctx *ctxp; + uint32_t status, result; + + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + if (!phba->targetport) + goto out; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + + if (status) + atomic_inc(&tgtp->xmt_ls_rsp_error); + else + atomic_inc(&tgtp->xmt_ls_rsp_cmpl); + +out: + ctxp = cmdwqe->context2; + rsp = &ctxp->ctx.ls_req; + + lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n", + ctxp->oxid, status, result); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__, + ctxp, status, result); + + lpfc_nlp_put(cmdwqe->context1); + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); + rsp->done(rsp); + kfree(ctxp); +} + +/** + * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context + * @phba: HBA buffer is associated with + * @ctxp: context to clean up + * @mp: Buffer to free + * + * Description: Frees the given DMA buffer in the appropriate way given by + * reposting it to its associated RQ so it can be reused. + * + * Notes: Takes phba->hbalock. Can be called with or without other locks held. 
+ * + * Returns: None + **/ +void +lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, + struct lpfc_dmabuf *mp) +{ + if (ctxp) { + if (ctxp->txrdy) { + pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, + ctxp->txrdy_phys); + ctxp->txrdy = NULL; + ctxp->txrdy_phys = 0; + } + ctxp->state = LPFC_NVMET_STE_FREE; + } + lpfc_rq_buf_free(phba, mp); +} + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +static void +lpfc_nvmet_ktime(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp) +{ + uint64_t seg1, seg2, seg3, seg4, seg5; + uint64_t seg6, seg7, seg8, seg9, seg10; + + if (!phba->ktime_on) + return; + + if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme || + !ctxp->ts_nvme_data || !ctxp->ts_data_wqput || + !ctxp->ts_isr_data || !ctxp->ts_data_nvme || + !ctxp->ts_nvme_status || !ctxp->ts_status_wqput || + !ctxp->ts_isr_status || !ctxp->ts_status_nvme) + return; + + if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme) + return; + if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data) + return; + if (ctxp->ts_nvme_data > ctxp->ts_data_wqput) + return; + if (ctxp->ts_data_wqput > ctxp->ts_isr_data) + return; + if (ctxp->ts_isr_data > ctxp->ts_data_nvme) + return; + if (ctxp->ts_data_nvme > ctxp->ts_nvme_status) + return; + if (ctxp->ts_nvme_status > ctxp->ts_status_wqput) + return; + if (ctxp->ts_status_wqput > ctxp->ts_isr_status) + return; + if (ctxp->ts_isr_status > ctxp->ts_status_nvme) + return; + /* + * Segment 1 - Time from FCP command received by MSI-X ISR + * to FCP command is passed to NVME Layer. + * Segment 2 - Time from FCP command payload handed + * off to NVME Layer to Driver receives a Command op + * from NVME Layer. + * Segment 3 - Time from Driver receives a Command op + * from NVME Layer to Command is put on WQ. + * Segment 4 - Time from Driver WQ put is done + * to MSI-X ISR for Command cmpl. + * Segment 5 - Time from MSI-X ISR for Command cmpl to + * Command cmpl is passed to NVME Layer. + * Segment 6 - Time from Command cmpl is passed to NVME + * Layer to Driver receives a RSP op from NVME Layer. + * Segment 7 - Time from Driver receives a RSP op from + * NVME Layer to WQ put is done on TRSP FCP Status. + * Segment 8 - Time from Driver WQ put is done on TRSP + * FCP Status to MSI-X ISR for TRSP cmpl. + * Segment 9 - Time from MSI-X ISR for TRSP cmpl to + * TRSP cmpl is passed to NVME Layer. + * Segment 10 - Time from FCP command received by + * MSI-X ISR to command is completed on wire. 
+ * (Segments 1 thru 8) for READDATA / WRITEDATA + * (Segments 1 thru 4) for READDATA_RSP + */ + seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd; + seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1; + seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) - + seg1 - seg2; + seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3; + seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - seg4; + + /* For auto rsp commands seg6 thru seg10 will be 0 */ + if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) { + seg6 = (ctxp->ts_nvme_status - + ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - seg4 - seg5; + seg7 = (ctxp->ts_status_wqput - + ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - + seg4 - seg5 - seg6; + seg8 = (ctxp->ts_isr_status - + ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - seg4 - + seg5 - seg6 - seg7; + seg9 = (ctxp->ts_status_nvme - + ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - seg4 - + seg5 - seg6 - seg7 - seg8; + seg10 = (ctxp->ts_isr_status - + ctxp->ts_isr_cmd); + } else { + seg6 = 0; + seg7 = 0; + seg8 = 0; + seg9 = 0; + seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd); + } + + phba->ktime_seg1_total += seg1; + if (seg1 < phba->ktime_seg1_min) + phba->ktime_seg1_min = seg1; + else if (seg1 > phba->ktime_seg1_max) + phba->ktime_seg1_max = seg1; + + phba->ktime_seg2_total += seg2; + if (seg2 < phba->ktime_seg2_min) + phba->ktime_seg2_min = seg2; + else if (seg2 > phba->ktime_seg2_max) + phba->ktime_seg2_max = seg2; + + phba->ktime_seg3_total += seg3; + if (seg3 < phba->ktime_seg3_min) + phba->ktime_seg3_min = seg3; + else if (seg3 > phba->ktime_seg3_max) + phba->ktime_seg3_max = seg3; + + phba->ktime_seg4_total += seg4; + if (seg4 < phba->ktime_seg4_min) + phba->ktime_seg4_min = seg4; + else if (seg4 > phba->ktime_seg4_max) + phba->ktime_seg4_max = seg4; + + phba->ktime_seg5_total += seg5; + if (seg5 < phba->ktime_seg5_min) + phba->ktime_seg5_min = seg5; + else if (seg5 > phba->ktime_seg5_max) + phba->ktime_seg5_max = seg5; + + phba->ktime_data_samples++; + if (!seg6) + goto out; + + phba->ktime_seg6_total += seg6; + if (seg6 < phba->ktime_seg6_min) + phba->ktime_seg6_min = seg6; + else if (seg6 > phba->ktime_seg6_max) + phba->ktime_seg6_max = seg6; + + phba->ktime_seg7_total += seg7; + if (seg7 < phba->ktime_seg7_min) + phba->ktime_seg7_min = seg7; + else if (seg7 > phba->ktime_seg7_max) + phba->ktime_seg7_max = seg7; + + phba->ktime_seg8_total += seg8; + if (seg8 < phba->ktime_seg8_min) + phba->ktime_seg8_min = seg8; + else if (seg8 > phba->ktime_seg8_max) + phba->ktime_seg8_max = seg8; + + phba->ktime_seg9_total += seg9; + if (seg9 < phba->ktime_seg9_min) + phba->ktime_seg9_min = seg9; + else if (seg9 > phba->ktime_seg9_max) + phba->ktime_seg9_max = seg9; +out: + phba->ktime_seg10_total += seg10; + if (seg10 < phba->ktime_seg10_min) + phba->ktime_seg10_min = seg10; + else if (seg10 > phba->ktime_seg10_max) + phba->ktime_seg10_max = seg10; + phba->ktime_status_samples++; +} +#endif + +/** + * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME FCP commands + * The function frees memory resources used for the NVME commands. 
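Each segment above is computed as a cumulative offset from ts_isr_cmd minus the segments already accounted for, then folded into per-segment total/min/max counters so debugfs can later report averages. A compact userspace model of that accounting (nanosecond counters, as in the driver; min/max handling simplified):

/* Userspace model of the per-segment latency accounting in
 * lpfc_nvmet_ktime(): segments are deltas between consecutive
 * timestamps, accumulated as total/min/max for later averaging. */
#include <stdint.h>
#include <stdio.h>

struct seg_stats {
	uint64_t total, min, max, samples;
};

static void seg_account(struct seg_stats *s, uint64_t delta)
{
	s->total += delta;
	if (!s->samples || delta < s->min)
		s->min = delta;
	if (delta > s->max)
		s->max = delta;
	s->samples++;
}

int main(void)
{
	/* ts[0] = ISR saw the command, ts[1] = handed to the NVME layer,
	 * ts[2] = data op received back from the NVME layer. */
	uint64_t ts[3] = { 1000, 1450, 2100 };
	struct seg_stats seg1 = { 0 }, seg2 = { 0 };

	seg_account(&seg1, ts[1] - ts[0]);                      /* ISR -> NVME layer */
	seg_account(&seg2, (ts[2] - ts[0]) - (ts[1] - ts[0]));  /* next hop's share  */

	printf("seg1 avg %llu ns, seg2 avg %llu ns\n",
	       (unsigned long long)(seg1.total / seg1.samples),
	       (unsigned long long)(seg2.total / seg2.samples));
	return 0;
}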
+ **/ +static void +lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct nvmefc_tgt_fcp_req *rsp; + struct lpfc_nvmet_rcv_ctx *ctxp; + uint32_t status, result, op, start_clean; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t id; +#endif + + ctxp = cmdwqe->context2; + rsp = &ctxp->ctx.fcp_req; + op = rsp->op; + ctxp->flag &= ~LPFC_NVMET_IO_INP; + + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + if (!phba->targetport) + goto out; + + lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", + ctxp->oxid, op, status); + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (status) { + rsp->fcp_error = NVME_SC_DATA_XFER_ERROR; + rsp->transferred_length = 0; + atomic_inc(&tgtp->xmt_fcp_rsp_error); + } else { + rsp->fcp_error = NVME_SC_SUCCESS; + if (op == NVMET_FCOP_RSP) + rsp->transferred_length = rsp->rsplen; + else + rsp->transferred_length = rsp->transfer_length; + atomic_inc(&tgtp->xmt_fcp_rsp_cmpl); + } + +out: + if ((op == NVMET_FCOP_READDATA_RSP) || + (op == NVMET_FCOP_RSP)) { + /* Sanity check */ + ctxp->state = LPFC_NVMET_STE_DONE; + ctxp->entry_cnt++; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + if (rsp->op == NVMET_FCOP_READDATA_RSP) { + ctxp->ts_isr_data = + cmdwqe->isr_timestamp; + ctxp->ts_data_nvme = + ktime_get_ns(); + ctxp->ts_nvme_status = + ctxp->ts_data_nvme; + ctxp->ts_status_wqput = + ctxp->ts_data_nvme; + ctxp->ts_isr_status = + ctxp->ts_data_nvme; + ctxp->ts_status_nvme = + ctxp->ts_data_nvme; + } else { + ctxp->ts_isr_status = + cmdwqe->isr_timestamp; + ctxp->ts_status_nvme = + ktime_get_ns(); + } + } + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + id = smp_processor_id(); + if (ctxp->cpu != id) + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6703 CPU Check cmpl: " + "cpu %d expect %d\n", + id, ctxp->cpu); + if (ctxp->cpu < LPFC_CHECK_CPU_CNT) + phba->cpucheck_cmpl_io[id]++; + } +#endif + rsp->done(rsp); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) + lpfc_nvmet_ktime(phba, ctxp); +#endif + /* Let Abort cmpl repost the context */ + if (!(ctxp->flag & LPFC_NVMET_ABORT_OP)) + lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + } else { + ctxp->entry_cnt++; + start_clean = offsetof(struct lpfc_iocbq, wqe); + memset(((char *)cmdwqe) + start_clean, 0, + (sizeof(struct lpfc_iocbq) - start_clean)); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + ctxp->ts_isr_data = cmdwqe->isr_timestamp; + ctxp->ts_data_nvme = ktime_get_ns(); + } + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + id = smp_processor_id(); + if (ctxp->cpu != id) + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6704 CPU Check cmdcmpl: " + "cpu %d expect %d\n", + id, ctxp->cpu); + if (ctxp->cpu < LPFC_CHECK_CPU_CNT) + phba->cpucheck_ccmpl_io[id]++; + } +#endif + rsp->done(rsp); + } +} + +static int +lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_ls_req *rsp) +{ + struct lpfc_nvmet_rcv_ctx *ctxp = + container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req); + struct lpfc_hba *phba = ctxp->phba; + struct hbq_dmabuf *nvmebuf = + (struct hbq_dmabuf *)ctxp->rqb_buffer; + struct lpfc_iocbq *nvmewqeq; + struct lpfc_nvmet_tgtport *nvmep = tgtport->private; + struct lpfc_dmabuf dmabuf; + struct ulp_bde64 bpl; + int rc; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6023 %s: Entrypoint ctx %p %p\n", __func__, + ctxp, tgtport); + + nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, 
ctxp, rsp->rspdma, + rsp->rsplen); + if (nvmewqeq == NULL) { + atomic_inc(&nvmep->xmt_ls_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6150 LS Drop IO x%x: Prep\n", + ctxp->oxid); + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, + ctxp->sid, ctxp->oxid); + return -ENOMEM; + } + + /* Save numBdes for bpl2sgl */ + nvmewqeq->rsvd2 = 1; + nvmewqeq->hba_wqidx = 0; + nvmewqeq->context3 = &dmabuf; + dmabuf.virt = &bpl; + bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow; + bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh; + bpl.tus.f.bdeSize = rsp->rsplen; + bpl.tus.f.bdeFlags = 0; + bpl.tus.w = le32_to_cpu(bpl.tus.w); + + nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp; + nvmewqeq->iocb_cmpl = NULL; + nvmewqeq->context2 = ctxp; + + lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n", + ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen); + + rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq); + if (rc == WQE_SUCCESS) { + /* + * Okay to repost buffer here, but wait till cmpl + * before freeing ctxp and iocbq. + */ + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + ctxp->rqb_buffer = 0; + atomic_inc(&nvmep->xmt_ls_rsp); + return 0; + } + /* Give back resources */ + atomic_inc(&nvmep->xmt_ls_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6151 LS Drop IO x%x: Issue %d\n", + ctxp->oxid, rc); + + lpfc_nlp_put(nvmewqeq->context1); + + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); + return -ENXIO; +} + +static int +lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *rsp) +{ + struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; + struct lpfc_nvmet_rcv_ctx *ctxp = + container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); + struct lpfc_hba *phba = ctxp->phba; + struct lpfc_iocbq *nvmewqeq; + unsigned long iflags; + int rc, id; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + if (rsp->op == NVMET_FCOP_RSP) + ctxp->ts_nvme_status = ktime_get_ns(); + else + ctxp->ts_nvme_data = ktime_get_ns(); + } + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + id = smp_processor_id(); + ctxp->cpu = id; + if (id < LPFC_CHECK_CPU_CNT) + phba->cpucheck_xmt_io[id]++; + if (rsp->hwqid != id) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6705 CPU Check OP: " + "cpu %d expect %d\n", + id, rsp->hwqid); + ctxp->cpu = rsp->hwqid; + } + } +#endif + + if (rsp->op == NVMET_FCOP_ABORT) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6103 Abort op: oxri x%x %d cnt %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + + lpfc_nvmeio_data(phba, "NVMET FCP ABRT: " + "xri x%x state x%x cnt x%x\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + + atomic_inc(&lpfc_nvmep->xmt_fcp_abort); + ctxp->entry_cnt++; + ctxp->flag |= LPFC_NVMET_ABORT_OP; + if (ctxp->flag & LPFC_NVMET_IO_INP) + lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + else + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + return 0; + } + + /* Sanity check */ + if (ctxp->state == LPFC_NVMET_STE_ABORT) { + atomic_inc(&lpfc_nvmep->xmt_fcp_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6102 Bad state IO x%x aborted\n", + ctxp->oxid); + goto aerr; + } + + nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); + if (nvmewqeq == NULL) { + atomic_inc(&lpfc_nvmep->xmt_fcp_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6152 FCP Drop IO x%x: Prep\n", + ctxp->oxid); + goto aerr; + } + + nvmewqeq->wqe_cmpl = 
lpfc_nvmet_xmt_fcp_op_cmp; + nvmewqeq->iocb_cmpl = NULL; + nvmewqeq->context2 = ctxp; + nvmewqeq->iocb_flag |= LPFC_IO_NVMET; + ctxp->wqeq->hba_wqidx = rsp->hwqid; + + lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", + ctxp->oxid, rsp->op, rsp->rsplen); + + /* For now we take hbalock */ + spin_lock_irqsave(&phba->hbalock, iflags); + rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (rc == WQE_SUCCESS) { + ctxp->flag |= LPFC_NVMET_IO_INP; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (!phba->ktime_on) + return 0; + if (rsp->op == NVMET_FCOP_RSP) + ctxp->ts_status_wqput = ktime_get_ns(); + else + ctxp->ts_data_wqput = ktime_get_ns(); +#endif + return 0; + } + + /* Give back resources */ + atomic_inc(&lpfc_nvmep->xmt_fcp_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6153 FCP Drop IO x%x: Issue: %d\n", + ctxp->oxid, rc); + + ctxp->wqeq->hba_wqidx = 0; + nvmewqeq->context2 = NULL; + nvmewqeq->context3 = NULL; +aerr: + return -ENXIO; +} + +static void +lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) +{ + struct lpfc_nvmet_tgtport *tport = targetport->private; + + /* release any threads waiting for the unreg to complete */ + complete(&tport->tport_unreg_done); +} + +static struct nvmet_fc_target_template lpfc_tgttemplate = { + .targetport_delete = lpfc_nvmet_targetport_delete, + .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, + .fcp_op = lpfc_nvmet_xmt_fcp_op, + + .max_hw_queues = 1, + .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, + .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, + .dma_boundary = 0xFFFFFFFF, + + /* optional features */ + .target_features = 0, + /* sizes of additional private data for data structures */ + .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), +}; + +int +lpfc_nvmet_create_targetport(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct lpfc_nvmet_tgtport *tgtp; + struct nvmet_fc_port_info pinfo; + int error = 0; + + if (phba->targetport) + return 0; + + memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); + pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); + pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); + pinfo.port_id = vport->fc_myDID; + + lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; + lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt; + lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | + NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED; + + error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, + &phba->pcidev->dev, + &phba->targetport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6025 Cannot register NVME targetport " + "x%x\n", error); + phba->targetport = NULL; + } else { + tgtp = (struct lpfc_nvmet_tgtport *) + phba->targetport->private; + tgtp->phba = phba; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6026 Registered NVME " + "targetport: %p, private %p " + "portnm %llx nodenm %llx\n", + phba->targetport, tgtp, + pinfo.port_name, pinfo.node_name); + + atomic_set(&tgtp->rcv_ls_req_in, 0); + atomic_set(&tgtp->rcv_ls_req_out, 0); + atomic_set(&tgtp->rcv_ls_req_drop, 0); + atomic_set(&tgtp->xmt_ls_abort, 0); + atomic_set(&tgtp->xmt_ls_rsp, 0); + atomic_set(&tgtp->xmt_ls_drop, 0); + atomic_set(&tgtp->xmt_ls_rsp_error, 0); + atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); + atomic_set(&tgtp->rcv_fcp_cmd_in, 0); + atomic_set(&tgtp->rcv_fcp_cmd_out, 0); + atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); + atomic_set(&tgtp->xmt_fcp_abort, 0); + atomic_set(&tgtp->xmt_fcp_drop, 0); + 
atomic_set(&tgtp->xmt_fcp_read_rsp, 0); + atomic_set(&tgtp->xmt_fcp_read, 0); + atomic_set(&tgtp->xmt_fcp_write, 0); + atomic_set(&tgtp->xmt_fcp_rsp, 0); + atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); + atomic_set(&tgtp->xmt_fcp_rsp_error, 0); + atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); + atomic_set(&tgtp->xmt_abort_rsp, 0); + atomic_set(&tgtp->xmt_abort_rsp_error, 0); + atomic_set(&tgtp->xmt_abort_cmpl, 0); + } + return error; +} + +int +lpfc_nvmet_update_targetport(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + + if (!phba->targetport) + return 0; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6007 Update NVMET port %p did x%x\n", + phba->targetport, vport->fc_myDID); + + phba->targetport->port_id = vport->fc_myDID; + return 0; +} + +void +lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) +{ + struct lpfc_nvmet_tgtport *tgtp; + + if (phba->nvmet_support == 0) + return; + if (phba->targetport) { + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + init_completion(&tgtp->tport_unreg_done); + nvmet_fc_unregister_targetport(phba->targetport); + wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); + } + phba->targetport = NULL; +} + +/** + * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @nvmebuf: pointer to lpfc nvme command HBQ data structure. + * + * This routine is used for processing the WQE associated with a unsolicited + * event. It first determines whether there is an existing ndlp that matches + * the DID from the unsolicited WQE. If not, it will create a new one with + * the DID from the unsolicited WQE. The ELS command from the unsolicited + * WQE is then used to invoke the proper routine and to set up proper state + * of the discovery state machine. + **/ +static void +lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct hbq_dmabuf *nvmebuf) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct fc_frame_header *fc_hdr; + struct lpfc_nvmet_rcv_ctx *ctxp; + uint32_t *payload; + uint32_t size, oxid, sid, rc; + + if (!nvmebuf || !phba->targetport) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6154 LS Drop IO\n"); + oxid = 0; + size = 0; + sid = 0; + goto dropit; + } + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + payload = (uint32_t *)(nvmebuf->dbuf.virt); + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + sid = sli4_sid_from_fc_hdr(fc_hdr); + + ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); + if (ctxp == NULL) { + atomic_inc(&tgtp->rcv_ls_req_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6155 LS Drop IO x%x: Alloc\n", + oxid); +dropit: + lpfc_nvmeio_data(phba, "NVMET LS DROP: " + "xri x%x sz %d from %06x\n", + oxid, size, sid); + if (nvmebuf) + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + return; + } + ctxp->phba = phba; + ctxp->size = size; + ctxp->oxid = oxid; + ctxp->sid = sid; + ctxp->wqeq = NULL; + ctxp->state = LPFC_NVMET_STE_RCV; + ctxp->rqb_buffer = (void *)nvmebuf; + + lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", + oxid, size, sid); + /* + * The calling sequence should be: + * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done + * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp. 
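Both unsolicited handlers that follow recover the exchange identifiers from the raw received frame: OX_ID via be16_to_cpu() on fh_ox_id and the 24-bit source ID via sli4_sid_from_fc_hdr(). Here is a minimal sketch of those extractions from a 24-byte FC header; the byte offsets assume the standard FC-FS header layout and are not taken from the patch itself.

/* Sketch: pull OX_ID and the 24-bit S_ID out of a raw FC frame header. */
#include <stdint.h>
#include <stdio.h>

static uint16_t fc_hdr_ox_id(const uint8_t *hdr)
{
	return (uint16_t)(hdr[16] << 8 | hdr[17]);          /* big-endian OX_ID */
}

static uint32_t fc_hdr_s_id(const uint8_t *hdr)
{
	return (uint32_t)hdr[5] << 16 | (uint32_t)hdr[6] << 8 | hdr[7];
}

int main(void)
{
	uint8_t hdr[24] = { 0 };

	hdr[5] = 0x01; hdr[6] = 0x02; hdr[7] = 0x03;        /* S_ID  = 0x010203 */
	hdr[16] = 0xab; hdr[17] = 0xcd;                     /* OX_ID = 0xabcd   */
	printf("oxid x%x sid x%06x\n", fc_hdr_ox_id(hdr), fc_hdr_s_id(hdr));
	return 0;
}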
+ */ + atomic_inc(&tgtp->rcv_ls_req_in); + rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req, + payload, size); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x " + "%08x %08x %08x\n", __func__, ctxp, size, rc, + *payload, *(payload+1), *(payload+2), + *(payload+3), *(payload+4), *(payload+5)); + + if (rc == 0) { + atomic_inc(&tgtp->rcv_ls_req_out); + return; + } + + lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n", + oxid, size, sid); + + atomic_inc(&tgtp->rcv_ls_req_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n", + ctxp->oxid, rc); + + /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ + if (nvmebuf) + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + + atomic_inc(&tgtp->xmt_ls_abort); + lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); +} + +/** + * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @nvmebuf: pointer to lpfc nvme command HBQ data structure. + * + * This routine is used for processing the WQE associated with a unsolicited + * event. It first determines whether there is an existing ndlp that matches + * the DID from the unsolicited WQE. If not, it will create a new one with + * the DID from the unsolicited WQE. The ELS command from the unsolicited + * WQE is then used to invoke the proper routine and to set up proper state + * of the discovery state machine. + **/ +static void +lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct rqb_dmabuf *nvmebuf, + uint64_t isr_timestamp) +{ + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + struct fc_frame_header *fc_hdr; + uint32_t *payload; + uint32_t size, oxid, sid, rc; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t id; +#endif + + if (!nvmebuf || !phba->targetport) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6157 FCP Drop IO\n"); + oxid = 0; + size = 0; + sid = 0; + goto dropit; + } + + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + payload = (uint32_t *)(nvmebuf->dbuf.virt); + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + size = nvmebuf->bytes_recv; + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + sid = sli4_sid_from_fc_hdr(fc_hdr); + + ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context; + if (ctxp == NULL) { + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6158 FCP Drop IO x%x: Alloc\n", + oxid); + lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); + /* Cannot send ABTS without context */ + return; + } + memset(ctxp, 0, sizeof(ctxp->ctx)); + ctxp->wqeq = NULL; + ctxp->txrdy = NULL; + ctxp->offset = 0; + ctxp->phba = phba; + ctxp->size = size; + ctxp->oxid = oxid; + ctxp->sid = sid; + ctxp->state = LPFC_NVMET_STE_RCV; + ctxp->rqb_buffer = nvmebuf; + ctxp->entry_cnt = 1; + ctxp->flag = 0; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + ctxp->ts_isr_cmd = isr_timestamp; + ctxp->ts_cmd_nvme = ktime_get_ns(); + ctxp->ts_nvme_data = 0; + ctxp->ts_data_wqput = 0; + ctxp->ts_isr_data = 0; + ctxp->ts_data_nvme = 0; + ctxp->ts_nvme_status = 0; + ctxp->ts_status_wqput = 0; + ctxp->ts_isr_status = 0; + ctxp->ts_status_nvme = 0; + } + + if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { + id = smp_processor_id(); + if (id < LPFC_CHECK_CPU_CNT) + phba->cpucheck_rcv_io[id]++; + } +#endif + + lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d from 
%06x\n", + oxid, size, sid); + + atomic_inc(&tgtp->rcv_fcp_cmd_in); + /* + * The calling sequence should be: + * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done + * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. + */ + rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, + payload, size); + + /* Process FCP command */ + if (rc == 0) { + atomic_inc(&tgtp->rcv_fcp_cmd_out); + return; + } + + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n", + ctxp->oxid, rc); +dropit: + lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", + oxid, size, sid); + if (oxid) { + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); + return; + } + + if (nvmebuf) { + nvmebuf->iocbq->hba_wqidx = 0; + /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ + lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); + } +} + +/** + * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @nvmebuf: pointer to received nvme data structure. + * + * This routine is used to process an unsolicited event received from a SLI + * (Service Level Interface) ring. The actual processing of the data buffer + * associated with the unsolicited event is done by invoking the routine + * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the + * SLI RQ on which the unsolicited event was received. + **/ +void +lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocb) +{ + struct lpfc_dmabuf *d_buf; + struct hbq_dmabuf *nvmebuf; + + d_buf = piocb->context2; + nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); + + if (phba->nvmet_support == 0) { + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + return; + } + lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf); +} + +/** + * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @nvmebuf: pointer to received nvme data structure. + * + * This routine is used to process an unsolicited event received from a SLI + * (Service Level Interface) ring. The actual processing of the data buffer + * associated with the unsolicited event is done by invoking the routine + * lpfc_nvmet_unsol_fcp_buffer() after properly set up the buffer from the + * SLI RQ on which the unsolicited event was received. + **/ +void +lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct rqb_dmabuf *nvmebuf, + uint64_t isr_timestamp) +{ + if (phba->nvmet_support == 0) { + lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); + return; + } + lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, + isr_timestamp); +} + +/** + * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure + * @phba: pointer to a host N_Port data structure. + * @ctxp: Context info for NVME LS Request + * @rspbuf: DMA buffer of NVME command. + * @rspsize: size of the NVME command. + * + * This routine is used for allocating a lpfc-WQE data structure from + * the driver lpfc-WQE free-list and prepare the WQE with the parameters + * passed into the routine for discovery state machine to issue an Extended + * Link Service (NVME) commands. 
It is a generic lpfc-WQE allocation + * and preparation routine that is used by all the discovery state machine + * routines and the NVME command-specific fields will be later set up by + * the individual discovery machine routines after calling this routine + * allocating and preparing a generic WQE data structure. It fills in the + * Buffer Descriptor Entries (BDEs), allocates buffers for both command + * payload and response payload (if expected). The reference count on the + * ndlp is incremented by 1 and the reference to the ndlp is put into + * context1 of the WQE data structure for this WQE to hold the ndlp + * reference for the command's callback function to access later. + * + * Return code + * Pointer to the newly allocated/prepared nvme wqe data structure + * NULL - when nvme wqe data structure allocation/preparation failed + **/ +static struct lpfc_iocbq * +lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + dma_addr_t rspbuf, uint16_t rspsize) +{ + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *nvmewqe; + union lpfc_wqe *wqe; + + if (!lpfc_is_link_up(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6104 lpfc_nvmet_prep_ls_wqe: link err: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + + /* Allocate buffer for command wqe */ + nvmewqe = lpfc_sli_get_iocbq(phba); + if (nvmewqe == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6105 lpfc_nvmet_prep_ls_wqe: No WQE: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + + ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + goto nvme_wqe_free_wqeq_exit; + } + ctxp->wqeq = nvmewqe; + + /* prevent preparing wqe with NULL ndlp reference */ + nvmewqe->context1 = lpfc_nlp_get(ndlp); + if (nvmewqe->context1 == NULL) + goto nvme_wqe_free_wqeq_exit; + nvmewqe->context2 = ctxp; + + wqe = &nvmewqe->wqe; + memset(wqe, 0, sizeof(union lpfc_wqe)); + + /* Words 0 - 2 */ + wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize; + wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf)); + wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf)); + + /* Word 3 */ + + /* Word 4 */ + + /* Word 5 */ + bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); + bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1); + bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0); + bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL); + bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, + CMD_XMIT_SEQUENCE64_WQE); + bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI); + bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); + bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); + + /* Word 8 */ + wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag); + /* Needs to be set by caller */ + bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid); + + /* Word 10 */ + bf_set(wqe_dbde, 
&wqe->xmit_sequence.wqe_com, 1); + bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com, + OTHER_COMMAND); + + /* Word 12 */ + wqe->xmit_sequence.xmit_len = rspsize; + + nvmewqe->retry = 1; + nvmewqe->vport = phba->pport; + nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; + nvmewqe->iocb_flag |= LPFC_IO_NVME_LS; + + /* Xmit NVME response to remote NPORT <did> */ + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6039 Xmit NVME LS response to remote " + "NPORT x%x iotag:x%x oxid:x%x size:x%x\n", + ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid, + rspsize); + return nvmewqe; + +nvme_wqe_free_wqeq_exit: + nvmewqe->context2 = NULL; + nvmewqe->context3 = NULL; + lpfc_sli_release_iocbq(phba, nvmewqe); + return NULL; +} + + +static struct lpfc_iocbq * +lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp) +{ + struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req; + struct lpfc_nvmet_tgtport *tgtp; + struct sli4_sge *sgl; + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *nvmewqe; + struct scatterlist *sgel; + union lpfc_wqe128 *wqe; + uint32_t *txrdy; + dma_addr_t physaddr; + int i, cnt; + int xc = 1; + + if (!lpfc_is_link_up(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6107 lpfc_nvmet_prep_fcp_wqe: link err:" + "NPORT x%x oxid:x%x\n", ctxp->sid, + ctxp->oxid); + return NULL; + } + + ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + + if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + nvmewqe = ctxp->wqeq; + if (nvmewqe == NULL) { + /* Allocate buffer for command wqe */ + nvmewqe = ctxp->rqb_buffer->iocbq; + if (nvmewqe == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6110 lpfc_nvmet_prep_fcp_wqe: No " + "WQE: NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + ctxp->wqeq = nvmewqe; + xc = 0; /* create new XRI */ + nvmewqe->sli4_lxritag = NO_XRI; + nvmewqe->sli4_xritag = NO_XRI; + } + + /* Sanity check */ + if (((ctxp->state == LPFC_NVMET_STE_RCV) && + (ctxp->entry_cnt == 1)) || + ((ctxp->state == LPFC_NVMET_STE_DATA) && + (ctxp->entry_cnt > 1))) { + wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6111 Wrong state %s: %d cnt %d\n", + __func__, ctxp->state, ctxp->entry_cnt); + return NULL; + } + + sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl; + switch (rsp->op) { + case NVMET_FCOP_READDATA: + case NVMET_FCOP_READDATA_RSP: + /* Words 0 - 2 : The first sg segment */ + sgel = &rsp->sg[0]; + physaddr = sg_dma_address(sgel); + wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel); + wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr)); + wqe->fcp_tsend.bde.addrHigh = + 
cpu_to_le32(putPaddrHigh(physaddr)); + + /* Word 3 */ + wqe->fcp_tsend.payload_offset_len = 0; + + /* Word 4 */ + wqe->fcp_tsend.relative_offset = ctxp->offset; + + /* Word 5 */ + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, + nvmewqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); + + /* Word 8 */ + wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag); + bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid); + + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc); + bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, + FCP_COMMAND_TSEND); + + /* Word 12 */ + wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; + + /* Setup 2 SKIP SGEs */ + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + if (rsp->op == NVMET_FCOP_READDATA_RSP) { + atomic_inc(&tgtp->xmt_fcp_read_rsp); + bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); + if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) && + (rsp->rsplen == 12)) { + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); + } else { + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, + ((rsp->rsplen >> 2) - 1)); + memcpy(&wqe->words[16], rsp->rspaddr, + rsp->rsplen); + } + } else { + atomic_inc(&tgtp->xmt_fcp_read); + + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); + } + ctxp->state = LPFC_NVMET_STE_DATA; + break; + + case NVMET_FCOP_WRITEDATA: + /* Words 0 - 2 : The first sg segment */ + txrdy = pci_pool_alloc(phba->txrdy_payload_pool, + GFP_KERNEL, &physaddr); + if (!txrdy) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6041 Bad txrdy buffer: oxid x%x\n", + ctxp->oxid); + return NULL; + } + ctxp->txrdy = txrdy; + ctxp->txrdy_phys = physaddr; + wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN; + wqe->fcp_treceive.bde.addrLow = + cpu_to_le32(putPaddrLow(physaddr)); + wqe->fcp_treceive.bde.addrHigh = + cpu_to_le32(putPaddrHigh(physaddr)); + + /* Word 3 */ + wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN; + + /* Word 4 */ + wqe->fcp_treceive.relative_offset = ctxp->offset; + + /* Word 5 */ + + /* Word 6 */ + 
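Every BDE and SGE filled in these WQE-preparation routines carries the DMA address as separate 32-bit low/high words, the putPaddrLow()/putPaddrHigh() pattern seen above. A trivial standalone illustration of the split:

/* Sketch of the putPaddrLow()/putPaddrHigh() split: a 64-bit DMA
 * address becomes two 32-bit halves for the BDE address words. */
#include <stdint.h>
#include <stdio.h>

static uint32_t put_paddr_low(uint64_t addr)  { return (uint32_t)addr; }
static uint32_t put_paddr_high(uint64_t addr) { return (uint32_t)(addr >> 32); }

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;    /* example bus address */

	printf("addrLow  = 0x%08x\n", put_paddr_low(dma));
	printf("addrHigh = 0x%08x\n", put_paddr_high(dma));
	return 0;
}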
bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com, + nvmewqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, + CMD_FCP_TRECEIVE64_WQE); + + /* Word 8 */ + wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag); + bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid); + + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); + bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc); + bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, + FCP_COMMAND_TRECEIVE); + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); + + /* Word 12 */ + wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; + + /* Setup 1 TXRDY and 1 SKIP SGE */ + txrdy[0] = 0; + txrdy[1] = cpu_to_be32(rsp->transfer_length); + txrdy[2] = 0; + + sgl->addr_hi = putPaddrHigh(physaddr); + sgl->addr_lo = putPaddrLow(physaddr); + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN); + sgl++; + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + ctxp->state = LPFC_NVMET_STE_DATA; + atomic_inc(&tgtp->xmt_fcp_write); + break; + + case NVMET_FCOP_RSP: + /* Words 0 - 2 */ + sgel = &rsp->sg[0]; + physaddr = rsp->rspdma; + wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; + wqe->fcp_trsp.bde.addrLow = + cpu_to_le32(putPaddrLow(physaddr)); + wqe->fcp_trsp.bde.addrHigh = + cpu_to_le32(putPaddrHigh(physaddr)); + + /* Word 3 */ + wqe->fcp_trsp.response_len = rsp->rsplen; + + /* Word 4 */ + wqe->fcp_trsp.rsvd_4_5[0] = 0; + + + /* Word 5 */ + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, + nvmewqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE); + + /* Word 8 */ + wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag); + bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid); + + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, + LPFC_WQE_LENLOC_WORD3); + bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc); + bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, 
&wqe->fcp_trsp.wqe_com, + FCP_COMMAND_TRSP); + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); + ctxp->state = LPFC_NVMET_STE_RSP; + + if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { + /* Good response - all zero's on wire */ + bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0); + } else { + bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, + ((rsp->rsplen >> 2) - 1)); + memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); + } + + /* Use rspbuf, NOT sg list */ + rsp->sg_cnt = 0; + sgl->word2 = 0; + atomic_inc(&tgtp->xmt_fcp_rsp); + break; + + default: + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6064 Unknown Rsp Op %d\n", + rsp->op); + return NULL; + } + + nvmewqe->retry = 1; + nvmewqe->vport = phba->pport; + nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; + nvmewqe->context1 = ndlp; + + for (i = 0; i < rsp->sg_cnt; i++) { + sgel = &rsp->sg[i]; + physaddr = sg_dma_address(sgel); + cnt = sg_dma_len(sgel); + sgl->addr_hi = putPaddrHigh(physaddr); + sgl->addr_lo = putPaddrLow(physaddr); + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset); + if ((i+1) == rsp->sg_cnt) + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(cnt); + sgl++; + ctxp->offset += cnt; + } + return nvmewqe; +} + +/** + * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME ABTS for FCP cmds + * The function frees memory resources used for the NVME commands. + **/ +static void +lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t status, result; + + ctxp = cmdwqe->context2; + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_abort_cmpl); + + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + "6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n", + ctxp->oxid, wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + ctxp->state = LPFC_NVMET_STE_DONE; + lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); +} + +/** + * lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME ABTS for FCP cmds + * The function frees memory resources used for the NVME commands. 
+ **/ +static void +lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t status, result; + + ctxp = cmdwqe->context2; + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_abort_cmpl); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", + ctxp, wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + if (ctxp) { + /* Sanity check */ + if (ctxp->state != LPFC_NVMET_STE_ABORT) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + "6112 ABORT Wrong state:%d oxid x%x\n", + ctxp->state, ctxp->oxid); + } + ctxp->state = LPFC_NVMET_STE_DONE; + lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; + } +} + +/** + * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME ABTS for LS cmds + * The function frees memory resources used for the NVME commands. + **/ +static void +lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t status, result; + + ctxp = cmdwqe->context2; + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_abort_cmpl); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", + ctxp, wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + if (ctxp) { + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); + kfree(ctxp); + } else + lpfc_sli_release_iocbq(phba, cmdwqe); +} + +static int +lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + union lpfc_wqe *wqe_abts; + struct lpfc_nodelist *ndlp; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6067 %s: Entrypoint: sid %x xri %x\n", __func__, + sid, xri); + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + + ndlp = lpfc_findnode_did(phba->pport, sid); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6134 Drop ABTS - wrong NDLP state x%x.\n", + ndlp->nlp_state); + + /* No failure to an ABTS request. */ + return 0; + } + + abts_wqeq = ctxp->wqeq; + wqe_abts = &abts_wqeq->wqe; + ctxp->state = LPFC_NVMET_STE_ABORT; + + /* + * Since we zero the whole WQE, we need to ensure we set the WQE fields + * that were initialized in lpfc_sli4_nvmet_alloc. 
+ */ + memset(wqe_abts, 0, sizeof(union lpfc_wqe)); + + /* Word 5 */ + bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0); + bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1); + bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0); + bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS); + bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com, + abts_wqeq->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com, + CMD_XMIT_SEQUENCE64_WQE); + bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI); + bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3); + bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0); + + /* Word 8 */ + wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag); + /* Needs to be set by caller */ + bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); + + /* Word 10 */ + bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1); + bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0); + bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com, + OTHER_COMMAND); + + abts_wqeq->vport = phba->pport; + abts_wqeq->context1 = ndlp; + abts_wqeq->context2 = ctxp; + abts_wqeq->context3 = NULL; + abts_wqeq->rsvd2 = 0; + /* hba_wqidx should already be setup from command we are aborting */ + abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; + abts_wqeq->iocb.ulpLe = 1; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6069 Issue ABTS to xri x%x reqtag x%x\n", + xri, abts_wqeq->iotag); + return 1; +} + +static int +lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + union lpfc_wqe *abts_wqe; + struct lpfc_nodelist *ndlp; + unsigned long flags; + int rc; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (!ctxp->wqeq) { + ctxp->wqeq = ctxp->rqb_buffer->iocbq; + ctxp->wqeq->hba_wqidx = 0; + } + + ndlp = lpfc_findnode_did(phba->pport, sid); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6160 Drop ABTS - wrong NDLP state x%x.\n", + ndlp->nlp_state); + + /* No failure to an ABTS request. */ + return 0; + } + + /* Issue ABTS for this WQE based on iotag */ + ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); + if (!ctxp->abort_wqeq) { + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6161 Abort failed: No wqeqs: " + "xri: x%x\n", ctxp->oxid); + /* No failure to an ABTS request. */ + return 0; + } + abts_wqeq = ctxp->abort_wqeq; + abts_wqe = &abts_wqeq->wqe; + ctxp->state = LPFC_NVMET_STE_ABORT; + + /* Announce entry to new IO submit field. 
*/ + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + "6162 Abort Request to rport DID x%06x " + "for xri x%x x%x\n", + ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); + + /* If the hba is getting reset, this flag is set. It is + * cleared when the reset is complete and rings reestablished. + */ + spin_lock_irqsave(&phba->hbalock, flags); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6163 Driver in reset cleanup - flushing " + "NVME Req now. hba_flag x%x oxid x%x\n", + phba->hba_flag, ctxp->oxid); + lpfc_sli_release_iocbq(phba, abts_wqeq); + return 0; + } + + /* Outstanding abort is in progress */ + if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6164 Outstanding NVME I/O Abort Request " + "still pending on oxid x%x\n", + ctxp->oxid); + lpfc_sli_release_iocbq(phba, abts_wqeq); + return 0; + } + + /* Ready - mark outstanding as aborted by driver. */ + abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; + + /* WQEs are reused. Clear stale data and set key fields to + * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. + */ + memset(abts_wqe, 0, sizeof(union lpfc_wqe)); + + /* word 3 */ + bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); + + /* word 7 */ + bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); + bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + + /* word 8 - tell the FW to abort the IO associated with this + * outstanding exchange ID. + */ + abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag; + + /* word 9 - this is the iotag for the abts_wqe completion. */ + bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, + abts_wqeq->iotag); + + /* word 10 */ + bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); + + /* word 11 */ + bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); + bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; + abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; + abts_wqeq->iocb_cmpl = 0; + abts_wqeq->iocb_flag |= LPFC_IO_NVME; + abts_wqeq->context2 = ctxp; + rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (rc == WQE_SUCCESS) + return 0; + + lpfc_sli_release_iocbq(phba, abts_wqeq); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6166 Failed abts issue_wqe with status x%x " + "for oxid x%x.\n", + rc, ctxp->oxid); + return 1; +} + + +static int +lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + unsigned long flags; + int rc; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (!ctxp->wqeq) { + ctxp->wqeq = ctxp->rqb_buffer->iocbq; + ctxp->wqeq->hba_wqidx = 0; + } + + rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); + if (rc == 0) + goto aerr; + + spin_lock_irqsave(&phba->hbalock, flags); + abts_wqeq = ctxp->wqeq; + abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp; + abts_wqeq->iocb_cmpl = 0; + abts_wqeq->iocb_flag |= LPFC_IO_NVMET; + rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, 
abts_wqeq); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (rc == WQE_SUCCESS) { + atomic_inc(&tgtp->xmt_abort_rsp); + return 0; + } + +aerr: + lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", + ctxp->oxid, rc); + return 1; +} + +static int +lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + union lpfc_wqe *wqe_abts; + unsigned long flags; + int rc; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (!ctxp->wqeq) { + /* Issue ABTS for this WQE based on iotag */ + ctxp->wqeq = lpfc_sli_get_iocbq(phba); + if (!ctxp->wqeq) { + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6068 Abort failed: No wqeqs: " + "xri: x%x\n", xri); + /* No failure to an ABTS request. */ + kfree(ctxp); + return 0; + } + } + abts_wqeq = ctxp->wqeq; + wqe_abts = &abts_wqeq->wqe; + lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); + + spin_lock_irqsave(&phba->hbalock, flags); + abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; + abts_wqeq->iocb_cmpl = 0; + abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; + rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (rc == WQE_SUCCESS) { + atomic_inc(&tgtp->xmt_abort_rsp); + return 0; + } + + atomic_inc(&tgtp->xmt_abort_rsp_error); + abts_wqeq->context2 = NULL; + abts_wqeq->context3 = NULL; + lpfc_sli_release_iocbq(phba, abts_wqeq); + kfree(ctxp); + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6056 Failed to Issue ABTS. Status x%x\n", rc); + return 0; +} diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h new file mode 100644 index 000000000000..ca96f05c1604 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -0,0 +1,116 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + ********************************************************************/ + +#define LPFC_NVMET_MIN_SEGS 16 +#define LPFC_NVMET_DEFAULT_SEGS 64 /* 256K IOs */ +#define LPFC_NVMET_MAX_SEGS 510 +#define LPFC_NVMET_SUCCESS_LEN 12 + +/* Used for NVME Target */ +struct lpfc_nvmet_tgtport { + struct lpfc_hba *phba; + struct completion tport_unreg_done; + + /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ + atomic_t rcv_ls_req_in; + atomic_t rcv_ls_req_out; + atomic_t rcv_ls_req_drop; + atomic_t xmt_ls_abort; + + /* Stats counters - lpfc_nvmet_xmt_ls_rsp */ + atomic_t xmt_ls_rsp; + atomic_t xmt_ls_drop; + + /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */ + atomic_t xmt_ls_rsp_error; + atomic_t xmt_ls_rsp_cmpl; + + /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */ + atomic_t rcv_fcp_cmd_in; + atomic_t rcv_fcp_cmd_out; + atomic_t rcv_fcp_cmd_drop; + + /* Stats counters - lpfc_nvmet_xmt_fcp_op */ + atomic_t xmt_fcp_abort; + atomic_t xmt_fcp_drop; + atomic_t xmt_fcp_read_rsp; + atomic_t xmt_fcp_read; + atomic_t xmt_fcp_write; + atomic_t xmt_fcp_rsp; + + /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */ + atomic_t xmt_fcp_rsp_cmpl; + atomic_t xmt_fcp_rsp_error; + atomic_t xmt_fcp_rsp_drop; + + + /* Stats counters - lpfc_nvmet_unsol_issue_abort */ + atomic_t xmt_abort_rsp; + atomic_t xmt_abort_rsp_error; + + /* Stats counters - lpfc_nvmet_xmt_abort_cmp */ + atomic_t xmt_abort_cmpl; +}; + +struct lpfc_nvmet_rcv_ctx { + union { + struct nvmefc_tgt_ls_req ls_req; + struct nvmefc_tgt_fcp_req fcp_req; + } ctx; + struct lpfc_hba *phba; + struct lpfc_iocbq *wqeq; + struct lpfc_iocbq *abort_wqeq; + dma_addr_t txrdy_phys; + uint32_t *txrdy; + uint32_t sid; + uint32_t offset; + uint16_t oxid; + uint16_t size; + uint16_t entry_cnt; + uint16_t cpu; + uint16_t state; + /* States */ +#define LPFC_NVMET_STE_FREE 0 +#define LPFC_NVMET_STE_RCV 1 +#define LPFC_NVMET_STE_DATA 2 +#define LPFC_NVMET_STE_ABORT 3 +#define LPFC_NVMET_STE_RSP 4 +#define LPFC_NVMET_STE_DONE 5 + uint16_t flag; +#define LPFC_NVMET_IO_INP 1 +#define LPFC_NVMET_ABORT_OP 2 + struct rqb_dmabuf *rqb_buffer; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint64_t ts_isr_cmd; + uint64_t ts_cmd_nvme; + uint64_t ts_nvme_data; + uint64_t ts_data_wqput; + uint64_t ts_isr_data; + uint64_t ts_data_nvme; + uint64_t ts_nvme_status; + uint64_t ts_status_wqput; + uint64_t ts_isr_status; + uint64_t ts_status_nvme; +#endif +}; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1180a22beb43..9d6384af9fce 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -413,7 +415,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) * struct fcp_cmnd, struct fcp_rsp and the number of bde's * necessary to support the sg_tablesize. 
*/ - psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool, + psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); @@ -424,8 +426,8 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) /* Allocate iotag for psb->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + psb->data, psb->dma_handle); kfree(psb); break; } @@ -522,6 +524,8 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) struct lpfc_scsi_buf *psb, *next_psb; unsigned long iflag = 0; + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return; spin_lock_irqsave(&phba->hbalock, iflag); spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); list_for_each_entry_safe(psb, next_psb, @@ -554,8 +558,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, int i; struct lpfc_nodelist *ndlp; int rrq_empty = 0; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return; spin_lock_irqsave(&phba->hbalock, iflag); spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); list_for_each_entry_safe(psb, next_psb, @@ -819,7 +825,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) * for the struct fcp_cmnd, struct fcp_rsp and the number * of bde's necessary to support the sg_tablesize. */ - psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool, + psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); @@ -832,7 +838,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) */ if (phba->cfg_enable_bg && (((unsigned long)(psb->data) & (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + pci_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); break; @@ -841,8 +847,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + psb->data, psb->dma_handle); kfree(psb); break; } @@ -850,8 +856,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) /* Allocate iotag for psb->cur_iocbq. 
*/ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + psb->data, psb->dma_handle); kfree(psb); lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "3368 Failed to allocate IOTAG for" @@ -920,7 +926,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) phba->sli4_hba.scsi_xri_cnt++; spin_unlock_irq(&phba->scsi_buf_list_get_lock); } - lpfc_printf_log(phba, KERN_INFO, LOG_BG, + lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP, "3021 Allocate %d out of %d requested new SCSI " "buffers\n", bcnt, num_to_alloc); @@ -3894,7 +3900,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, } } chann = atomic_add_return(1, &phba->fcp_qidx); - chann = (chann % phba->cfg_fcp_io_channel); + chann = chann % phba->cfg_fcp_io_channel; return chann; } @@ -3925,6 +3931,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct Scsi_Host *shost; uint32_t logit = LOG_FCP; + phba->fc4ScsiIoCmpls++; + /* Sanity check on return of outstanding command */ cmd = lpfc_cmd->pCmd; if (!cmd) @@ -3967,6 +3975,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->prot_data_segment = NULL; } #endif + if (pnode && NLP_CHK_NODE_ACT(pnode)) atomic_dec(&pnode->cmd_pending); @@ -4241,19 +4250,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, vport->cfg_first_burst_size; } fcp_cmnd->fcpCntl3 = WRITE_DATA; - phba->fc4OutputRequests++; + phba->fc4ScsiOutputRequests++; } else { iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; fcp_cmnd->fcpCntl3 = READ_DATA; - phba->fc4InputRequests++; + phba->fc4ScsiInputRequests++; } } else { iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; iocb_cmd->un.fcpi.fcpi_parm = 0; iocb_cmd->ulpPU = 0; fcp_cmnd->fcpCntl3 = 0; - phba->fc4ControlRequests++; + phba->fc4ScsiControlRequests++; } if (phba->sli_rev == 3 && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) @@ -4467,7 +4476,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) unsigned long poll_tmo_expires = (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); - if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) + if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) mod_timer(&phba->fcp_poll_timer, poll_tmo_expires); } @@ -4497,7 +4506,7 @@ void lpfc_poll_timeout(unsigned long ptr) if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); @@ -4561,7 +4570,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (lpfc_cmd == NULL) { lpfc_rampdown_queue_depth(phba); - lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC, + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, "0707 driver's buffer pool is empty, " "IO busied\n"); goto out_host_busy; @@ -4636,7 +4645,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) } if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); @@ -4681,7 +4690,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) IOCB_t *cmd, *icmd; int ret = SUCCESS, status = 0; struct lpfc_sli_ring *pring_s4; - int 
ring_number, ret_val; + int ret_val; unsigned long flags, iflags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); @@ -4769,7 +4778,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) icmd->ulpClass = cmd->ulpClass; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocb->fcp_wqidx = iocb->fcp_wqidx; + abtsiocb->hba_wqidx = iocb->hba_wqidx; abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; if (iocb->iocb_flag & LPFC_IO_FOF) abtsiocb->iocb_flag |= LPFC_IO_FOF; @@ -4782,8 +4791,11 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; abtsiocb->vport = vport; if (phba->sli_rev == LPFC_SLI_REV4) { - ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx; - pring_s4 = &phba->sli.ring[ring_number]; + pring_s4 = lpfc_sli4_calc_ring(phba, iocb); + if (pring_s4 == NULL) { + ret = FAILED; + goto out_unlock; + } /* Note: both hbalock and ring_lock must be set here */ spin_lock_irqsave(&pring_s4->ring_lock, iflags); ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, @@ -4805,7 +4817,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); wait_for_cmpl: lpfc_cmd->waitq = &waitq; @@ -5105,7 +5117,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); if (cnt) lpfc_sli_abort_taskmgmt(vport, - &phba->sli.ring[phba->sli.fcp_ring], + &phba->sli.sli3_ring[LPFC_FCP_RING], tgt_id, lun_id, context); later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; while (time_after(later, jiffies) && cnt) { @@ -5323,7 +5335,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) continue; if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && ndlp->nlp_sid == i && - ndlp->rport) { + ndlp->rport && + ndlp->nlp_type & NLP_FCP_TARGET) { match = 1; break; } @@ -5534,7 +5547,7 @@ lpfc_slave_configure(struct scsi_device *sdev) if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } @@ -5898,6 +5911,48 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, return false; } +static int +lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) +{ + return SCSI_MLQUEUE_HOST_BUSY; +} + +static int +lpfc_no_handler(struct scsi_cmnd *cmnd) +{ + return FAILED; +} + +static int +lpfc_no_slave(struct scsi_device *sdev) +{ + return -ENODEV; +} + +struct scsi_host_template lpfc_template_nvme = { + .module = THIS_MODULE, + .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, + .info = lpfc_info, + .queuecommand = lpfc_no_command, + .eh_abort_handler = lpfc_no_handler, + .eh_device_reset_handler = lpfc_no_handler, + .eh_target_reset_handler = lpfc_no_handler, + .eh_bus_reset_handler = lpfc_no_handler, + .eh_host_reset_handler = lpfc_no_handler, + .slave_alloc = lpfc_no_slave, + .slave_configure = lpfc_no_slave, + .scan_finished = lpfc_scan_finished, + .this_id = -1, + .sg_tablesize = 1, + .cmd_per_lun = 1, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = lpfc_hba_attrs, + .max_sectors = 0xFFFF, + .vendor_id = LPFC_NL_VENDOR_ID, + .track_queue_depth = 0, +}; + struct scsi_host_template lpfc_template_s3 = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h 
b/drivers/scsi/lpfc/lpfc_scsi.h index 8cb80dabada8..5da7e15400cb 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -135,6 +137,8 @@ struct lpfc_scsi_buf { uint32_t timeout; + uint16_t flags; /* TBD convert exch_busy to flags */ +#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ uint16_t status; /* From IOCB Word 7- ulpStatus */ uint32_t result; /* From IOCB Word 4. */ @@ -164,6 +168,8 @@ struct lpfc_scsi_buf { * Iotag is in here */ struct lpfc_iocbq cur_iocbq; + uint16_t cpu; + wait_queue_head_t *waitq; unsigned long start_time; @@ -178,13 +184,15 @@ struct lpfc_scsi_buf { #endif }; -#define LPFC_SCSI_DMA_EXT_SIZE 264 -#define LPFC_BPL_SIZE 1024 -#define MDAC_DIRECT_CMD 0x22 +#define LPFC_SCSI_DMA_EXT_SIZE 264 +#define LPFC_BPL_SIZE 1024 +#define MDAC_DIRECT_CMD 0x22 + +#define FIND_FIRST_OAS_LUN 0 +#define NO_MORE_OAS_LUN -1 +#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN -#define FIND_FIRST_OAS_LUN 0 -#define NO_MORE_OAS_LUN -1 -#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN +#define TXRDY_PAYLOAD_LEN 12 int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8e886caf2454..e43e5e23c24b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -34,14 +36,18 @@ #include <scsi/fc/fc_fs.h> #include <linux/aer.h> +#include <linux/nvme-fc-driver.h> + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" #include "lpfc_crtn.h" #include "lpfc_logmsg.h" #include "lpfc_compat.h" @@ -67,14 +73,17 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, struct lpfc_iocbq *); static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, struct hbq_dmabuf *); -static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *, +static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_cqe *); -static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *, +static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, int); static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, uint32_t); static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); +static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct lpfc_iocbq *cmdiocb); static IOCB_t * lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) @@ -271,10 +280,11 @@ lpfc_sli4_eq_get(struct lpfc_queue *q) /* * insert barrier for instruction interlock : data from the hardware * must have the valid bit checked before it can be copied and acted - * upon. Given what was seen in lpfc_sli4_cq_get() of speculative - * instructions allowing action on content before valid bit checked, - * add barrier here as well. May not be needed as "content" is a - * single 32-bit entity here (vs multi word structure for cq's). + * upon. Speculative instructions were allowing a bcopy at the start + * of lpfc_sli4_fp_handle_wcqe(), which is called immediately + * after our return, to copy data before the valid bit check above + * was done. As such, some of the copied data was stale. The barrier + * ensures the check is before any data is copied. */ mb(); return eqe; @@ -386,11 +396,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q) /* * insert barrier for instruction interlock : data from the hardware * must have the valid bit checked before it can be copied and acted - * upon. Speculative instructions were allowing a bcopy at the start - * of lpfc_sli4_fp_handle_wcqe(), which is called immediately - * after our return, to copy data before the valid bit check above - * was done. As such, some of the copied data was stale. The barrier - * ensures the check is before any data is copied. + * upon. Given what was seen in lpfc_sli4_cq_get() of speculative + * instructions allowing action on content before valid bit checked, + * add barrier here as well. May not be needed as "content" is a + * single 32-bit entity here (vs multi word structure for cq's). */ mb(); return cqe; @@ -456,7 +465,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) * on @q then this function will return -ENOMEM. * The caller is expected to hold the hbalock when calling this routine. 
**/ -static int +int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) { @@ -602,7 +611,7 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba) * * Returns sglq ponter = success, NULL = Failure. **/ -static struct lpfc_sglq * +struct lpfc_sglq * __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) { struct lpfc_sglq *sglq; @@ -902,7 +911,7 @@ out: } /** - * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool + * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool * @phba: Pointer to HBA context object. * @piocb: Pointer to the iocbq. * @@ -912,9 +921,9 @@ out: * allocated sglq object else it returns NULL. **/ static struct lpfc_sglq * -__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) +__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) { - struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; + struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; struct lpfc_sglq *sglq = NULL; struct lpfc_sglq *start_sglq = NULL; struct lpfc_scsi_buf *lpfc_cmd; @@ -938,18 +947,21 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) ndlp = piocbq->context1; } - list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); start_sglq = sglq; while (!found) { if (!sglq) return NULL; - if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) { + if (ndlp && ndlp->active_rrqs_xri_bitmap && + test_bit(sglq->sli4_lxritag, + ndlp->active_rrqs_xri_bitmap)) { /* This xri has an rrq outstanding for this DID. * put it back in the list and get another xri. */ - list_add_tail(&sglq->list, lpfc_sgl_list); + list_add_tail(&sglq->list, lpfc_els_sgl_list); sglq = NULL; - list_remove_head(lpfc_sgl_list, sglq, + list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); if (sglq == start_sglq) { sglq = NULL; @@ -962,6 +974,35 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; sglq->state = SGL_ALLOCATED; } + spin_unlock(&phba->sli4_hba.sgl_list_lock); + return sglq; +} + +/** + * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool + * @phba: Pointer to HBA context object. + * @piocb: Pointer to the iocbq. + * + * This function is called with the sgl_list lock held. This function + * gets a new driver sglq object from the sglq list. If the + * list is not empty then it is successful, it returns pointer to the newly + * allocated sglq object else it returns NULL. + **/ +struct lpfc_sglq * +__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) +{ + struct list_head *lpfc_nvmet_sgl_list; + struct lpfc_sglq *sglq = NULL; + + lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list; + + lockdep_assert_held(&phba->sli4_hba.sgl_list_lock); + + list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list); + if (!sglq) + return NULL; + phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; + sglq->state = SGL_ALLOCATED; return sglq; } @@ -1002,7 +1043,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba) * this IO was aborted then the sglq entry it put on the * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the * IO has good status or fails for any other reason then the sglq - * entry is added to the free list (lpfc_sgl_list). + * entry is added to the free list (lpfc_els_sgl_list). 
**/ static void __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) @@ -1010,7 +1051,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) struct lpfc_sglq *sglq; size_t start_clean = offsetof(struct lpfc_iocbq, iocb); unsigned long iflag = 0; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; lockdep_assert_held(&phba->hbalock); @@ -1021,21 +1062,36 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) if (sglq) { + if (iocbq->iocb_flag & LPFC_IO_NVMET) { + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, + iflag); + sglq->state = SGL_FREED; + sglq->ndlp = NULL; + list_add_tail(&sglq->list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock_irqrestore( + &phba->sli4_hba.sgl_list_lock, iflag); + goto out; + } + + pring = phba->sli4_hba.els_wq->pring; if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && (sglq->state != SGL_XRI_ABORTED)) { - spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, - iflag); + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, + iflag); list_add(&sglq->list, - &phba->sli4_hba.lpfc_abts_els_sgl_list); + &phba->sli4_hba.lpfc_abts_els_sgl_list); spin_unlock_irqrestore( - &phba->sli4_hba.abts_sgl_list_lock, iflag); + &phba->sli4_hba.sgl_list_lock, iflag); } else { - spin_lock_irqsave(&pring->ring_lock, iflag); + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, + iflag); sglq->state = SGL_FREED; sglq->ndlp = NULL; list_add_tail(&sglq->list, - &phba->sli4_hba.lpfc_sgl_list); - spin_unlock_irqrestore(&pring->ring_lock, iflag); + &phba->sli4_hba.lpfc_els_sgl_list); + spin_unlock_irqrestore( + &phba->sli4_hba.sgl_list_lock, iflag); /* Check if TXQ queue needs to be serviced */ if (!list_empty(&pring->txq)) @@ -1043,13 +1099,15 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) } } - +out: /* * Clean all volatile data fields, preserve iotag and node struct. 
*/ memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); iocbq->sli4_lxritag = NO_XRI; iocbq->sli4_xritag = NO_XRI; + iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | + LPFC_IO_NVME_LS); list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } @@ -1639,7 +1697,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) if (lpfc_is_link_up(phba) && (!list_empty(&pring->txq)) && - (pring->ringno != phba->sli.fcp_ring || + (pring->ringno != LPFC_FCP_RING || phba->sli.sli_flag & LPFC_PROCESS_LA)) { while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && @@ -1718,7 +1776,6 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) struct hbq_dmabuf *hbq_buf; unsigned long flags; int i, hbq_count; - uint32_t hbqno; hbq_count = lpfc_sli_hbq_count(); /* Return all memory used by all HBQs */ @@ -1732,24 +1789,6 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) } phba->hbqs[i].buffer_count = 0; } - /* Return all HBQ buffer that are in-fly */ - list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, - list) { - hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); - list_del(&hbq_buf->dbuf.list); - if (hbq_buf->tag == -1) { - (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) - (phba, hbq_buf); - } else { - hbqno = hbq_buf->tag >> 16; - if (hbqno >= LPFC_MAX_HBQS) - (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) - (phba, hbq_buf); - else - (phba->hbqs[hbqno].hbq_free_buffer)(phba, - hbq_buf); - } - } /* Mark the HBQs not in use */ phba->hbq_in_use = 0; @@ -1802,7 +1841,7 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); - hbqe->bde.tus.f.bdeSize = hbq_buf->size; + hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; hbqe->bde.tus.f.bdeFlags = 0; hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); @@ -1834,17 +1873,23 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, int rc; struct lpfc_rqe hrqe; struct lpfc_rqe drqe; + struct lpfc_queue *hrq; + struct lpfc_queue *drq; + + if (hbqno != LPFC_ELS_HBQ) + return 1; + hrq = phba->sli4_hba.hdr_rq; + drq = phba->sli4_hba.dat_rq; lockdep_assert_held(&phba->hbalock); hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); - rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, - &hrqe, &drqe); + rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); if (rc < 0) return rc; - hbq_buf->tag = rc; + hbq_buf->tag = (rc | (hbqno << 16)); list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); return 0; } @@ -1861,22 +1906,9 @@ static struct lpfc_hbq_init lpfc_els_hbq = { .add_count = 40, }; -/* HBQ for the extra ring if needed */ -static struct lpfc_hbq_init lpfc_extra_hbq = { - .rn = 1, - .entry_count = 200, - .mask_count = 0, - .profile = 0, - .ring_mask = (1 << LPFC_EXTRA_RING), - .buffer_count = 0, - .init_count = 0, - .add_count = 5, -}; - /* Array of HBQs */ struct lpfc_hbq_init *lpfc_hbq_defs[] = { &lpfc_els_hbq, - &lpfc_extra_hbq, }; /** @@ -1998,6 +2030,29 @@ lpfc_sli_hbqbuf_get(struct list_head *rb_list) } /** + * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * + * This function removes the first RQ buffer on an RQ buffer list and returns a + * pointer to that buffer. 
If it finds no buffers on the list it returns NULL. + **/ +static struct rqb_dmabuf * +lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) +{ + struct lpfc_dmabuf *h_buf; + struct lpfc_rqb *rqbp; + + rqbp = hrq->rqbp; + list_remove_head(&rqbp->rqb_buffer_list, h_buf, + struct lpfc_dmabuf, list); + if (!h_buf) + return NULL; + rqbp->buffer_count--; + return container_of(h_buf, struct rqb_dmabuf, hbuf); +} + +/** * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag * @phba: Pointer to HBA context object. * @tag: Tag of the hbq buffer. @@ -2463,6 +2518,14 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { int i; + switch (fch_type) { + case FC_TYPE_NVME: + lpfc_nvmet_unsol_ls_event(phba, pring, saveq); + return 1; + default: + break; + } + /* unSolicited Responses */ if (pring->prt[0].profile) { if (pring->prt[0].lpfc_sli_rcv_unsol_event) @@ -2713,7 +2776,7 @@ static struct lpfc_iocbq * lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint16_t iotag) { - struct lpfc_iocbq *cmd_iocb; + struct lpfc_iocbq *cmd_iocb = NULL; lockdep_assert_held(&phba->hbalock); if (iotag != 0 && iotag <= phba->sli.last_iotag) { @@ -2727,8 +2790,10 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, } lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0372 iotag x%x is out of range: max iotag (x%x)\n", - iotag, phba->sli.last_iotag); + "0372 iotag x%x lookup error: max iotag (x%x) " + "iocb_flag x%x\n", + iotag, phba->sli.last_iotag, + cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); return NULL; } @@ -3598,6 +3663,33 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) } /** + * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function aborts all iocbs in the given ring and frees all the iocb + * objects in txq. This function issues an abort iocb for all the iocb commands + * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before + * the return of this function. The caller is not required to hold any locks. + **/ +void +lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + LIST_HEAD(completions); + struct lpfc_iocbq *iocb, *next_iocb; + + if (pring->ringno == LPFC_ELS_RING) + lpfc_fabric_abort_hba(phba); + + spin_lock_irq(&phba->hbalock); + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) + lpfc_sli4_abort_nvme_io(phba, pring, iocb); + spin_unlock_irq(&phba->hbalock); +} + + +/** * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. @@ -3617,15 +3709,40 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_fcp_io_channel; i++) { - pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + pring = phba->sli4_hba.fcp_wq[i]->pring; lpfc_sli_abort_iocb_ring(phba, pring); } } else { - pring = &psli->ring[psli->fcp_ring]; + pring = &psli->sli3_ring[LPFC_FCP_RING]; lpfc_sli_abort_iocb_ring(phba, pring); } } +/** + * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings + * @phba: Pointer to HBA context object. + * + * This function aborts all wqes in NVME rings. This function issues an + * abort wqe for all the outstanding IO commands in txcmplq. 
The iocbs in + * the txcmplq is not guaranteed to complete before the return of this + * function. The caller is not required to hold any locks. + **/ +void +lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba) +{ + struct lpfc_sli_ring *pring; + uint32_t i; + + if (phba->sli_rev < LPFC_SLI_REV4) + return; + + /* Abort all IO on each NVME ring. */ + for (i = 0; i < phba->cfg_nvme_io_channel; i++) { + pring = phba->sli4_hba.nvme_wq[i]->pring; + lpfc_sli_abort_wqe_ring(phba, pring); + } +} + /** * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring @@ -3654,7 +3771,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_fcp_io_channel; i++) { - pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + pring = phba->sli4_hba.fcp_wq[i]->pring; spin_lock_irq(&pring->ring_lock); /* Retrieve everything on txq */ @@ -3675,7 +3792,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) IOERR_SLI_DOWN); } } else { - pring = &psli->ring[psli->fcp_ring]; + pring = &psli->sli3_ring[LPFC_FCP_RING]; spin_lock_irq(&phba->hbalock); /* Retrieve everything on txq */ @@ -3696,6 +3813,51 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) } /** + * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings + * @phba: Pointer to HBA context object. + * + * This function flushes all wqes in the nvme rings and frees all resources + * in the txcmplq. This function does not issue abort wqes for the IO + * commands in txcmplq, they will just be returned with + * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI + * slot has been permanently disabled. + **/ +void +lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) +{ + LIST_HEAD(txcmplq); + struct lpfc_sli_ring *pring; + uint32_t i; + + if (phba->sli_rev < LPFC_SLI_REV4) + return; + + /* Hint to other driver operations that a flush is in progress. */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= HBA_NVME_IOQ_FLUSH; + spin_unlock_irq(&phba->hbalock); + + /* Cycle through all NVME rings and complete each IO with + * a local driver reason code. This is a flush so no + * abort exchange to FW. + */ + for (i = 0; i < phba->cfg_nvme_io_channel; i++) { + pring = phba->sli4_hba.nvme_wq[i]->pring; + + /* Retrieve everything on the txcmplq */ + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txcmplq, &txcmplq); + pring->txcmplq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); + + /* Flush the txcmpq &&&PAE */ + lpfc_sli_cancel_iocbs(phba, &txcmplq, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + } +} + +/** * lpfc_sli_brdready_s3 - Check for sli3 host ready status * @phba: Pointer to HBA context object. * @mask: Bit mask to be checked. 
@@ -4069,7 +4231,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) /* Initialize relevant SLI info */ for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + pring = &psli->sli3_ring[i]; pring->flag = 0; pring->sli.sli3.rspidx = 0; pring->sli.sli3.next_cmdidx = 0; @@ -4498,10 +4660,11 @@ static int lpfc_sli4_rb_setup(struct lpfc_hba *phba) { phba->hbq_in_use = 1; - phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; phba->hbq_count = 1; + lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); /* Initially populate or replenish the HBQs */ - lpfc_sli_hbqbuf_init_hbqs(phba, 0); return 0; } @@ -5107,26 +5270,38 @@ out_free_mboxq: static void lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) { - int fcp_eqidx; + int qidx; lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); - fcp_eqidx = 0; - if (phba->sli4_hba.fcp_cq) { - do { - lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], - LPFC_QUEUE_REARM); - } while (++fcp_eqidx < phba->cfg_fcp_io_channel); - } + if (phba->sli4_hba.nvmels_cq) + lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq, + LPFC_QUEUE_REARM); + + if (phba->sli4_hba.fcp_cq) + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) + lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx], + LPFC_QUEUE_REARM); + + if (phba->sli4_hba.nvme_cq) + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) + lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx], + LPFC_QUEUE_REARM); if (phba->cfg_fof) lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); - if (phba->sli4_hba.hba_eq) { - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; - fcp_eqidx++) - lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], - LPFC_QUEUE_REARM); + if (phba->sli4_hba.hba_eq) + for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) + lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx], + LPFC_QUEUE_REARM); + + if (phba->nvmet_support) { + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { + lpfc_sli4_cq_release( + phba->sli4_hba.nvmet_cqset[qidx], + LPFC_QUEUE_REARM); + } } if (phba->cfg_fof) @@ -5560,9 +5735,13 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) rsrc_blks->rsrc_size = rsrc_size; list_add_tail(&rsrc_blks->list, ext_blk_list); rsrc_start = rsrc_id; - if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) + if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { phba->sli4_hba.scsi_xri_start = rsrc_start + - lpfc_sli4_get_els_iocb_cnt(phba); + lpfc_sli4_get_iocb_cnt(phba); + phba->sli4_hba.nvme_xri_start = + phba->sli4_hba.scsi_xri_start + + phba->sli4_hba.scsi_xri_max; + } while (rsrc_id < (rsrc_start + rsrc_size)) { ids[j] = rsrc_id; @@ -5578,6 +5757,8 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) return rc; } + + /** * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. * @phba: Pointer to HBA context object. @@ -6156,42 +6337,45 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, } /** - * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block + * lpfc_sli4_repost_sgl_list - Repsot the buffers sgl pages as block * @phba: pointer to lpfc hba data structure. + * @pring: Pointer to driver SLI ring object. 
+ * @sgl_list: linked link of sgl buffers to post + * @cnt: number of linked list buffers * - * This routine walks the list of els buffers that have been allocated and + * This routine walks the list of buffers that have been allocated and * repost them to the port by using SGL block post. This is needed after a * pci_function_reset/warm_start or start. It attempts to construct blocks - * of els buffer sgls which contains contiguous xris and uses the non-embedded - * SGL block post mailbox commands to post them to the port. For single els + * of buffer sgls which contains contiguous xris and uses the non-embedded + * SGL block post mailbox commands to post them to the port. For single * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post * mailbox command for posting. * * Returns: 0 = success, non-zero failure. **/ static int -lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) +lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, + struct list_head *sgl_list, int cnt) { struct lpfc_sglq *sglq_entry = NULL; struct lpfc_sglq *sglq_entry_next = NULL; struct lpfc_sglq *sglq_entry_first = NULL; - int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; + int status, total_cnt; + int post_cnt = 0, num_posted = 0, block_cnt = 0; int last_xritag = NO_XRI; - struct lpfc_sli_ring *pring; LIST_HEAD(prep_sgl_list); LIST_HEAD(blck_sgl_list); LIST_HEAD(allc_sgl_list); LIST_HEAD(post_sgl_list); LIST_HEAD(free_sgl_list); - pring = &phba->sli.ring[LPFC_ELS_RING]; spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); - spin_unlock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(sgl_list, &allc_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); - total_cnt = phba->sli4_hba.els_xri_cnt; + total_cnt = cnt; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &allc_sgl_list, list) { list_del_init(&sglq_entry->list); @@ -6220,8 +6404,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) /* keep track of last sgl's xritag */ last_xritag = sglq_entry->sli4_xritag; - /* end of repost sgl list condition for els buffers */ - if (num_posted == phba->sli4_hba.els_xri_cnt) { + /* end of repost sgl list condition for buffers */ + if (num_posted == total_cnt) { if (post_cnt == 0) { list_splice_init(&prep_sgl_list, &blck_sgl_list); @@ -6238,7 +6422,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) /* Failure, put sgl to free list */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "3159 Failed to post els " + "3159 Failed to post " "sgl, xritag:x%x\n", sglq_entry->sli4_xritag); list_add_tail(&sglq_entry->list, @@ -6252,9 +6436,9 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) if (post_cnt == 0) continue; - /* post the els buffer list sgls as a block */ - status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list, - post_cnt); + /* post the buffer list sgls as a block */ + status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, + post_cnt); if (!status) { /* success, put sgl list to posted sgl list */ @@ -6265,7 +6449,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) struct lpfc_sglq, list); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "3160 Failed to post els sgl-list, " + "3160 Failed to post sgl-list, " "xritag:x%x-x%x\n", sglq_entry_first->sli4_xritag, (sglq_entry_first->sli4_xritag + @@ -6278,29 +6462,28 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) if (block_cnt == 0) last_xritag = NO_XRI; - /* reset 
els sgl post count for next round of posting */ + /* reset sgl post count for next round of posting */ post_cnt = 0; } - /* update the number of XRIs posted for ELS */ - phba->sli4_hba.els_xri_cnt = total_cnt; - /* free the els sgls failed to post */ + /* free the sgls failed to post */ lpfc_free_sgl_list(phba, &free_sgl_list); - /* push els sgls posted to the availble list */ + /* push sgls posted to the available list */ if (!list_empty(&post_sgl_list)) { spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&post_sgl_list, - &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&post_sgl_list, sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "3161 Failure to post els sgl to port.\n"); + "3161 Failure to post sgl to port.\n"); return -EIO; } - return 0; + + /* return the number of XRIs actually posted */ + return total_cnt; } void @@ -6335,7 +6518,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) int lpfc_sli4_hba_setup(struct lpfc_hba *phba) { - int rc; + int rc, i; LPFC_MBOXQ_t *mboxq; struct lpfc_mqe *mqe; uint8_t *vpd; @@ -6344,6 +6527,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); struct lpfc_vport *vport = phba->pport; struct lpfc_dmabuf *mp; + struct lpfc_rqb *rqbp; /* Perform a PCI function reset to start from clean */ rc = lpfc_pci_function_reset(phba); @@ -6622,35 +6806,141 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); - /* update host els and scsi xri-sgl sizes and mappings */ - rc = lpfc_sli4_xri_sgl_update(phba); + /* Create all the SLI4 queues */ + rc = lpfc_sli4_queue_create(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3089 Failed to allocate queues\n"); + rc = -ENODEV; + goto out_free_mbox; + } + /* Set up all the queues to the device */ + rc = lpfc_sli4_queue_setup(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0381 Error %d during queue setup.\n ", rc); + goto out_stop_timers; + } + /* Initialize the driver internal SLI layer lists. 
*/ + lpfc_sli4_setup(phba); + lpfc_sli4_queue_init(phba); + + /* update host els xri-sgl sizes and mappings */ + rc = lpfc_sli4_els_sgl_update(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "1400 Failed to update xri-sgl size and " "mapping: %d\n", rc); - goto out_free_mbox; + goto out_destroy_queue; } /* register the els sgl pool to the port */ - rc = lpfc_sli4_repost_els_sgl_list(phba); - if (unlikely(rc)) { + rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, + phba->sli4_hba.els_xri_cnt); + if (unlikely(rc < 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0582 Error %d during els sgl post " "operation\n", rc); rc = -ENODEV; - goto out_free_mbox; + goto out_destroy_queue; } + phba->sli4_hba.els_xri_cnt = rc; - /* register the allocated scsi sgl pool to the port */ - rc = lpfc_sli4_repost_scsi_sgl_list(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0383 Error %d during scsi sgl post " - "operation\n", rc); - /* Some Scsi buffers were moved to the abort scsi list */ - /* A pci function reset will repost them */ - rc = -ENODEV; - goto out_free_mbox; + if (phba->nvmet_support) { + /* update host nvmet xri-sgl sizes and mappings */ + rc = lpfc_sli4_nvmet_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "6308 Failed to update nvmet-sgl size " + "and mapping: %d\n", rc); + goto out_destroy_queue; + } + + /* register the nvmet sgl pool to the port */ + rc = lpfc_sli4_repost_sgl_list( + phba, + &phba->sli4_hba.lpfc_nvmet_sgl_list, + phba->sli4_hba.nvmet_xri_cnt); + if (unlikely(rc < 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "3117 Error %d during nvmet " + "sgl post\n", rc); + rc = -ENODEV; + goto out_destroy_queue; + } + phba->sli4_hba.nvmet_xri_cnt = rc; + lpfc_nvmet_create_targetport(phba); + } else { + /* update host scsi xri-sgl sizes and mappings */ + rc = lpfc_sli4_scsi_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "6309 Failed to update scsi-sgl size " + "and mapping: %d\n", rc); + goto out_destroy_queue; + } + + /* update host nvme xri-sgl sizes and mappings */ + rc = lpfc_sli4_nvme_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "6082 Failed to update nvme-sgl size " + "and mapping: %d\n", rc); + goto out_destroy_queue; + } + } + + if (phba->nvmet_support && phba->cfg_nvmet_mrq) { + + /* Post initial buffers to all RQs created */ + for (i = 0; i < phba->cfg_nvmet_mrq; i++) { + rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; + INIT_LIST_HEAD(&rqbp->rqb_buffer_list); + rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; + rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; + rqbp->entry_count = 256; + rqbp->buffer_count = 0; + + /* Divide by 4 and round down to multiple of 16 */ + rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8; + phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc; + phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc; + + lpfc_post_rq_buffer( + phba, phba->sli4_hba.nvmet_mrq_hdr[i], + phba->sli4_hba.nvmet_mrq_data[i], + phba->cfg_nvmet_mrq_post); + } + } + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + /* register the allocated scsi sgl pool to the port */ + rc = lpfc_sli4_repost_scsi_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0383 Error %d during scsi sgl post " + "operation\n", rc); + /* Some Scsi buffers were moved to abort scsi list */ + /* A pci function 
reset will repost them */ + rc = -ENODEV; + goto out_destroy_queue; + } + } + + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && + (phba->nvmet_support == 0)) { + + /* register the allocated nvme sgl pool to the port */ + rc = lpfc_repost_nvme_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "6116 Error %d during nvme sgl post " + "operation\n", rc); + /* Some NVME buffers were moved to abort nvme list */ + /* A pci function reset will repost them */ + rc = -ENODEV; + goto out_destroy_queue; + } } /* Post the rpi header region to the device. */ @@ -6660,24 +6950,46 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "0393 Error %d during rpi post operation\n", rc); rc = -ENODEV; - goto out_free_mbox; + goto out_destroy_queue; } lpfc_sli4_node_prep(phba); - /* Create all the SLI4 queues */ - rc = lpfc_sli4_queue_create(phba); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3089 Failed to allocate queues\n"); - rc = -ENODEV; - goto out_stop_timers; - } - /* Set up all the queues to the device */ - rc = lpfc_sli4_queue_setup(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0381 Error %d during queue setup.\n ", rc); - goto out_destroy_queue; + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { + /* + * The FC Port needs to register FCFI (index 0) + */ + lpfc_reg_fcfi(phba, mboxq); + mboxq->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + goto out_unset_queue; + rc = 0; + phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, + &mboxq->u.mqe.un.reg_fcfi); + } else { + /* We are a NVME Target mode with MRQ > 1 */ + + /* First register the FCFI */ + lpfc_reg_fcfi_mrq(phba, mboxq, 0); + mboxq->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + goto out_unset_queue; + rc = 0; + phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, + &mboxq->u.mqe.un.reg_fcfi_mrq); + + /* Next register the MRQs */ + lpfc_reg_fcfi_mrq(phba, mboxq, 1); + mboxq->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + goto out_unset_queue; + rc = 0; + } + /* Check if the port is configured to be disabled */ + lpfc_sli_read_link_ste(phba); } /* Arm the CQs and then EQs on device */ @@ -6731,23 +7043,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = 0; } - if (!(phba->hba_flag & HBA_FCOE_MODE)) { - /* - * The FC Port needs to register FCFI (index 0) - */ - lpfc_reg_fcfi(phba, mboxq); - mboxq->vport = phba->pport; - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - if (rc != MBX_SUCCESS) - goto out_unset_queue; - rc = 0; - phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, - &mboxq->u.mqe.un.reg_fcfi); - - /* Check if the port is configured to be disabled */ - lpfc_sli_read_link_ste(phba); - } - /* * The port is ready, set the host's link state to LINK_DOWN * in preparation for link interrupts. 
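Earlier in this hunk the NVMET MRQ seeding loop derives entry_repost from cfg_nvmet_mrq_post as rc = (post >> 2) & 0xfff8. A stand-alone worked example of that shift-and-mask follows; the sample post counts are arbitrary, chosen only to show the rounding behaviour (the shift floor-divides by four, and for realistic post counts the mask simply clears the low three bits of the quotient).

#include <stdio.h>

int main(void)
{
	/* Same expression the driver uses to derive entry_repost */
	unsigned int samples[] = { 512, 500, 256, 100 };
	unsigned int i, rc;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rc = (samples[i] >> 2) & 0xfff8;
		printf("post=%u -> entry_repost=%u\n", samples[i], rc);
	}
	return 0;	/* prints 128, 120, 64 and 24 respectively */
}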
@@ -6884,7 +7179,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) /* Find the eq associated with the mcq */ if (phba->sli4_hba.hba_eq) - for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) + for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) if (phba->sli4_hba.hba_eq[eqidx]->queue_id == phba->sli4_hba.mbx_cq->assoc_qid) { fpeq = phba->sli4_hba.hba_eq[eqidx]; @@ -7243,16 +7538,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, = MAILBOX_HBA_EXT_OFFSET; /* Copy the mailbox extension data */ - if (pmbox->in_ext_byte_len && pmbox->context2) { + if (pmbox->in_ext_byte_len && pmbox->context2) lpfc_memcpy_to_slim(phba->MBslimaddr + MAILBOX_HBA_EXT_OFFSET, pmbox->context2, pmbox->in_ext_byte_len); - } - if (mbx->mbxCommand == MBX_CONFIG_PORT) { + if (mbx->mbxCommand == MBX_CONFIG_PORT) /* copy command data into host mbox for cmpl */ - lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); - } + lpfc_sli_pcimem_bcopy(mbx, phba->mbox, + MAILBOX_CMD_SIZE); /* First copy mbox command data to HBA SLIM, skip past first word */ @@ -7266,10 +7560,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, writel(ldata, to_slim); readl(to_slim); /* flush */ - if (mbx->mbxCommand == MBX_CONFIG_PORT) { + if (mbx->mbxCommand == MBX_CONFIG_PORT) /* switch over to host mailbox */ psli->sli_flag |= LPFC_SLI_ACTIVE; - } } wmb(); @@ -7368,7 +7661,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* copy results back to user */ - lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE); + lpfc_sli_pcimem_bcopy(phba->mbox, mbx, + MAILBOX_CMD_SIZE); /* Copy the mailbox extension data */ if (pmbox->out_ext_byte_len && pmbox->context2) { lpfc_sli_pcimem_bcopy(phba->mbox_ext, @@ -7378,7 +7672,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, } else { /* First copy command data */ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, - MAILBOX_CMD_SIZE); + MAILBOX_CMD_SIZE); /* Copy the mailbox extension data */ if (pmbox->out_ext_byte_len && pmbox->context2) { lpfc_memcpy_from_slim(pmbox->context2, @@ -8059,7 +8353,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, { struct lpfc_iocbq *nextiocb; IOCB_t *iocb; - struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; + struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; lockdep_assert_held(&phba->hbalock); @@ -8133,7 +8427,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, * For FCP commands, we must be in a state where we can process link * attention events. 
*/ - } else if (unlikely(pring->ringno == phba->sli.fcp_ring && + } else if (unlikely(pring->ringno == LPFC_FCP_RING && !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { goto iocb_busy; } @@ -8870,9 +9164,21 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, union lpfc_wqe *wqe; union lpfc_wqe128 wqe128; struct lpfc_queue *wq; - struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; + struct lpfc_sli_ring *pring; - lockdep_assert_held(&phba->hbalock); + /* Get the WQ */ + if ((piocb->iocb_flag & LPFC_IO_FCP) || + (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) + wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx]; + else + wq = phba->sli4_hba.oas_wq; + } else { + wq = phba->sli4_hba.els_wq; + } + + /* Get corresponding ring */ + pring = wq->pring; /* * The WQE can be either 64 or 128 bytes, @@ -8880,6 +9186,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, */ wqe = (union lpfc_wqe *)&wqe128; + lockdep_assert_held(&phba->hbalock); + if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) @@ -8894,7 +9202,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, return IOCB_BUSY; } } else { - sglq = __lpfc_sli_get_sglq(phba, piocb); + sglq = __lpfc_sli_get_els_sglq(phba, piocb); if (!sglq) { if (!(flag & SLI_IOCB_RET_IOCB)) { __lpfc_sli_ringtx_put(phba, @@ -8906,10 +9214,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, } } } - } else if (piocb->iocb_flag & LPFC_IO_FCP) { + } else if (piocb->iocb_flag & LPFC_IO_FCP) /* These IO's already have an XRI and a mapped sgl. */ sglq = NULL; - } else { + else { /* * This is a continuation of a commandi,(CX) so this * sglq is on the active list @@ -8929,21 +9237,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if (lpfc_sli4_iocb2wqe(phba, piocb, wqe)) return IOCB_ERROR; - if ((piocb->iocb_flag & LPFC_IO_FCP) || - (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { - if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) { - wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; - } else { - wq = phba->sli4_hba.oas_wq; - } - if (lpfc_sli4_wq_put(wq, wqe)) - return IOCB_ERROR; - } else { - if (unlikely(!phba->sli4_hba.els_wq)) - return IOCB_ERROR; - if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe)) - return IOCB_ERROR; - } + if (lpfc_sli4_wq_put(wq, wqe)) + return IOCB_ERROR; lpfc_sli_ringtxcmpl_put(phba, pring, piocb); return 0; @@ -9001,46 +9296,44 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) } /** - * lpfc_sli_calc_ring - Calculates which ring to use + * lpfc_sli4_calc_ring - Calculates which ring to use * @phba: Pointer to HBA context object. - * @ring_number: Initial ring * @piocb: Pointer to command iocb. * - * For SLI4, FCP IO can deferred to one fo many WQs, based on - * fcp_wqidx, thus we need to calculate the corresponding ring. + * For SLI4 only, FCP IO can deferred to one fo many WQs, based on + * hba_wqidx, thus we need to calculate the corresponding ring. * Since ABORTS must go on the same WQ of the command they are - * aborting, we use command's fcp_wqidx. + * aborting, we use command's hba_wqidx. 
*/ -static int -lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number, - struct lpfc_iocbq *piocb) +struct lpfc_sli_ring * +lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) { - if (phba->sli_rev < LPFC_SLI_REV4) - return ring_number; - - if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { + if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { if (!(phba->cfg_fof) || - (!(piocb->iocb_flag & LPFC_IO_FOF))) { + (!(piocb->iocb_flag & LPFC_IO_FOF))) { if (unlikely(!phba->sli4_hba.fcp_wq)) - return LPFC_HBA_ERROR; + return NULL; /* - * for abort iocb fcp_wqidx should already + * for abort iocb hba_wqidx should already * be setup based on what work queue we used. */ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) - piocb->fcp_wqidx = + piocb->hba_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb->context1); - ring_number = MAX_SLI3_CONFIGURED_RINGS + - piocb->fcp_wqidx; + return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; } else { if (unlikely(!phba->sli4_hba.oas_wq)) - return LPFC_HBA_ERROR; - piocb->fcp_wqidx = 0; - ring_number = LPFC_FCP_OAS_RING; + return NULL; + piocb->hba_wqidx = 0; + return phba->sli4_hba.oas_wq->pring; } + } else { + if (unlikely(!phba->sli4_hba.els_wq)) + return NULL; + piocb->hba_wqidx = 0; + return phba->sli4_hba.els_wq->pring; } - return ring_number; } /** @@ -9060,7 +9353,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_sli_ring *pring; struct lpfc_queue *fpeq; struct lpfc_eqe *eqe; @@ -9068,21 +9361,19 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, int rc, idx; if (phba->sli_rev == LPFC_SLI_REV4) { - ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb); - if (unlikely(ring_number == LPFC_HBA_ERROR)) + pring = lpfc_sli4_calc_ring(phba, piocb); + if (unlikely(pring == NULL)) return IOCB_ERROR; - idx = piocb->fcp_wqidx; - pring = &phba->sli.ring[ring_number]; spin_lock_irqsave(&pring->ring_lock, iflags); rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&pring->ring_lock, iflags); if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { - fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx]; + idx = piocb->hba_wqidx; + hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx]; - if (atomic_dec_and_test(&fcp_eq_hdl-> - fcp_eq_in_use)) { + if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) { /* Get associated EQ with this index */ fpeq = phba->sli4_hba.hba_eq[idx]; @@ -9103,7 +9394,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); } - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); } } else { /* For now, SLI2/3 will still use hbalock */ @@ -9123,7 +9414,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, * only when driver needs to support target mode functionality * or IP over FC functionalities. * - * This function is called with no lock held. + * This function is called with no lock held. SLI3 only. 
**/ static int lpfc_extra_ring_setup( struct lpfc_hba *phba) @@ -9136,14 +9427,14 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) /* Adjust cmd/rsp ring iocb entries more evenly */ /* Take some away from the FCP ring */ - pring = &psli->ring[psli->fcp_ring]; + pring = &psli->sli3_ring[LPFC_FCP_RING]; pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; /* and give them to the extra ring */ - pring = &psli->ring[psli->extra_ring]; + pring = &psli->sli3_ring[LPFC_EXTRA_RING]; pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; @@ -9328,7 +9619,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, /** - * lpfc_sli_setup - SLI ring setup function + * lpfc_sli4_setup - SLI ring setup function * @phba: Pointer to HBA context object. * * lpfc_sli_setup sets up rings of the SLI interface with @@ -9339,6 +9630,51 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, * This function always returns 0. **/ int +lpfc_sli4_setup(struct lpfc_hba *phba) +{ + struct lpfc_sli_ring *pring; + + pring = phba->sli4_hba.els_wq->pring; + pring->num_mask = LPFC_MAX_RING_MASK; + pring->prt[0].profile = 0; /* Mask 0 */ + pring->prt[0].rctl = FC_RCTL_ELS_REQ; + pring->prt[0].type = FC_TYPE_ELS; + pring->prt[0].lpfc_sli_rcv_unsol_event = + lpfc_els_unsol_event; + pring->prt[1].profile = 0; /* Mask 1 */ + pring->prt[1].rctl = FC_RCTL_ELS_REP; + pring->prt[1].type = FC_TYPE_ELS; + pring->prt[1].lpfc_sli_rcv_unsol_event = + lpfc_els_unsol_event; + pring->prt[2].profile = 0; /* Mask 2 */ + /* NameServer Inquiry */ + pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; + /* NameServer */ + pring->prt[2].type = FC_TYPE_CT; + pring->prt[2].lpfc_sli_rcv_unsol_event = + lpfc_ct_unsol_event; + pring->prt[3].profile = 0; /* Mask 3 */ + /* NameServer response */ + pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; + /* NameServer */ + pring->prt[3].type = FC_TYPE_CT; + pring->prt[3].lpfc_sli_rcv_unsol_event = + lpfc_ct_unsol_event; + return 0; +} + +/** + * lpfc_sli_setup - SLI ring setup function + * @phba: Pointer to HBA context object. + * + * lpfc_sli_setup sets up rings of the SLI interface with + * number of iocbs per ring and iotags. This function is + * called while driver attach to the HBA and before the + * interrupts are enabled. So there is no need for locking. + * + * This function always returns 0. SLI3 only. + **/ +int lpfc_sli_setup(struct lpfc_hba *phba) { int i, totiocbsize = 0; @@ -9346,19 +9682,14 @@ lpfc_sli_setup(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; - if (phba->sli_rev == LPFC_SLI_REV4) - psli->num_rings += phba->cfg_fcp_io_channel; psli->sli_flag = 0; - psli->fcp_ring = LPFC_FCP_RING; - psli->next_ring = LPFC_FCP_NEXT_RING; - psli->extra_ring = LPFC_EXTRA_RING; psli->iocbq_lookup = NULL; psli->iocbq_lookup_len = 0; psli->last_iotag = 0; for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + pring = &psli->sli3_ring[i]; switch (i) { case LPFC_FCP_RING: /* ring 0 - FCP */ /* numCiocb and numRiocb are used in config_port */ @@ -9457,18 +9788,90 @@ lpfc_sli_setup(struct lpfc_hba *phba) } /** - * lpfc_sli_queue_setup - Queue initialization function + * lpfc_sli4_queue_init - Queue initialization function * @phba: Pointer to HBA context object. 
* - * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each + * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each * ring. This function also initializes ring indices of each ring. * This function is called during the initialization of the SLI * interface of an HBA. * This function is called with no lock held and always returns * 1. **/ -int -lpfc_sli_queue_setup(struct lpfc_hba *phba) +void +lpfc_sli4_queue_init(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + struct lpfc_sli_ring *pring; + int i; + + psli = &phba->sli; + spin_lock_irq(&phba->hbalock); + INIT_LIST_HEAD(&psli->mboxq); + INIT_LIST_HEAD(&psli->mboxq_cmpl); + /* Initialize list headers for txq and txcmplq as double linked lists */ + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { + pring = phba->sli4_hba.fcp_wq[i]->pring; + pring->flag = 0; + pring->ringno = LPFC_FCP_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + for (i = 0; i < phba->cfg_nvme_io_channel; i++) { + pring = phba->sli4_hba.nvme_wq[i]->pring; + pring->flag = 0; + pring->ringno = LPFC_FCP_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + pring = phba->sli4_hba.els_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_ELS_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + + if (phba->cfg_nvme_io_channel) { + pring = phba->sli4_hba.nvmels_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_ELS_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + + if (phba->cfg_fof) { + pring = phba->sli4_hba.oas_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_FCP_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli_queue_init - Queue initialization function + * @phba: Pointer to HBA context object. + * + * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each + * ring. This function also initializes ring indices of each ring. + * This function is called during the initialization of the SLI + * interface of an HBA. + * This function is called with no lock held and always returns + * 1. 
+ **/ +void +lpfc_sli_queue_init(struct lpfc_hba *phba) { struct lpfc_sli *psli; struct lpfc_sli_ring *pring; @@ -9480,21 +9883,20 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) INIT_LIST_HEAD(&psli->mboxq_cmpl); /* Initialize list headers for txq and txcmplq as double linked lists */ for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + pring = &psli->sli3_ring[i]; pring->ringno = i; pring->sli.sli3.next_cmdidx = 0; pring->sli.sli3.local_getidx = 0; pring->sli.sli3.cmdidx = 0; - pring->flag = 0; - INIT_LIST_HEAD(&pring->txq); - INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); INIT_LIST_HEAD(&pring->iocb_continue_saveq); INIT_LIST_HEAD(&pring->postbufq); + pring->flag = 0; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); spin_lock_init(&pring->ring_lock); } spin_unlock_irq(&phba->hbalock); - return 1; } /** @@ -9566,6 +9968,7 @@ lpfc_sli_host_down(struct lpfc_vport *vport) LIST_HEAD(completions); struct lpfc_hba *phba = vport->phba; struct lpfc_sli *psli = &phba->sli; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; int i; @@ -9575,36 +9978,64 @@ lpfc_sli_host_down(struct lpfc_vport *vport) lpfc_cleanup_discovery_resources(vport); spin_lock_irqsave(&phba->hbalock, flags); - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; - prev_pring_flag = pring->flag; - /* Only slow rings */ - if (pring->ringno == LPFC_ELS_RING) { - pring->flag |= LPFC_DEFERRED_RING_EVENT; - /* Set the lpfc data pending flag */ - set_bit(LPFC_DATA_READY, &phba->data_flags); - } - /* - * Error everything on the txq since these iocbs have not been - * given to the FW yet. - */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { - if (iocb->vport != vport) - continue; - list_move_tail(&iocb->list, &completions); - } - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, - list) { - if (iocb->vport != vport) + /* + * Error everything on the txq since these iocbs + * have not been given to the FW yet. 
+ * Also issue ABTS for everything on the txcmplq + */ + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + prev_pring_flag = pring->flag; + /* Only slow rings */ + if (pring->ringno == LPFC_ELS_RING) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + list_for_each_entry_safe(iocb, next_iocb, + &pring->txq, list) { + if (iocb->vport != vport) + continue; + list_move_tail(&iocb->list, &completions); + } + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) { + if (iocb->vport != vport) + continue; + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + } + pring->flag = prev_pring_flag; + } + } else { + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) continue; - lpfc_sli_issue_abort_iotag(phba, pring, iocb); + if (pring == phba->sli4_hba.els_wq->pring) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + prev_pring_flag = pring->flag; + spin_lock_irq(&pring->ring_lock); + list_for_each_entry_safe(iocb, next_iocb, + &pring->txq, list) { + if (iocb->vport != vport) + continue; + list_move_tail(&iocb->list, &completions); + } + spin_unlock_irq(&pring->ring_lock); + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) { + if (iocb->vport != vport) + continue; + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + } + pring->flag = prev_pring_flag; } - - pring->flag = prev_pring_flag; } - spin_unlock_irqrestore(&phba->hbalock, flags); /* Cancel all the IOCBs from the completions list */ @@ -9633,6 +10064,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) { LIST_HEAD(completions); struct lpfc_sli *psli = &phba->sli; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *buf_ptr; unsigned long flags = 0; @@ -9646,20 +10078,36 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) lpfc_fabric_abort_hba(phba); spin_lock_irqsave(&phba->hbalock, flags); - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; - /* Only slow rings */ - if (pring->ringno == LPFC_ELS_RING) { - pring->flag |= LPFC_DEFERRED_RING_EVENT; - /* Set the lpfc data pending flag */ - set_bit(LPFC_DATA_READY, &phba->data_flags); - } - /* - * Error everything on the txq since these iocbs have not been - * given to the FW yet. - */ - list_splice_init(&pring->txq, &completions); + /* + * Error everything on the txq since these iocbs + * have not been given to the FW yet. 
+ */ + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + /* Only slow rings */ + if (pring->ringno == LPFC_ELS_RING) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + list_splice_init(&pring->txq, &completions); + } + } else { + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txq, &completions); + spin_unlock_irq(&pring->ring_lock); + if (pring == phba->sli4_hba.els_wq->pring) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + } } spin_unlock_irqrestore(&phba->hbalock, flags); @@ -9986,7 +10434,6 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *abtsiocbp; IOCB_t *icmd = NULL; IOCB_t *iabt = NULL; - int ring_number; int retval; unsigned long iflags; @@ -10026,7 +10473,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt->ulpClass = icmd->ulpClass; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; + abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; if (cmdiocb->iocb_flag & LPFC_IO_FCP) abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; if (cmdiocb->iocb_flag & LPFC_IO_FOF) @@ -10048,11 +10495,9 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abtsiocbp->iotag); if (phba->sli_rev == LPFC_SLI_REV4) { - ring_number = - lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp); - if (unlikely(ring_number == LPFC_HBA_ERROR)) + pring = lpfc_sli4_calc_ring(phba, abtsiocbp); + if (unlikely(pring == NULL)) return 0; - pring = &phba->sli.ring[ring_number]; /* Note: both hbalock and ring_lock need to be set here */ spin_lock_irqsave(&pring->ring_lock, iflags); retval = __lpfc_sli_issue_iocb(phba, pring->ringno, @@ -10134,6 +10579,108 @@ abort_iotag_exit: } /** + * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @cmdiocb: Pointer to driver command iocb object. + * + * This function issues an abort iocb for the provided command iocb down to + * the port. Other than the case the outstanding command iocb is an abort + * request, this function issues abort out unconditionally. This function is + * called with hbalock held. The function returns 0 when it fails due to + * memory allocation failure or when the command iocb is an abort request. + **/ +static int +lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_iocbq *abtsiocbp; + union lpfc_wqe *abts_wqe; + int retval; + + /* + * There are certain command types we don't want to abort. And we + * don't want to abort commands that are already in the process of + * being aborted. 
+ */ + if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || + cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || + (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) + return 0; + + /* issue ABTS for this io based on iotag */ + abtsiocbp = __lpfc_sli_get_iocbq(phba); + if (abtsiocbp == NULL) + return 0; + + /* This signals the response to set the correct status + * before calling the completion handler + */ + cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; + + /* Complete prepping the abort wqe and issue to the FW. */ + abts_wqe = &abtsiocbp->wqe; + bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0); + bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); + + /* Explicitly set reserved fields to zero.*/ + abts_wqe->abort_cmd.rsrvd4 = 0; + abts_wqe->abort_cmd.rsrvd5 = 0; + + /* WQE Common - word 6. Context is XRI tag. Set 0. */ + bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0); + bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0); + + /* word 7 */ + bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); + bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, + cmdiocb->iocb.ulpClass); + + /* word 8 - tell the FW to abort the IO associated with this + * outstanding exchange ID. + */ + abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag; + + /* word 9 - this is the iotag for the abts_wqe completion. */ + bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, + abtsiocbp->iotag); + + /* word 10 */ + bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx); + bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); + + /* word 11 */ + bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); + bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbp->iocb_flag |= LPFC_IO_NVME; + abtsiocbp->vport = vport; + abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; + retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); + if (retval == IOCB_ERROR) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6147 Failed abts issue_wqe with status x%x " + "for oxid x%x\n", + retval, cmdiocb->sli4_xritag); + lpfc_sli_release_iocbq(phba, abtsiocbp); + return retval; + } + + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6148 Drv Abort NVME Request Issued for " + "ox_id x%x on reqtag x%x\n", + cmdiocb->sli4_xritag, + abtsiocbp->iotag); + + return retval; +} + +/** * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. * @phba: pointer to lpfc HBA data structure. 
* @@ -10144,10 +10691,20 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; + struct lpfc_queue *qp = NULL; int i; - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + lpfc_sli_abort_iocb_ring(phba, pring); + } + return; + } + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; lpfc_sli_abort_iocb_ring(phba, pring); } } @@ -10351,7 +10908,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocb->vport = vport; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; + abtsiocb->hba_wqidx = iocbq->hba_wqidx; if (iocbq->iocb_flag & LPFC_IO_FCP) abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; if (iocbq->iocb_flag & LPFC_IO_FOF) @@ -10411,7 +10968,6 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, int sum, i, ret_val; unsigned long iflags; struct lpfc_sli_ring *pring_s4; - uint32_t ring_number; spin_lock_irq(&phba->hbalock); @@ -10454,7 +11010,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocbq->vport = vport; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx; + abtsiocbq->hba_wqidx = iocbq->hba_wqidx; if (iocbq->iocb_flag & LPFC_IO_FCP) abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; if (iocbq->iocb_flag & LPFC_IO_FOF) @@ -10479,9 +11035,9 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; if (phba->sli_rev == LPFC_SLI_REV4) { - ring_number = MAX_SLI3_CONFIGURED_RINGS + - iocbq->fcp_wqidx; - pring_s4 = &phba->sli.ring[ring_number]; + pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); + if (pring_s4 == NULL) + continue; /* Note: both hbalock and ring_lock must be set here */ spin_lock_irqsave(&pring_s4->ring_lock, iflags); ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, @@ -10643,10 +11199,14 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, struct lpfc_iocbq *iocb; int txq_cnt = 0; int txcmplq_cnt = 0; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; unsigned long iflags; bool iocb_completed = true; + if (phba->sli_rev >= LPFC_SLI_REV4) + pring = lpfc_sli4_calc_ring(phba, piocb); + else + pring = &phba->sli.sli3_ring[ring_number]; /* * If the caller has provided a response iocbq buffer, then context2 * is NULL or its an error. @@ -11441,6 +12001,7 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) uint32_t ha_copy; unsigned long status; unsigned long iflag; + struct lpfc_sli_ring *pring; /* Get the driver's phba structure from the dev_id and * assume the HBA is not interrupting. 
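The abort paths in this region copy hba_wqidx from the command being aborted and set LPFC_USE_FCPWQIDX, so the ABTS is queued to the same WQ as the WQE it targets; lpfc_sli4_calc_ring, introduced earlier in the patch, then maps that back to a ring. A simplified illustration of that selection follows; the function name is invented, and the NULL checks and the hba_wqidx distribution done for fresh FCP commands in the real routine are omitted.

static struct lpfc_sli_ring *
example_ring_for_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		/* OAS traffic has its own WQ when cfg_fof is enabled */
		if (phba->cfg_fof && (piocb->iocb_flag & LPFC_IO_FOF))
			return phba->sli4_hba.oas_wq->pring;
		return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
	}
	/* ELS, CT and other slow-path traffic share the ELS WQ's ring */
	return phba->sli4_hba.els_wq->pring;
}

Deriving the ring from the WQ is what lets the patch retire the old fixed numbering (pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + idx]) throughout this file.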
@@ -11485,10 +12046,9 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); status >>= (4*LPFC_FCP_RING); + pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; if (status & HA_RXMASK) - lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], - status); + lpfc_sli_handle_fast_ring_event(phba, pring, status); if (phba->cfg_multi_ring_support == 2) { /* @@ -11499,7 +12059,7 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) status >>= (4*LPFC_EXTRA_RING); if (status & HA_RXMASK) { lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_EXTRA_RING], + &phba->sli.sli3_ring[LPFC_EXTRA_RING], status); } } @@ -11812,11 +12372,13 @@ static struct lpfc_iocbq * lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, struct lpfc_iocbq *irspiocbq) { - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *cmdiocbq; struct lpfc_wcqe_complete *wcqe; unsigned long iflags; + pring = lpfc_phba_elsring(phba); + wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; spin_lock_irqsave(&pring->ring_lock, iflags); pring->stats.iocb_event++; @@ -12052,8 +12614,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, txq_cnt++; if (!list_empty(&pring->txcmplq)) txcmplq_cnt++; - if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) - fcp_txcmplq_cnt++; lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", @@ -12172,6 +12732,7 @@ static bool lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) { bool workposted = false; + struct fc_frame_header *fc_hdr; struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; struct lpfc_queue *drq = phba->sli4_hba.dat_rq; struct hbq_dmabuf *dma_buf; @@ -12206,6 +12767,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) } hrq->RQ_rcv_buf++; memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); + + /* If a NVME LS event (type 0x28), treat it as Fast path */ + fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; + /* save off the frame for the word thread to process */ list_add_tail(&dma_buf->cq_event.list, &phba->sli4_hba.sp_queue_event); @@ -12324,6 +12889,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, return; } + /* Save EQ associated with this CQ */ + cq->assoc_qp = speq; + /* Process all the entries to the CQ */ switch (cq->type) { case LPFC_MCQ: @@ -12336,8 +12904,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, break; case LPFC_WCQ: while ((cqe = lpfc_sli4_cq_get(cq))) { - if (cq->subtype == LPFC_FCP) - workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, + if ((cq->subtype == LPFC_FCP) || + (cq->subtype == LPFC_NVME)) + workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); else workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, @@ -12424,7 +12993,23 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, bf_get(lpfc_wcqe_c_request_tag, wcqe)); return; } - if (unlikely(!cmdiocbq->iocb_cmpl)) { + + if (cq->assoc_qp) + cmdiocbq->isr_timestamp = + cq->assoc_qp->isr_timestamp; + + if (cmdiocbq->iocb_cmpl == NULL) { + if (cmdiocbq->wqe_cmpl) { + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + + /* Pass the cmd_iocb and the wcqe to the upper layer */ + (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); + return; + } 
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0375 FCP cmdiocb not callback function " "iotag: (%d)\n", @@ -12460,12 +13045,12 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, { struct lpfc_queue *childwq; bool wqid_matched = false; - uint16_t fcp_wqid; + uint16_t hba_wqid; /* Check for fast-path FCP work queue release */ - fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); + hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); list_for_each_entry(childwq, &cq->child_list, list) { - if (childwq->queue_id == fcp_wqid) { + if (childwq->queue_id == hba_wqid) { lpfc_sli4_wq_release(childwq, bf_get(lpfc_wcqe_r_wqe_index, wcqe)); wqid_matched = true; @@ -12476,11 +13061,108 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, if (wqid_matched != true) lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2580 Fast-path wqe consume event carries " - "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); + "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); +} + +/** + * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry + * @phba: Pointer to HBA context object. + * @rcqe: Pointer to receive-queue completion queue entry. + * + * This routine process a receive-queue completion queue entry. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_rcqe *rcqe) +{ + bool workposted = false; + struct lpfc_queue *hrq; + struct lpfc_queue *drq; + struct rqb_dmabuf *dma_buf; + struct fc_frame_header *fc_hdr; + uint32_t status, rq_id; + unsigned long iflags; + uint32_t fctl, idx; + + if ((phba->nvmet_support == 0) || + (phba->sli4_hba.nvmet_cqset == NULL)) + return workposted; + + idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; + hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; + drq = phba->sli4_hba.nvmet_mrq_data[idx]; + + /* sanity check on queue memory */ + if (unlikely(!hrq) || unlikely(!drq)) + return workposted; + + if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) + rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); + else + rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); + + if ((phba->nvmet_support == 0) || + (rq_id != hrq->queue_id)) + return workposted; + + status = bf_get(lpfc_rcqe_status, rcqe); + switch (status) { + case FC_STATUS_RQ_BUF_LEN_EXCEEDED: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6126 Receive Frame Truncated!!\n"); + hrq->RQ_buf_trunc++; + break; + case FC_STATUS_RQ_SUCCESS: + lpfc_sli4_rq_release(hrq, drq); + spin_lock_irqsave(&phba->hbalock, iflags); + dma_buf = lpfc_sli_rqbuf_get(phba, hrq); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + goto out; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + hrq->RQ_rcv_buf++; + fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; + + /* Just some basic sanity checks on FCP Command frame */ + fctl = (fc_hdr->fh_f_ctl[0] << 16 | + fc_hdr->fh_f_ctl[1] << 8 | + fc_hdr->fh_f_ctl[2]); + if (((fctl & + (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != + (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || + (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ + goto drop; + + if (fc_hdr->fh_type == FC_TYPE_FCP) { + dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); + lpfc_nvmet_unsol_fcp_event( + phba, phba->sli4_hba.els_wq->pring, dma_buf, + cq->assoc_qp->isr_timestamp); + return false; + } +drop: + lpfc_in_buf_free(phba, &dma_buf->dbuf); + break; + case FC_STATUS_INSUFF_BUF_NEED_BUF: + case 
FC_STATUS_INSUFF_BUF_FRM_DISC: + hrq->RQ_no_posted_buf++; + /* Post more buffers if possible */ + spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + } +out: + return workposted; } /** - * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry + * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry * @cq: Pointer to the completion queue. * @eqe: Pointer to fast-path completion queue entry. * @@ -12488,7 +13170,7 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * event queue for FCP command response completion. **/ static int -lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, +lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) { struct lpfc_wcqe_release wcqe; @@ -12500,10 +13182,15 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, /* Check and process for different type of WCQE and dispatch */ switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { case CQE_CODE_COMPL_WQE: + case CQE_CODE_NVME_ERSP: cq->CQ_wq++; /* Process the WQ complete event */ phba->last_completion_time = jiffies; - lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, + if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) + lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, + (struct lpfc_wcqe_complete *)&wcqe); + if (cq->subtype == LPFC_NVME_LS) + lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, (struct lpfc_wcqe_complete *)&wcqe); break; case CQE_CODE_RELEASE_WQE: @@ -12519,9 +13206,17 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, (struct sli4_wcqe_xri_aborted *)&wcqe); break; + case CQE_CODE_RECEIVE_V1: + case CQE_CODE_RECEIVE: + phba->last_completion_time = jiffies; + if (cq->subtype == LPFC_NVMET) { + workposted = lpfc_sli4_nvmet_handle_rcqe( + phba, cq, (struct lpfc_rcqe *)&wcqe); + } + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0144 Not a valid WCQE code: x%x\n", + "0144 Not a valid CQE code: x%x\n", bf_get(lpfc_wcqe_c_code, &wcqe)); break; } @@ -12544,10 +13239,10 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, uint32_t qidx) { - struct lpfc_queue *cq; + struct lpfc_queue *cq = NULL; struct lpfc_cqe *cqe; bool workposted = false; - uint16_t cqid; + uint16_t cqid, id; int ecount = 0; if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { @@ -12562,28 +13257,42 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* Get the reference to the corresponding CQ */ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); - /* Check if this is a Slow path event */ - if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) { - lpfc_sli4_sp_handle_eqe(phba, eqe, - phba->sli4_hba.hba_eq[qidx]); - return; + if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { + id = phba->sli4_hba.nvmet_cqset[0]->queue_id; + if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { + /* Process NVMET unsol rcv */ + cq = phba->sli4_hba.nvmet_cqset[cqid - id]; + goto process_cq; + } } - if (unlikely(!phba->sli4_hba.fcp_cq)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "3146 Fast-path completion queues " - "does not exist\n"); - return; + if (phba->sli4_hba.nvme_cq_map && + (cqid == phba->sli4_hba.nvme_cq_map[qidx])) { + /* Process NVME / NVMET command completion */ + cq = phba->sli4_hba.nvme_cq[qidx]; + goto process_cq; } - cq = 
phba->sli4_hba.fcp_cq[qidx]; - if (unlikely(!cq)) { - if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0367 Fast-path completion queue " - "(%d) does not exist\n", qidx); + + if (phba->sli4_hba.fcp_cq_map && + (cqid == phba->sli4_hba.fcp_cq_map[qidx])) { + /* Process FCP command completion */ + cq = phba->sli4_hba.fcp_cq[qidx]; + goto process_cq; + } + + if (phba->sli4_hba.nvmels_cq && + (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { + /* Process NVME unsol rcv */ + cq = phba->sli4_hba.nvmels_cq; + } + + /* Otherwise this is a Slow path event */ + if (cq == NULL) { + lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]); return; } +process_cq: if (unlikely(cqid != cq->queue_id)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0368 Miss-matched fast-path completion " @@ -12592,9 +13301,12 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, return; } + /* Save EQ associated with this CQ */ + cq->assoc_qp = phba->sli4_hba.hba_eq[qidx]; + /* Process all the entries to the CQ */ while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); + workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); if (!(++ecount % cq->entry_repost)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } @@ -12685,7 +13397,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) /* Process all the entries to the OAS CQ */ while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); + workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); if (!(++ecount % cq->entry_repost)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } @@ -12733,15 +13445,15 @@ irqreturn_t lpfc_sli4_fof_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_queue *eq; struct lpfc_eqe *eqe; unsigned long iflag; int ecount = 0; /* Get the driver's phba structure from the dev_id */ - fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; - phba = fcp_eq_hdl->phba; + hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; + phba = hba_eq_hdl->phba; if (unlikely(!phba)) return IRQ_NONE; @@ -12827,17 +13539,17 @@ irqreturn_t lpfc_sli4_hba_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_queue *fpeq; struct lpfc_eqe *eqe; unsigned long iflag; int ecount = 0; - int fcp_eqidx; + int hba_eqidx; /* Get the driver's phba structure from the dev_id */ - fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; - phba = fcp_eq_hdl->phba; - fcp_eqidx = fcp_eq_hdl->idx; + hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; + phba = hba_eq_hdl->phba; + hba_eqidx = hba_eq_hdl->idx; if (unlikely(!phba)) return IRQ_NONE; @@ -12845,15 +13557,20 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) return IRQ_NONE; /* Get to the EQ struct associated with this vector */ - fpeq = phba->sli4_hba.hba_eq[fcp_eqidx]; + fpeq = phba->sli4_hba.hba_eq[hba_eqidx]; if (unlikely(!fpeq)) return IRQ_NONE; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) + fpeq->isr_timestamp = ktime_get_ns(); +#endif + if (lpfc_fcp_look_ahead) { - if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use)) + if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) lpfc_sli4_eq_clr_intr(fpeq); else { - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); return IRQ_NONE; } } @@ -12868,7 +13585,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) lpfc_sli4_eq_flush(phba, fpeq); 
spin_unlock_irqrestore(&phba->hbalock, iflag); if (lpfc_fcp_look_ahead) - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); return IRQ_NONE; } @@ -12879,7 +13596,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) if (eqe == NULL) break; - lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx); + lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); if (!(++ecount % fpeq->entry_repost)) lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); fpeq->EQ_processed++; @@ -12896,7 +13613,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) fpeq->EQ_no_entry++; if (lpfc_fcp_look_ahead) { - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); return IRQ_NONE; } @@ -12910,7 +13627,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) } if (lpfc_fcp_look_ahead) - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); + return IRQ_HANDLED; } /* lpfc_sli4_fp_intr_handler */ @@ -12937,7 +13655,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) struct lpfc_hba *phba; irqreturn_t hba_irq_rc; bool hba_handled = false; - int fcp_eqidx; + int qidx; /* Get the driver's phba structure from the dev_id */ phba = (struct lpfc_hba *)dev_id; @@ -12948,16 +13666,16 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) /* * Invoke fast-path host attention interrupt handling as appropriate. */ - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { + for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, - &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); + &phba->sli4_hba.hba_eq_hdl[qidx]); if (hba_irq_rc == IRQ_HANDLED) hba_handled |= true; } if (phba->cfg_fof) { hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, - &phba->sli4_hba.fcp_eq_hdl[0]); + &phba->sli4_hba.hba_eq_hdl[qidx]); if (hba_irq_rc == IRQ_HANDLED) hba_handled |= true; } @@ -12988,6 +13706,11 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue) dmabuf->virt, dmabuf->phys); kfree(dmabuf); } + if (queue->rqbp) { + lpfc_free_rq_buffer(queue->phba, queue); + kfree(queue->rqbp); + } + kfree(queue->pring); kfree(queue); return; } @@ -13021,7 +13744,13 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, return NULL; queue->page_count = (ALIGN(entry_size * entry_count, hw_page_size))/hw_page_size; + + /* If needed, Adjust page count to match the max the adapter supports */ + if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt) + queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; + INIT_LIST_HEAD(&queue->list); + INIT_LIST_HEAD(&queue->wq_list); INIT_LIST_HEAD(&queue->page_list); INIT_LIST_HEAD(&queue->child_list); for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { @@ -13093,7 +13822,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) } /** - * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs + * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs * @phba: HBA structure that indicates port to create a queue on. * @startq: The starting FCP EQ to modify * @@ -13109,7 +13838,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) * fails this function will return -ENXIO. 
**/ int -lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq) +lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq) { struct lpfc_mbx_modify_eq_delay *eq_delay; LPFC_MBOXQ_t *mbox; @@ -13117,11 +13846,11 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq) int cnt, rc, length, status = 0; uint32_t shdr_status, shdr_add_status; uint32_t result; - int fcp_eqidx; + int qidx; union lpfc_sli4_cfg_shdr *shdr; uint16_t dmult; - if (startq >= phba->cfg_fcp_io_channel) + if (startq >= phba->io_channel_irqs) return 0; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -13135,16 +13864,15 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq) eq_delay = &mbox->u.mqe.un.eq_delay; /* Calculate delay multiper from maximum interrupt per second */ - result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; - if (result > LPFC_DMULT_CONST) + result = phba->cfg_fcp_imax / phba->io_channel_irqs; + if (result > LPFC_DMULT_CONST || result == 0) dmult = 0; else dmult = LPFC_DMULT_CONST/result - 1; cnt = 0; - for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; - fcp_eqidx++) { - eq = phba->sli4_hba.hba_eq[fcp_eqidx]; + for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) { + eq = phba->sli4_hba.hba_eq[qidx]; if (!eq) continue; eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; @@ -13359,8 +14087,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, switch (cq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0361 Unsupported CQ count. (%d)\n", - cq->entry_count); + "0361 Unsupported CQ count: " + "entry cnt %d sz %d pg cnt %d repost %d\n", + cq->entry_count, cq->entry_size, + cq->page_count, cq->entry_repost); if (cq->entry_count < 256) { status = -EINVAL; goto out; @@ -13420,6 +14150,234 @@ out: } /** + * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ + * @phba: HBA structure that indicates port to create a queue on. + * @cqp: The queue structure array to use to create the completion queues. + * @eqp: The event queue array to bind these completion queues to. + * + * This function creates a set of completion queue, s to support MRQ + * as detailed in @cqp, on a port, + * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @cq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. The @eq + * is used to indicate which event queue to bind this completion queue to. This + * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the + * completion queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
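The lpfc_cq_create_set() routine that follows issues CREATE_CQ_SET as a non-embedded mailbox, so the payload must be sized for one DMA address per page of every CQ in the set and compared against what lpfc_sli4_config() actually allocated. A stand-alone sketch of that length check; the structure sizes here are assumptions for illustration, not the real SLI-4 layouts from lpfc_hw4.h.

#include <stdio.h>
#include <stdint.h>

#define CMD_HDR_SIZE     64	/* stand-in for sizeof(struct lpfc_mbx_cq_create_set) */
#define DMA_ADDRESS_SIZE 8	/* stand-in for sizeof(struct dma_address) */

int main(void)
{
	int numcq = 4;			/* e.g. phba->cfg_nvmet_mrq */
	int pages_per_cq = 8;		/* cqp[0]->page_count */
	size_t length = CMD_HDR_SIZE +
		(size_t)numcq * pages_per_cq * DMA_ADDRESS_SIZE;

	/* lpfc_sli4_config() reports how much SGE-backed space it allocated;
	 * the driver bails out with -ENOMEM if that is smaller than length. */
	size_t alloclen = 4096;		/* pretend one non-embedded SGE page */

	printf("requested %zu bytes, allocated %zu -> %s\n",
	       length, alloclen, alloclen < length ? "-ENOMEM" : "ok");
	return 0;
}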
+ **/ +int +lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, + struct lpfc_queue **eqp, uint32_t type, uint32_t subtype) +{ + struct lpfc_queue *cq; + struct lpfc_queue *eq; + struct lpfc_mbx_cq_create_set *cq_set; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, alloclen, status = 0; + int cnt, idx, numcq, page_idx = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + /* sanity check on queue memory */ + numcq = phba->cfg_nvmet_mrq; + if (!cqp || !eqp || !numcq) + return -ENODEV; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + length = sizeof(struct lpfc_mbx_cq_create_set); + length += ((numcq * cqp[0]->page_count) * + sizeof(struct dma_address)); + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, + LPFC_SLI4_MBX_NEMBED); + if (alloclen < length) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3098 Allocated DMA memory size (%d) is " + "less than the requested DMA memory size " + "(%d)\n", alloclen, length); + status = -ENOMEM; + goto out; + } + cq_set = mbox->sge_array->addr[0]; + shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; + bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); + + for (idx = 0; idx < numcq; idx++) { + cq = cqp[idx]; + eq = eqp[idx]; + if (!cq || !eq) { + status = -ENOMEM; + goto out; + } + + switch (idx) { + case 0: + bf_set(lpfc_mbx_cq_create_set_page_size, + &cq_set->u.request, + (hw_page_size / SLI4_PAGE_SIZE)); + bf_set(lpfc_mbx_cq_create_set_num_pages, + &cq_set->u.request, cq->page_count); + bf_set(lpfc_mbx_cq_create_set_evt, + &cq_set->u.request, 1); + bf_set(lpfc_mbx_cq_create_set_valid, + &cq_set->u.request, 1); + bf_set(lpfc_mbx_cq_create_set_cqe_size, + &cq_set->u.request, 0); + bf_set(lpfc_mbx_cq_create_set_num_cq, + &cq_set->u.request, numcq); + switch (cq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3118 Bad CQ count. 
(%d)\n", + cq->entry_count); + if (cq->entry_count < 256) { + status = -EINVAL; + goto out; + } + /* otherwise default to smallest (drop thru) */ + case 256: + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, LPFC_CQ_CNT_256); + break; + case 512: + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, LPFC_CQ_CNT_512); + break; + case 1024: + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, LPFC_CQ_CNT_1024); + break; + } + bf_set(lpfc_mbx_cq_create_set_eq_id0, + &cq_set->u.request, eq->queue_id); + break; + case 1: + bf_set(lpfc_mbx_cq_create_set_eq_id1, + &cq_set->u.request, eq->queue_id); + break; + case 2: + bf_set(lpfc_mbx_cq_create_set_eq_id2, + &cq_set->u.request, eq->queue_id); + break; + case 3: + bf_set(lpfc_mbx_cq_create_set_eq_id3, + &cq_set->u.request, eq->queue_id); + break; + case 4: + bf_set(lpfc_mbx_cq_create_set_eq_id4, + &cq_set->u.request, eq->queue_id); + break; + case 5: + bf_set(lpfc_mbx_cq_create_set_eq_id5, + &cq_set->u.request, eq->queue_id); + break; + case 6: + bf_set(lpfc_mbx_cq_create_set_eq_id6, + &cq_set->u.request, eq->queue_id); + break; + case 7: + bf_set(lpfc_mbx_cq_create_set_eq_id7, + &cq_set->u.request, eq->queue_id); + break; + case 8: + bf_set(lpfc_mbx_cq_create_set_eq_id8, + &cq_set->u.request, eq->queue_id); + break; + case 9: + bf_set(lpfc_mbx_cq_create_set_eq_id9, + &cq_set->u.request, eq->queue_id); + break; + case 10: + bf_set(lpfc_mbx_cq_create_set_eq_id10, + &cq_set->u.request, eq->queue_id); + break; + case 11: + bf_set(lpfc_mbx_cq_create_set_eq_id11, + &cq_set->u.request, eq->queue_id); + break; + case 12: + bf_set(lpfc_mbx_cq_create_set_eq_id12, + &cq_set->u.request, eq->queue_id); + break; + case 13: + bf_set(lpfc_mbx_cq_create_set_eq_id13, + &cq_set->u.request, eq->queue_id); + break; + case 14: + bf_set(lpfc_mbx_cq_create_set_eq_id14, + &cq_set->u.request, eq->queue_id); + break; + case 15: + bf_set(lpfc_mbx_cq_create_set_eq_id15, + &cq_set->u.request, eq->queue_id); + break; + } + + /* link the cq onto the parent eq child list */ + list_add_tail(&cq->list, &eq->child_list); + /* Set up completion queue's type and subtype */ + cq->type = type; + cq->subtype = subtype; + cq->assoc_qid = eq->queue_id; + cq->host_index = 0; + cq->hba_index = 0; + + rc = 0; + list_for_each_entry(dmabuf, &cq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + cnt = page_idx + dmabuf->buffer_tag; + cq_set->u.request.page[cnt].addr_lo = + putPaddrLow(dmabuf->phys); + cq_set->u.request.page[cnt].addr_hi = + putPaddrHigh(dmabuf->phys); + rc++; + } + page_idx += rc; + } + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3119 CQ_CREATE_SET mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); + if (rc == 0xFFFF) { + status = -ENXIO; + goto out; + } + + for (idx = 0; idx < numcq; idx++) { + cq = cqp[idx]; + cq->queue_id = rc + idx; + } + +out: + lpfc_sli4_mbox_cmd_free(phba, mbox); + return status; +} + +/** * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration * @phba: HBA structure that indicates port to create a queue on. 
* @mq: The queue structure to use to create the mailbox queue. @@ -13722,7 +14680,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_WQ_WQE_SIZE_128); bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, - (PAGE_SIZE/SLI4_PAGE_SIZE)); + LPFC_WQ_PAGE_SIZE_4096); page = wq_create->u.request_1.page; break; } @@ -13748,8 +14706,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_WQ_WQE_SIZE_128); break; } - bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, - (PAGE_SIZE/SLI4_PAGE_SIZE)); + bf_set(lpfc_mbx_wq_create_page_size, + &wq_create->u.request_1, + LPFC_WQ_PAGE_SIZE_4096); page = wq_create->u.request_1.page; break; default: @@ -13825,6 +14784,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, wq->db_format = LPFC_DB_LIST_FORMAT; wq->db_regaddr = phba->sli4_hba.WQDBregaddr; } + wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); + if (wq->pring == NULL) { + status = -ENOMEM; + goto out; + } wq->type = LPFC_WQ; wq->assoc_qid = cq->queue_id; wq->subtype = subtype; @@ -13935,7 +14899,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, LPFC_RQE_SIZE_8); bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, - (PAGE_SIZE/SLI4_PAGE_SIZE)); + LPFC_RQ_PAGE_SIZE_4096); } else { switch (hrq->entry_count) { default: @@ -14144,6 +15108,197 @@ out: } /** + * lpfc_mrq_create - Create MRQ Receive Queues on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @hrqp: The queue structure array to use to create the header receive queues. + * @drqp: The queue structure array to use to create the data receive queues. + * @cqp: The completion queue array to bind these receive queues to. + * + * This function creates a receive buffer queue pair , as detailed in @hrq and + * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command + * to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq + * struct is used to get the entry count that is necessary to determine the + * number of pages to use for this queue. The @cq is used to indicate which + * completion queue to bind received buffers that are posted to these queues to. + * This function will send the RQ_CREATE mailbox command to the HBA to setup the + * receive queue pair. This function is asynchronous and will wait for the + * mailbox command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
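One detail worth calling out from the lpfc_mrq_create() routine described above: the firmware returns a single base queue id for the whole RQ_CREATE, and the driver then numbers each header/data pair from that base. A small self-contained sketch of the pairing, using hypothetical types rather than the driver's structures:

#include <stdio.h>

struct rq_model { int queue_id; };

static void assign_mrq_ids(struct rq_model *hrq, struct rq_model *drq,
			   int numrq, int base_id)
{
	for (int idx = 0; idx < numrq; idx++) {
		hrq[idx].queue_id = base_id + (2 * idx);	/* header RQ */
		drq[idx].queue_id = base_id + (2 * idx) + 1;	/* matching data RQ */
	}
}

int main(void)
{
	struct rq_model hrq[4], drq[4];

	assign_mrq_ids(hrq, drq, 4, 100);
	for (int i = 0; i < 4; i++)
		printf("pair %d: hdr RQ %d, data RQ %d\n",
		       i, hrq[i].queue_id, drq[i].queue_id);
	return 0;
}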
+ **/ +int +lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, + struct lpfc_queue **drqp, struct lpfc_queue **cqp, + uint32_t subtype) +{ + struct lpfc_queue *hrq, *drq, *cq; + struct lpfc_mbx_rq_create_v2 *rq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, alloclen, status = 0; + int cnt, idx, numrq, page_idx = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + numrq = phba->cfg_nvmet_mrq; + /* sanity check on array memory */ + if (!hrqp || !drqp || !cqp || !numrq) + return -ENODEV; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + length = sizeof(struct lpfc_mbx_rq_create_v2); + length += ((2 * numrq * hrqp[0]->page_count) * + sizeof(struct dma_address)); + + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, + LPFC_SLI4_MBX_NEMBED); + if (alloclen < length) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3099 Allocated DMA memory size (%d) is " + "less than the requested DMA memory size " + "(%d)\n", alloclen, length); + status = -ENOMEM; + goto out; + } + + + + rq_create = mbox->sge_array->addr[0]; + shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; + + bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); + cnt = 0; + + for (idx = 0; idx < numrq; idx++) { + hrq = hrqp[idx]; + drq = drqp[idx]; + cq = cqp[idx]; + + if (hrq->entry_count != drq->entry_count) { + status = -EINVAL; + goto out; + } + + /* sanity check on queue memory */ + if (!hrq || !drq || !cq) { + status = -ENODEV; + goto out; + } + + if (idx == 0) { + bf_set(lpfc_mbx_rq_create_num_pages, + &rq_create->u.request, + hrq->page_count); + bf_set(lpfc_mbx_rq_create_rq_cnt, + &rq_create->u.request, (numrq * 2)); + bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, + 1); + bf_set(lpfc_rq_context_base_cq, + &rq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_rq_context_data_size, + &rq_create->u.request.context, + LPFC_DATA_BUF_SIZE); + bf_set(lpfc_rq_context_hdr_size, + &rq_create->u.request.context, + LPFC_HDR_BUF_SIZE); + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, + hrq->entry_count); + bf_set(lpfc_rq_context_rqe_size, + &rq_create->u.request.context, + LPFC_RQE_SIZE_8); + bf_set(lpfc_rq_context_page_size, + &rq_create->u.request.context, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + } + rc = 0; + list_for_each_entry(dmabuf, &hrq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + cnt = page_idx + dmabuf->buffer_tag; + rq_create->u.request.page[cnt].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[cnt].addr_hi = + putPaddrHigh(dmabuf->phys); + rc++; + } + page_idx += rc; + + rc = 0; + list_for_each_entry(dmabuf, &drq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + cnt = page_idx + dmabuf->buffer_tag; + rq_create->u.request.page[cnt].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[cnt].addr_hi = + putPaddrHigh(dmabuf->phys); + rc++; + } + page_idx += rc; + + hrq->db_format = LPFC_DB_RING_FORMAT; + hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; + hrq->type = LPFC_HRQ; + hrq->assoc_qid = cq->queue_id; + hrq->subtype = subtype; + hrq->host_index = 0; + hrq->hba_index = 0; + + drq->db_format = LPFC_DB_RING_FORMAT; + drq->db_regaddr = phba->sli4_hba.RQDBregaddr; + drq->type = LPFC_DRQ; + 
drq->assoc_qid = cq->queue_id; + drq->subtype = subtype; + drq->host_index = 0; + drq->hba_index = 0; + + list_add_tail(&hrq->list, &cq->child_list); + list_add_tail(&drq->list, &cq->child_list); + } + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3120 RQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); + if (rc == 0xFFFF) { + status = -ENXIO; + goto out; + } + + /* Initialize all RQs with associated queue id */ + for (idx = 0; idx < numrq; idx++) { + hrq = hrqp[idx]; + hrq->queue_id = rc + (2 * idx); + drq = drqp[idx]; + drq->queue_id = rc + (2 * idx) + 1; + } + +out: + lpfc_sli4_mbox_cmd_free(phba, mbox); + return status; +} + +/** * lpfc_eq_destroy - Destroy an event Queue on the HBA * @eq: The queue structure associated with the queue to destroy. * @@ -14609,7 +15764,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba) } /** - * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port. + * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. * @phba: pointer to lpfc hba data structure. * @post_sgl_list: pointer to els sgl entry list. * @count: number of els sgl entries on the list. @@ -14620,7 +15775,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba) * stopped. **/ static int -lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, +lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, struct list_head *post_sgl_list, int post_cnt) { @@ -14636,14 +15791,15 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; - reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) + + reqlen = post_cnt * sizeof(struct sgl_page_pairs) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); if (reqlen > SLI4_PAGE_SIZE) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2559 Block sgl registration required DMA " "size (%d) great than a page\n", reqlen); return -ENOMEM; } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -14687,8 +15843,9 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, /* Complete initialization and perform endian conversion. 
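The reworked lpfc_sli4_post_sgl_list() above sizes its mailbox from post_cnt rather than the global ELS XRI count and rejects anything larger than one SLI-4 page. A quick stand-alone estimate of how many sgl_page_pairs entries that allows; the 16-byte sizes are assumptions for illustration, not values taken from lpfc_hw4.h.

#include <stdio.h>
#include <stdint.h>

#define SLI4_PAGE_SIZE      4096
#define SGL_PAGE_PAIR_SIZE  16	/* assumed size of struct sgl_page_pairs */
#define CFG_SHDR_SIZE       16	/* assumed size of union lpfc_sli4_cfg_shdr */

int main(void)
{
	size_t overhead = CFG_SHDR_SIZE + sizeof(uint32_t);
	size_t max_cnt = (SLI4_PAGE_SIZE - overhead) / SGL_PAGE_PAIR_SIZE;

	printf("at most %zu SGL page pairs per POST_SGL_PAGES mailbox\n", max_cnt);
	return 0;
}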
*/ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); - bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); sgl->word0 = cpu_to_le32(sgl->word0); + if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { @@ -14823,6 +15980,9 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, return rc; } +static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT; +static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT; + /** * lpfc_fc_frame_check - Check that this frame is a valid frame to handle * @phba: pointer to lpfc_hba struct that the frame was received on @@ -14837,8 +15997,6 @@ static int lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) { /* make rctl_names static to save stack space */ - static char *rctl_names[] = FC_RCTL_NAMES_INIT; - char *type_names[] = FC_TYPE_NAMES_INIT; struct fc_vft_header *fc_vft_hdr; uint32_t *header = (uint32_t *) fc_hdr; @@ -14883,6 +16041,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) case FC_TYPE_ELS: case FC_TYPE_FCP: case FC_TYPE_CT: + case FC_TYPE_NVME: break; case FC_TYPE_IP: case FC_TYPE_ILS: @@ -14893,8 +16052,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "2538 Received frame rctl:%s (x%x), type:%s (x%x), " "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", - rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, - type_names[fc_hdr->fh_type], fc_hdr->fh_type, + lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, + lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type, be32_to_cpu(header[0]), be32_to_cpu(header[1]), be32_to_cpu(header[2]), be32_to_cpu(header[3]), be32_to_cpu(header[4]), be32_to_cpu(header[5]), @@ -14903,8 +16062,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, "2539 Dropped frame rctl:%s type:%s\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type]); + lpfc_rctl_names[fc_hdr->fh_r_ctl], + lpfc_type_names[fc_hdr->fh_type]); return 1; } @@ -14940,14 +16099,11 @@ lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) **/ static struct lpfc_vport * lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, - uint16_t fcfi) + uint16_t fcfi, uint32_t did) { struct lpfc_vport **vports; struct lpfc_vport *vport = NULL; int i; - uint32_t did = (fc_hdr->fh_d_id[0] << 16 | - fc_hdr->fh_d_id[1] << 8 | - fc_hdr->fh_d_id[2]); if (did == Fabric_DID) return phba->pport; @@ -14956,7 +16112,7 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, return phba->pport; vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) + if (vports != NULL) { for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { if (phba->fcf.fcfi == fcfi && vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && @@ -14965,6 +16121,7 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, break; } } + } lpfc_destroy_vport_work_array(phba, vports); return vport; } @@ -15394,7 +16551,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * a BA_RJT. 
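The vport lookup above now receives the destination id as a parameter instead of rebuilding it from the header; the id itself is just the three fh_d_id bytes of the FC header packed into 24 bits (sli4_did_from_fc_hdr() in the driver). A self-contained illustration of that packing:

#include <stdio.h>
#include <stdint.h>

static uint32_t did_from_bytes(const uint8_t d_id[3])
{
	return ((uint32_t)d_id[0] << 16) |
	       ((uint32_t)d_id[1] << 8)  |
	        (uint32_t)d_id[2];
}

int main(void)
{
	uint8_t fabric[3] = { 0xff, 0xff, 0xfe };	/* Fabric_DID is 0xfffffe */

	printf("did = 0x%06x\n", (unsigned)did_from_bytes(fabric));
	return 0;
}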
*/ if ((fctl & FC_FC_EX_CTX) && - (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) { + (lxri > lpfc_sli4_get_iocb_cnt(phba))) { icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); @@ -15571,6 +16728,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) /* Initialize the first IOCB. */ first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; + first_iocbq->vport = vport; /* Check FC Header to see what TYPE of frame we are rcv'ing */ if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { @@ -15683,7 +16841,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, return; } if (!lpfc_complete_unsol_iocb(phba, - &phba->sli.ring[LPFC_ELS_RING], + phba->sli4_hba.els_wq->pring, iocbq, fc_hdr->fh_r_ctl, fc_hdr->fh_type)) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -15708,8 +16866,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, * This function is called with no lock held. This function processes all * the received buffers and gives it to upper layers when a received buffer * indicates that it is the final frame in the sequence. The interrupt - * service routine processes received buffers at interrupt contexts and adds - * received dma buffers to the rb_pend_list queue and signals the worker thread. + * service routine processes received buffers at interrupt contexts. * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the * appropriate receive function when the final frame in a sequence is received. **/ @@ -15725,11 +16882,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, /* Process each received buffer */ fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + /* check to see if this a valid type of frame */ if (lpfc_fc_frame_check(phba, fc_hdr)) { lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } + if ((bf_get(lpfc_cqe_code, &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) fcfi = bf_get(lpfc_rcqe_fcf_id_v1, @@ -15738,16 +16897,16 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); - vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); + /* d_id this frame is directed to */ + did = sli4_did_from_fc_hdr(fc_hdr); + + vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); if (!vport) { /* throw out the frame */ lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } - /* d_id this frame is directed to */ - did = sli4_did_from_fc_hdr(fc_hdr); - /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && (did != Fabric_DID)) { @@ -17225,7 +18384,7 @@ uint32_t lpfc_drain_txq(struct lpfc_hba *phba) { LIST_HEAD(completions); - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *piocbq = NULL; unsigned long iflags = 0; char *fail_msg = NULL; @@ -17234,6 +18393,8 @@ lpfc_drain_txq(struct lpfc_hba *phba) union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128; uint32_t txq_cnt = 0; + pring = lpfc_phba_elsring(phba); + spin_lock_irqsave(&pring->ring_lock, iflags); list_for_each_entry(piocbq, &pring->txq, list) { txq_cnt++; @@ -17255,7 +18416,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) txq_cnt); break; } - sglq = __lpfc_sli_get_sglq(phba, piocbq); + sglq = __lpfc_sli_get_els_sglq(phba, piocbq); if (!sglq) { __lpfc_sli_ringtx_put(phba, pring, piocbq); spin_unlock_irqrestore(&pring->ring_lock, iflags); @@ -17295,3 +18456,217 @@ 
lpfc_drain_txq(struct lpfc_hba *phba) return txq_cnt; } + +/** + * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. + * @phba: Pointer to HBA context object. + * @pwqe: Pointer to command WQE. + * @sglq: Pointer to the scatter gather queue object. + * + * This routine converts the bpl or bde that is in the WQE + * to a sgl list for the sli4 hardware. The physical address + * of the bpl/bde is converted back to a virtual address. + * If the WQE contains a BPL then the list of BDE's is + * converted to sli4_sge's. If the WQE contains a single + * BDE then it is converted to a single sli_sge. + * The WQE is still in cpu endianness so the contents of + * the bpl can be used without byte swapping. + * + * Returns valid XRI = Success, NO_XRI = Failure. + */ +static uint16_t +lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, + struct lpfc_sglq *sglq) +{ + uint16_t xritag = NO_XRI; + struct ulp_bde64 *bpl = NULL; + struct ulp_bde64 bde; + struct sli4_sge *sgl = NULL; + struct lpfc_dmabuf *dmabuf; + union lpfc_wqe *wqe; + int numBdes = 0; + int i = 0; + uint32_t offset = 0; /* accumulated offset in the sg request list */ + int inbound = 0; /* number of sg reply entries inbound from firmware */ + uint32_t cmd; + + if (!pwqeq || !sglq) + return xritag; + + sgl = (struct sli4_sge *)sglq->sgl; + wqe = &pwqeq->wqe; + pwqeq->iocb.ulpIoTag = pwqeq->iotag; + + cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); + if (cmd == CMD_XMIT_BLS_RSP64_WQE) + return sglq->sli4_xritag; + numBdes = pwqeq->rsvd2; + if (numBdes) { + /* The addrHigh and addrLow fields within the WQE + * have not been byteswapped yet so there is no + * need to swap them back. + */ + if (pwqeq->context3) + dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; + else + return xritag; + + bpl = (struct ulp_bde64 *)dmabuf->virt; + if (!bpl) + return xritag; + + for (i = 0; i < numBdes; i++) { + /* Should already be byte swapped. */ + sgl->addr_hi = bpl->addrHigh; + sgl->addr_lo = bpl->addrLow; + + sgl->word2 = le32_to_cpu(sgl->word2); + if ((i+1) == numBdes) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + /* swap the size field back to the cpu so we + * can assign it to the sgl. + */ + bde.tus.w = le32_to_cpu(bpl->tus.w); + sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); + /* The offsets in the sgl need to be accumulated + * separately for the request and reply lists. + * The request is always first, the reply follows. + */ + switch (cmd) { + case CMD_GEN_REQUEST64_WQE: + /* add up the reply sg entries */ + if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) + inbound++; + /* first inbound? reset the offset */ + if (inbound == 1) + offset = 0; + bf_set(lpfc_sli4_sge_offset, sgl, offset); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + offset += bde.tus.f.bdeSize; + break; + case CMD_FCP_TRSP64_WQE: + bf_set(lpfc_sli4_sge_offset, sgl, 0); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + break; + case CMD_FCP_TSEND64_WQE: + case CMD_FCP_TRECEIVE64_WQE: + bf_set(lpfc_sli4_sge_type, sgl, + bpl->tus.f.bdeFlags); + if (i < 3) + offset = 0; + else + offset += bde.tus.f.bdeSize; + bf_set(lpfc_sli4_sge_offset, sgl, offset); + break; + } + sgl->word2 = cpu_to_le32(sgl->word2); + bpl++; + sgl++; + } + } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { + /* The addrHigh and addrLow fields of the BDE have not + * been byteswapped yet so they need to be swapped + * before putting them in the sgl. 
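lpfc_wqe_bpl2sgl() above walks a buffer pointer list and emits one SGE per BDE, flagging the final entry. The sketch below models only that copy-and-mark step with hypothetical types; the driver's le32_to_cpu()/cpu_to_le32() handling and per-command offset accounting are deliberately omitted.

#include <stdio.h>
#include <stdint.h>

struct bde_model { uint32_t addr_hi, addr_lo, len; };
struct sge_model { uint32_t addr_hi, addr_lo, len; int last; };

static void bpl_to_sgl(const struct bde_model *bpl, struct sge_model *sgl, int n)
{
	for (int i = 0; i < n; i++) {
		sgl[i].addr_hi = bpl[i].addr_hi;
		sgl[i].addr_lo = bpl[i].addr_lo;
		sgl[i].len     = bpl[i].len;
		sgl[i].last    = (i + 1 == n);	/* lpfc_sli4_sge_last on final SGE */
	}
}

int main(void)
{
	struct bde_model bpl[2] = { { 0x1, 0x1000, 512 }, { 0x1, 0x2000, 256 } };
	struct sge_model sgl[2];

	bpl_to_sgl(bpl, sgl, 2);
	for (int i = 0; i < 2; i++)
		printf("sge %d: %08x%08x len %u last %d\n", i,
		       (unsigned)sgl[i].addr_hi, (unsigned)sgl[i].addr_lo,
		       (unsigned)sgl[i].len, sgl[i].last);
	return 0;
}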
+ */ + sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); + sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); + } + return sglq->sli4_xritag; +} + +/** + * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) + * @phba: Pointer to HBA context object. + * @ring_number: Base sli ring number + * @pwqe: Pointer to command WQE. + **/ +int +lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *pwqe) +{ + union lpfc_wqe *wqe = &pwqe->wqe; + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_queue *wq; + struct lpfc_sglq *sglq; + struct lpfc_sli_ring *pring; + unsigned long iflags; + + /* NVME_LS and NVME_LS ABTS requests. */ + if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { + pring = phba->sli4_hba.nvmels_wq->pring; + spin_lock_irqsave(&pring->ring_lock, iflags); + sglq = __lpfc_sli_get_els_sglq(phba, pwqe); + if (!sglq) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_BUSY; + } + pwqe->sli4_lxritag = sglq->sli4_lxritag; + pwqe->sli4_xritag = sglq->sli4_xritag; + if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_ERROR; + } + bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, + pwqe->sli4_xritag); + if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_ERROR; + } + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return 0; + } + + /* NVME_FCREQ and NVME_ABTS requests */ + if (pwqe->iocb_flag & LPFC_IO_NVME) { + /* Get the IO distribution (hba_wqidx) for WQ assignment. */ + pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; + + spin_lock_irqsave(&pring->ring_lock, iflags); + wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; + bf_set(wqe_cqid, &wqe->generic.wqe_com, + phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); + if (lpfc_sli4_wq_put(wq, wqe)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_ERROR; + } + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return 0; + } + + /* NVMET requests */ + if (pwqe->iocb_flag & LPFC_IO_NVMET) { + /* Get the IO distribution (hba_wqidx) for WQ assignment. */ + pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; + + spin_lock_irqsave(&pring->ring_lock, iflags); + ctxp = pwqe->context2; + sglq = ctxp->rqb_buffer->sglq; + if (pwqe->sli4_xritag == NO_XRI) { + pwqe->sli4_lxritag = sglq->sli4_lxritag; + pwqe->sli4_xritag = sglq->sli4_xritag; + } + bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, + pwqe->sli4_xritag); + wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; + bf_set(wqe_cqid, &wqe->generic.wqe_com, + phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); + if (lpfc_sli4_wq_put(wq, wqe)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_ERROR; + } + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return 0; + } + return WQE_ERROR; +} diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 74227a28bd56..9085306ddd78 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. 
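lpfc_sli4_issue_wqe() above routes a WQE by its iocb_flag bits: LS traffic goes to the single nvmels work queue, while fast-path NVME/NVMET commands use the per-channel queue selected by hba_wqidx. A rough stand-alone sketch of that dispatch; the flag values and queue names here are illustrative only, not the driver's definitions.

#include <stdio.h>
#include <stdint.h>

#define IO_NVME_LS  0x1
#define IO_NVME     0x2
#define IO_NVMET    0x4

static const char *route_wqe(uint32_t flags, int wqidx)
{
	static char buf[32];

	if (flags & IO_NVME_LS)
		return "nvmels_wq";		/* single slow-path LS queue */
	if (flags & (IO_NVME | IO_NVMET)) {
		snprintf(buf, sizeof(buf), "nvme_wq[%d]", wqidx);
		return buf;			/* fast path, per-channel WQ */
	}
	return "WQE_ERROR";			/* unrecognized class of request */
}

int main(void)
{
	printf("%s\n", route_wqe(IO_NVME_LS, 0));
	printf("%s\n", route_wqe(IO_NVME, 3));
	printf("%s\n", route_wqe(0, 0));
	return 0;
}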
* + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -54,9 +56,16 @@ struct lpfc_iocbq { uint16_t iotag; /* pre-assigned IO tag */ uint16_t sli4_lxritag; /* logical pre-assigned XRI. */ uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ + uint16_t hba_wqidx; /* index to HBA work queue */ struct lpfc_cq_event cq_event; + struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */ + uint64_t isr_timestamp; - IOCB_t iocb; /* IOCB cmd */ + /* Be careful here */ + union lpfc_wqe wqe; /* WQE cmd */ + IOCB_t iocb; /* For IOCB cmd or if we want 128 byte WQE */ + + uint8_t rsvd2; uint8_t priority; /* OAS priority */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ uint32_t iocb_flag; @@ -82,9 +91,13 @@ struct lpfc_iocbq { #define LPFC_IO_OAS 0x10000 /* OAS FCP IO */ #define LPFC_IO_FOF 0x20000 /* FOF FCP IO */ #define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */ +#define LPFC_PRLI_NVME_REQ 0x80000 /* This is an NVME PRLI. */ +#define LPFC_PRLI_FCP_REQ 0x100000 /* This is an NVME PRLI. */ +#define LPFC_IO_NVME 0x200000 /* NVME FCP command */ +#define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */ +#define LPFC_IO_NVMET 0x800000 /* NVMET command */ uint32_t drvrTimeout; /* driver timeout in seconds */ - uint32_t fcp_wqidx; /* index to FCP work queue */ struct lpfc_vport *vport;/* virtual port pointer */ void *context1; /* caller context information */ void *context2; /* caller context information */ @@ -97,12 +110,14 @@ struct lpfc_iocbq { struct lpfc_node_rrq *rrq; } context_un; - void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); + void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_wcqe_complete *); }; #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ @@ -112,6 +127,14 @@ struct lpfc_iocbq { #define IOCB_ERROR 2 #define IOCB_TIMEDOUT 3 +#define SLI_WQE_RET_WQE 1 /* Return WQE if cmd ring full */ + +#define WQE_SUCCESS 0 +#define WQE_BUSY 1 +#define WQE_ERROR 2 +#define WQE_TIMEDOUT 3 +#define WQE_ABORTED 4 + #define LPFC_MBX_WAKE 1 #define LPFC_MBX_IMED_UNREG 2 @@ -297,12 +320,9 @@ struct lpfc_sli { #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ #define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ +#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */ - struct lpfc_sli_ring *ring; - int fcp_ring; /* ring used for FCP initiator commands */ - int next_ring; - - int extra_ring; /* extra ring used for other protocols */ + struct lpfc_sli_ring *sli3_ring; struct lpfc_sli_stat slistat; /* SLI statistical info */ struct list_head mboxq; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 0b88b5703e0f..91153c9f6d18 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ 
b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -35,9 +37,10 @@ #define LPFC_NEMBED_MBOX_SGL_CNT 254 /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ -#define LPFC_FCP_IO_CHAN_DEF 4 -#define LPFC_FCP_IO_CHAN_MIN 1 -#define LPFC_FCP_IO_CHAN_MAX 16 +#define LPFC_HBA_IO_CHAN_MIN 0 +#define LPFC_HBA_IO_CHAN_MAX 32 +#define LPFC_FCP_IO_CHAN_DEF 4 +#define LPFC_NVME_IO_CHAN_DEF 0 /* Number of channels used for Flash Optimized Fabric (FOF) operations */ @@ -107,6 +110,9 @@ enum lpfc_sli4_queue_subtype { LPFC_MBOX, LPFC_FCP, LPFC_ELS, + LPFC_NVME, + LPFC_NVMET, + LPFC_NVME_LS, LPFC_USOL }; @@ -125,25 +131,41 @@ union sli4_qe { struct lpfc_rqe *rqe; }; +/* RQ buffer list */ +struct lpfc_rqb { + uint16_t entry_count; /* Current number of RQ slots */ + uint16_t buffer_count; /* Current number of buffers posted */ + struct list_head rqb_buffer_list; /* buffers assigned to this HBQ */ + /* Callback for HBQ buffer allocation */ + struct rqb_dmabuf *(*rqb_alloc_buffer)(struct lpfc_hba *); + /* Callback for HBQ buffer free */ + void (*rqb_free_buffer)(struct lpfc_hba *, + struct rqb_dmabuf *); +}; + struct lpfc_queue { struct list_head list; + struct list_head wq_list; enum lpfc_sli4_queue_type type; enum lpfc_sli4_queue_subtype subtype; struct lpfc_hba *phba; struct list_head child_list; + struct list_head page_list; + struct list_head sgl_list; uint32_t entry_count; /* Number of entries to support on the queue */ uint32_t entry_size; /* Size of each queue entry. 
*/ uint32_t entry_repost; /* Count of entries before doorbell is rung */ #define LPFC_QUEUE_MIN_REPOST 8 uint32_t queue_id; /* Queue ID assigned by the hardware */ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ - struct list_head page_list; uint32_t page_count; /* Number of pages allocated for this queue */ uint32_t host_index; /* The host's index for putting or getting */ uint32_t hba_index; /* The last known hba index for get or put */ struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ + struct lpfc_rqb *rqbp; /* ptr to RQ buffers */ + uint16_t sgl_list_cnt; uint16_t db_format; #define LPFC_DB_RING_FORMAT 0x01 #define LPFC_DB_LIST_FORMAT 0x02 @@ -176,6 +198,8 @@ struct lpfc_queue { #define RQ_buf_trunc q_cnt_3 #define RQ_rcv_buf q_cnt_4 + uint64_t isr_timestamp; + struct lpfc_queue *assoc_qp; union sli4_qe qe[1]; /* array to index entries (must be last) */ }; @@ -338,6 +362,7 @@ struct lpfc_bmbx { #define LPFC_CQE_DEF_COUNT 1024 #define LPFC_WQE_DEF_COUNT 256 #define LPFC_WQE128_DEF_COUNT 128 +#define LPFC_WQE128_MAX_COUNT 256 #define LPFC_MQE_DEF_COUNT 16 #define LPFC_RQE_DEF_COUNT 512 @@ -379,10 +404,14 @@ struct lpfc_max_cfg_param { struct lpfc_hba; /* SLI4 HBA multi-fcp queue handler struct */ -struct lpfc_fcp_eq_hdl { +struct lpfc_hba_eq_hdl { uint32_t idx; struct lpfc_hba *phba; - atomic_t fcp_eq_in_use; + atomic_t hba_eq_in_use; + struct cpumask *cpumask; + /* CPU affinitsed to or 0xffffffff if multiple */ + uint32_t cpu; +#define LPFC_MULTI_CPU_AFFINITY 0xffffffff }; /* Port Capabilities for SLI4 Parameters */ @@ -427,6 +456,7 @@ struct lpfc_pc_sli4_params { uint8_t wqsize; #define LPFC_WQ_SZ64_SUPPORT 1 #define LPFC_WQ_SZ128_SUPPORT 2 + uint8_t wqpcnt; }; struct lpfc_iov { @@ -445,7 +475,7 @@ struct lpfc_sli4_lnk_info { uint8_t optic_state; }; -#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \ +#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \ LPFC_FOF_IO_CHAN_NUM) #define LPFC_SLI4_HANDLER_NAME_SZ 16 @@ -515,23 +545,34 @@ struct lpfc_sli4_hba { uint32_t ue_to_rp; struct lpfc_register sli_intf; struct lpfc_pc_sli4_params pc_sli4_params; - struct msix_entry *msix_entries; uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ]; - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ + struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */ /* Pointers to the constructed SLI4 queues */ - struct lpfc_queue **hba_eq;/* Event queues for HBA */ - struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ - struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ + struct lpfc_queue **hba_eq; /* Event queues for HBA */ + struct lpfc_queue **fcp_cq; /* Fast-path FCP compl queue */ + struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */ + struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */ + struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */ + struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */ + struct lpfc_queue **fcp_wq; /* Fast-path FCP work queue */ + struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */ uint16_t *fcp_cq_map; + uint16_t *nvme_cq_map; + struct list_head lpfc_wq_list; struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ + struct lpfc_queue *nvmels_cq; /* NVME LS complete queue */ struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ + struct lpfc_queue *nvmels_wq; /* NVME LS work queue */ 
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ + struct lpfc_name wwnn; + struct lpfc_name wwpn; + uint32_t fw_func_mode; /* FW function protocol mode */ uint32_t ulp0_mode; /* ULP0 protocol mode */ uint32_t ulp1_mode; /* ULP1 protocol mode */ @@ -568,14 +609,20 @@ struct lpfc_sli4_hba { uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */ uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ uint16_t next_rpi; + uint16_t nvme_xri_max; + uint16_t nvme_xri_cnt; + uint16_t nvme_xri_start; uint16_t scsi_xri_max; uint16_t scsi_xri_cnt; - uint16_t els_xri_cnt; uint16_t scsi_xri_start; - struct list_head lpfc_free_sgl_list; - struct list_head lpfc_sgl_list; + uint16_t els_xri_cnt; + uint16_t nvmet_xri_cnt; + struct list_head lpfc_els_sgl_list; struct list_head lpfc_abts_els_sgl_list; + struct list_head lpfc_nvmet_sgl_list; + struct list_head lpfc_abts_nvmet_sgl_list; struct list_head lpfc_abts_scsi_buf_list; + struct list_head lpfc_abts_nvme_buf_list; struct lpfc_sglq **lpfc_sglq_active_list; struct list_head lpfc_rpi_hdr_list; unsigned long *rpi_bmask; @@ -602,8 +649,10 @@ struct lpfc_sli4_hba { #define LPFC_SLI4_PPNAME_NON 0 #define LPFC_SLI4_PPNAME_GET 1 struct lpfc_iov iov; + spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ - spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ + spinlock_t sgl_list_lock; /* list of aborted els IOs */ + spinlock_t nvmet_io_lock; uint32_t physical_port; /* CPU to vector mapping information */ @@ -611,11 +660,14 @@ struct lpfc_sli4_hba { uint16_t num_online_cpu; uint16_t num_present_cpu; uint16_t curr_disp_cpu; + + uint16_t nvmet_mrq_post_idx; }; enum lpfc_sge_type { GEN_BUFF_TYPE, - SCSI_BUFF_TYPE + SCSI_BUFF_TYPE, + NVMET_BUFF_TYPE }; enum lpfc_sgl_state { @@ -694,15 +746,21 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli4_queue_free(struct lpfc_queue *); int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); -int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint32_t); +int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq); int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t, uint32_t); +int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, + struct lpfc_queue **eqp, uint32_t type, + uint32_t subtype); int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); +int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, + struct lpfc_queue **drqp, struct lpfc_queue **cqp, + uint32_t subtype); void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int); int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); @@ -714,6 +772,7 @@ int lpfc_sli4_queue_setup(struct lpfc_hba *); void lpfc_sli4_queue_unset(struct lpfc_hba *); int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); +int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba); uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); void lpfc_sli4_free_xri(struct lpfc_hba *, int); int 
lpfc_sli4_post_async_mbox(struct lpfc_hba *); @@ -746,6 +805,7 @@ int lpfc_sli4_brdreset(struct lpfc_hba *); int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); +int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba); int lpfc_sli4_init_vpi(struct lpfc_vport *); uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 0ee0623a354c..86c6c9b26b82 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -18,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.2.0.4" +#define LPFC_DRIVER_VERSION "11.2.0.7" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ @@ -30,4 +32,6 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright(c) 2004-2016 Emulex. All rights reserved." +#define LPFC_COPYRIGHT "Copyright (C) 2017 Broadcom. All Rights Reserved. " \ + "The term \"Broadcom\" refers to Broadcom Limited " \ + "and/or its subsidiaries." diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index e18bbc66e83b..9a0339dbc024 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -28,11 +30,13 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/sched/signal.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -402,6 +406,22 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) vport->fdmi_port_mask = phba->pport->fdmi_port_mask; } + if ((phba->nvmet_support == 0) && + ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) { + /* Create NVME binding with nvme_fc_transport. This + * ensures the vport is initialized. 
+ */ + rc = lpfc_nvme_create_localport(vport); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6003 %s status x%x\n", + "NVME registration failed, ", + rc); + goto error_out; + } + } + /* * In SLI4, the vpi must be activated before it can be used * by the port. diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 6b2c94eb8134..62295971f66c 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2006 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index a3fe1fb55c17..5b7aec5d575a 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1148,7 +1148,7 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) /* TMs are on msix_index == 0 */ if (reply_q->msix_index == 0) continue; - synchronize_irq(reply_q->vector); + synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); } } @@ -1837,11 +1837,8 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc) list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { list_del(&reply_q->list); - if (smp_affinity_enable) { - irq_set_affinity_hint(reply_q->vector, NULL); - free_cpumask_var(reply_q->affinity_hint); - } - free_irq(reply_q->vector, reply_q); + free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), + reply_q); kfree(reply_q); } } @@ -1850,13 +1847,13 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc) * _base_request_irq - request irq * @ioc: per adapter object * @index: msix index into vector table - * @vector: irq vector * * Inserting respective reply_queue into the list. 
*/ static int -_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) +_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) { + struct pci_dev *pdev = ioc->pdev; struct adapter_reply_queue *reply_q; int r; @@ -1868,14 +1865,6 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) } reply_q->ioc = ioc; reply_q->msix_index = index; - reply_q->vector = vector; - - if (smp_affinity_enable) { - if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) { - kfree(reply_q); - return -ENOMEM; - } - } atomic_set(&reply_q->busy, 0); if (ioc->msix_enable) @@ -1884,12 +1873,11 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) else snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", ioc->driver_name, ioc->id); - r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name, - reply_q); + r = request_irq(pci_irq_vector(pdev, index), _base_interrupt, + IRQF_SHARED, reply_q->name, reply_q); if (r) { pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n", - reply_q->name, vector); - free_cpumask_var(reply_q->affinity_hint); + reply_q->name, pci_irq_vector(pdev, index)); kfree(reply_q); return -EBUSY; } @@ -1925,6 +1913,21 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) if (!nr_msix) return; + if (smp_affinity_enable) { + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev, + reply_q->msix_index); + if (!mask) { + pr_warn(MPT3SAS_FMT "no affinity for msi %x\n", + ioc->name, reply_q->msix_index); + continue; + } + + for_each_cpu(cpu, mask) + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + } + return; + } cpu = cpumask_first(cpu_online_mask); list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { @@ -1938,18 +1941,9 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) group++; for (i = 0 ; i < group ; i++) { - ioc->cpu_msix_table[cpu] = index; - if (smp_affinity_enable) - cpumask_or(reply_q->affinity_hint, - reply_q->affinity_hint, get_cpu_mask(cpu)); + ioc->cpu_msix_table[cpu] = reply_q->msix_index; cpu = cpumask_next(cpu, cpu_online_mask); } - if (smp_affinity_enable) - if (irq_set_affinity_hint(reply_q->vector, - reply_q->affinity_hint)) - dinitprintk(ioc, pr_info(MPT3SAS_FMT - "Err setting affinity hint to irq vector %d\n", - ioc->name, reply_q->vector)); index++; } } @@ -1976,10 +1970,10 @@ _base_disable_msix(struct MPT3SAS_ADAPTER *ioc) static int _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) { - struct msix_entry *entries, *a; int r; int i, local_max_msix_vectors; u8 try_msix = 0; + unsigned int irq_flags = PCI_IRQ_MSIX; if (msix_disable == -1 || msix_disable == 0) try_msix = 1; @@ -1991,7 +1985,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) goto try_ioapic; ioc->reply_queue_count = min_t(int, ioc->cpu_count, - ioc->msix_vector_count); + ioc->msix_vector_count); printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores" ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, @@ -2002,56 +1996,51 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) else local_max_msix_vectors = max_msix_vectors; - if (local_max_msix_vectors > 0) { + if (local_max_msix_vectors > 0) ioc->reply_queue_count = min_t(int, local_max_msix_vectors, ioc->reply_queue_count); - ioc->msix_vector_count = ioc->reply_queue_count; - } else if (local_max_msix_vectors == 0) + else if (local_max_msix_vectors == 0) goto try_ioapic; if (ioc->msix_vector_count < ioc->cpu_count) smp_affinity_enable = 0; - entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), - 
GFP_KERNEL); - if (!entries) { - dfailprintk(ioc, pr_info(MPT3SAS_FMT - "kcalloc failed @ at %s:%d/%s() !!!\n", - ioc->name, __FILE__, __LINE__, __func__)); - goto try_ioapic; - } + if (smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY; - for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) - a->entry = i; - - r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count); - if (r) { + r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count, + irq_flags); + if (r < 0) { dfailprintk(ioc, pr_info(MPT3SAS_FMT - "pci_enable_msix_exact failed (r=%d) !!!\n", + "pci_alloc_irq_vectors failed (r=%d) !!!\n", ioc->name, r)); - kfree(entries); goto try_ioapic; } ioc->msix_enable = 1; - for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) { - r = _base_request_irq(ioc, i, a->vector); + ioc->reply_queue_count = r; + for (i = 0; i < ioc->reply_queue_count; i++) { + r = _base_request_irq(ioc, i); if (r) { _base_free_irq(ioc); _base_disable_msix(ioc); - kfree(entries); goto try_ioapic; } } - kfree(entries); return 0; /* failback to io_apic interrupt routing */ try_ioapic: ioc->reply_queue_count = 1; - r = _base_request_irq(ioc, 0, ioc->pdev->irq); + r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); + if (r < 0) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT + "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", + ioc->name, r)); + } else + r = _base_request_irq(ioc, 0); return r; } @@ -2222,7 +2211,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) list_for_each_entry(reply_q, &ioc->reply_queue_list, list) pr_info(MPT3SAS_FMT "%s: IRQ %d\n", reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : - "IO-APIC enabled"), reply_q->vector); + "IO-APIC enabled"), + pci_irq_vector(ioc->pdev, reply_q->msix_index)); pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); @@ -5357,7 +5347,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) sizeof(resource_size_t *), GFP_KERNEL); if (!ioc->reply_post_host_index) { dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation " - "for cpu_msix_table failed!!!\n", ioc->name)); + "for reply_post_host_index failed!!!\n", + ioc->name)); r = -ENOMEM; goto out_free_resources; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 4ab634fc27df..7fe7e6ed595b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -731,12 +731,10 @@ struct _event_ack_list { struct adapter_reply_queue { struct MPT3SAS_ADAPTER *ioc; u8 msix_index; - unsigned int vector; u32 reply_post_host_index; Mpi2ReplyDescriptorsUnion_t *reply_post_free; char name[MPT_NAME_LENGTH]; atomic_t busy; - cpumask_var_t affinity_hint; struct list_head list; }; diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 243eab3d10d0..e0ce5d2fd14d 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -372,6 +372,7 @@ EXPORT_SYMBOL(osduld_device_same); static int __detect_osd(struct osd_uld_device *oud) { struct scsi_device *scsi_device = oud->od.scsi_device; + struct scsi_sense_hdr sense_hdr; char caps[OSD_CAP_LEN]; int error; @@ -380,7 +381,7 @@ static int __detect_osd(struct osd_uld_device *oud) */ OSD_DEBUG("start scsi_test_unit_ready %p %p %p\n", oud, scsi_device, scsi_device->request_queue); - error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, NULL); + error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, &sense_hdr); if (error) OSD_ERR("warning: scsi_test_unit_ready failed\n"); diff 
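The mpt3sas changes above drop the driver-managed msix_entry table and affinity hints in favour of letting the PCI core spread vectors (PCI_IRQ_AFFINITY) and looking up each Linux IRQ number with pci_irq_vector(). A schematic kernel-side sketch of that pattern follows; it is not tied to the mpt3sas structures and only builds as part of a kernel module.

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_setup_irqs(struct pci_dev *pdev, int wanted)
{
	int nvec, i, rc;

	/* Let the core pick how many MSI-X vectors to grant and spread
	 * their affinity across the online CPUs. */
	nvec = pci_alloc_irq_vectors(pdev, 1, wanted,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;		/* caller may fall back to legacy INTx */

	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), demo_isr,
				 IRQF_SHARED, "demo", pdev);
		if (rc) {
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i), pdev);
			pci_free_irq_vectors(pdev);
			return rc;
		}
	}
	return nvec;			/* number of reply queues to use */
}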
--git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 75ac662793a3..c47f4b349bac 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -35,7 +35,7 @@ static const char * osst_version = "0.99.4"; #include <linux/fs.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/proc_fs.h> #include <linux/mm.h> #include <linux/slab.h> diff --git a/drivers/scsi/qedf/Kconfig b/drivers/scsi/qedf/Kconfig new file mode 100644 index 000000000000..943f5ee45807 --- /dev/null +++ b/drivers/scsi/qedf/Kconfig @@ -0,0 +1,11 @@ +config QEDF + tristate "QLogic QEDF 25/40/100Gb FCoE Initiator Driver Support" + depends on PCI && SCSI + depends on QED + depends on LIBFC + depends on LIBFCOE + select QED_LL2 + select QED_FCOE + ---help--- + This driver supports FCoE offload for the QLogic FastLinQ + 41000 Series Converged Network Adapters. diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile new file mode 100644 index 000000000000..64e9f507ce32 --- /dev/null +++ b/drivers/scsi/qedf/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_QEDF) := qedf.o +qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \ + qedf_attr.o qedf_els.o + +qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h new file mode 100644 index 000000000000..96346a1b1515 --- /dev/null +++ b/drivers/scsi/qedf/qedf.h @@ -0,0 +1,545 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#ifndef _QEDFC_H_ +#define _QEDFC_H_ + +#include <scsi/libfcoe.h> +#include <scsi/libfc.h> +#include <scsi/fc/fc_fip.h> +#include <scsi/fc/fc_fc2.h> +#include <scsi/scsi_tcq.h> +#include <scsi/fc_encode.h> +#include <linux/version.h> + + +/* qedf_hsi.h needs to before included any qed includes */ +#include "qedf_hsi.h" + +#include <linux/qed/qed_if.h> +#include <linux/qed/qed_fcoe_if.h> +#include <linux/qed/qed_ll2_if.h> +#include "qedf_version.h" +#include "qedf_dbg.h" + +/* Helpers to extract upper and lower 32-bits of pointer */ +#define U64_HI(val) ((u32)(((u64)(val)) >> 32)) +#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff)) + +#define QEDF_DESCR "QLogic FCoE Offload Driver" +#define QEDF_MODULE_NAME "qedf" + +#define QEDF_MIN_XID 0 +#define QEDF_MAX_SCSI_XID (NUM_TASKS_PER_CONNECTION - 1) +#define QEDF_MAX_ELS_XID 4095 +#define QEDF_FLOGI_RETRY_CNT 3 +#define QEDF_RPORT_RETRY_CNT 255 +#define QEDF_MAX_SESSIONS 1024 +#define QEDF_MAX_PAYLOAD 2048 +#define QEDF_MAX_BDS_PER_CMD 256 +#define QEDF_MAX_BD_LEN 0xffff +#define QEDF_BD_SPLIT_SZ 0x1000 +#define QEDF_PAGE_SIZE 4096 +#define QED_HW_DMA_BOUNDARY 0xfff +#define QEDF_MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1) +#define QEDF_MFS (QEDF_MAX_PAYLOAD + \ + sizeof(struct fc_frame_header)) +#define QEDF_MAX_NPIV 64 +#define QEDF_TM_TIMEOUT 10 +#define QEDF_ABORT_TIMEOUT 10 +#define QEDF_CLEANUP_TIMEOUT 10 +#define QEDF_MAX_CDB_LEN 16 + +#define UPSTREAM_REMOVE 1 +#define UPSTREAM_KEEP 1 + +struct qedf_mp_req { + uint8_t tm_flags; + + uint32_t req_len; + void *req_buf; + dma_addr_t req_buf_dma; + struct fcoe_sge *mp_req_bd; + dma_addr_t mp_req_bd_dma; + struct fc_frame_header req_fc_hdr; + + uint32_t resp_len; + void *resp_buf; + dma_addr_t resp_buf_dma; + struct fcoe_sge *mp_resp_bd; + dma_addr_t mp_resp_bd_dma; + struct fc_frame_header resp_fc_hdr; +}; + +struct qedf_els_cb_arg { + struct qedf_ioreq 
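qedf.h above defines U64_HI()/U64_LO() to split a 64-bit value, typically a DMA address, into the two 32-bit halves written into hardware descriptors. A tiny stand-alone check of the same arithmetic, using stdint types in place of the kernel's u32/u64:

#include <stdio.h>
#include <stdint.h>

#define U64_HI(val) ((uint32_t)(((uint64_t)(val)) >> 32))
#define U64_LO(val) ((uint32_t)(((uint64_t)(val)) & 0xffffffff))

int main(void)
{
	uint64_t dma = 0x0000123456789abcULL;

	printf("hi=0x%08x lo=0x%08x\n",
	       (unsigned)U64_HI(dma), (unsigned)U64_LO(dma));
	return 0;
}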
*aborted_io_req; + struct qedf_ioreq *io_req; + u8 op; /* Used to keep track of ELS op */ + uint16_t l2_oxid; + u32 offset; /* Used for sequence cleanup */ + u8 r_ctl; /* Used for sequence cleanup */ +}; + +enum qedf_ioreq_event { + QEDF_IOREQ_EV_ABORT_SUCCESS, + QEDF_IOREQ_EV_ABORT_FAILED, + QEDF_IOREQ_EV_SEND_RRQ, + QEDF_IOREQ_EV_ELS_TMO, + QEDF_IOREQ_EV_ELS_ERR_DETECT, + QEDF_IOREQ_EV_ELS_FLUSH, + QEDF_IOREQ_EV_CLEANUP_SUCCESS, + QEDF_IOREQ_EV_CLEANUP_FAILED, +}; + +#define FC_GOOD 0 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) +#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) +struct qedf_ioreq { + struct list_head link; + uint16_t xid; + struct scsi_cmnd *sc_cmd; + bool use_slowpath; /* Use slow SGL for this I/O */ +#define QEDF_SCSI_CMD 1 +#define QEDF_TASK_MGMT_CMD 2 +#define QEDF_ABTS 3 +#define QEDF_ELS 4 +#define QEDF_CLEANUP 5 +#define QEDF_SEQ_CLEANUP 6 + u8 cmd_type; +#define QEDF_CMD_OUTSTANDING 0x0 +#define QEDF_CMD_IN_ABORT 0x1 +#define QEDF_CMD_IN_CLEANUP 0x2 +#define QEDF_CMD_SRR_SENT 0x3 + u8 io_req_flags; + struct qedf_rport *fcport; + unsigned long flags; + enum qedf_ioreq_event event; + size_t data_xfer_len; + struct kref refcount; + struct qedf_cmd_mgr *cmd_mgr; + struct io_bdt *bd_tbl; + struct delayed_work timeout_work; + struct completion tm_done; + struct completion abts_done; + struct fcoe_task_context *task; + int idx; +/* + * Need to allocate enough room for both sense data and FCP response data + * which has a max length of 8 bytes according to spec. + */ +#define QEDF_SCSI_SENSE_BUFFERSIZE (SCSI_SENSE_BUFFERSIZE + 8) + uint8_t *sense_buffer; + dma_addr_t sense_buffer_dma; + u32 fcp_resid; + u32 fcp_rsp_len; + u32 fcp_sns_len; + u8 cdb_status; + u8 fcp_status; + u8 fcp_rsp_code; + u8 scsi_comp_flags; +#define QEDF_MAX_REUSE 0xfff + u16 reuse_count; + struct qedf_mp_req mp_req; + void (*cb_func)(struct qedf_els_cb_arg *cb_arg); + struct qedf_els_cb_arg *cb_arg; + int fp_idx; + unsigned int cpu; + unsigned int int_cpu; +#define QEDF_IOREQ_SLOW_SGE 0 +#define QEDF_IOREQ_SINGLE_SGE 1 +#define QEDF_IOREQ_FAST_SGE 2 + u8 sge_type; + struct delayed_work rrq_work; + + /* Used for sequence level recovery; i.e. REC/SRR */ + uint32_t rx_buf_off; + uint32_t tx_buf_off; + uint32_t rx_id; + uint32_t task_retry_identifier; + + /* + * Used to tell if we need to return a SCSI command + * during some form of error processing. 
+ */ + bool return_scsi_cmd_on_abts; +}; + +extern struct workqueue_struct *qedf_io_wq; + +struct qedf_rport { + spinlock_t rport_lock; +#define QEDF_RPORT_SESSION_READY 1 +#define QEDF_RPORT_UPLOADING_CONNECTION 2 + unsigned long flags; + unsigned long retry_delay_timestamp; + struct fc_rport *rport; + struct fc_rport_priv *rdata; + struct qedf_ctx *qedf; + u32 handle; /* Handle from qed */ + u32 fw_cid; /* fw_cid from qed */ + void __iomem *p_doorbell; + /* Send queue management */ + atomic_t free_sqes; + atomic_t num_active_ios; + struct fcoe_wqe *sq; + dma_addr_t sq_dma; + u16 sq_prod_idx; + u16 fw_sq_prod_idx; + u16 sq_con_idx; + u32 sq_mem_size; + void *sq_pbl; + dma_addr_t sq_pbl_dma; + u32 sq_pbl_size; + u32 sid; +#define QEDF_RPORT_TYPE_DISK 1 +#define QEDF_RPORT_TYPE_TAPE 2 + uint dev_type; /* Disk or tape */ + struct list_head peers; +}; + +/* Used to contain LL2 skb's in ll2_skb_list */ +struct qedf_skb_work { + struct work_struct work; + struct sk_buff *skb; + struct qedf_ctx *qedf; +}; + +struct qedf_fastpath { +#define QEDF_SB_ID_NULL 0xffff + u16 sb_id; + struct qed_sb_info *sb_info; + struct qedf_ctx *qedf; + /* Keep track of number of completions on this fastpath */ + unsigned long completions; + uint32_t cq_num_entries; +}; + +/* Used to pass fastpath information needed to process CQEs */ +struct qedf_io_work { + struct work_struct work; + struct fcoe_cqe cqe; + struct qedf_ctx *qedf; + struct fc_frame *fp; +}; + +struct qedf_glbl_q_params { + u64 hw_p_cq; /* Completion queue PBL */ + u64 hw_p_rq; /* Request queue PBL */ + u64 hw_p_cmdq; /* Command queue PBL */ +}; + +struct global_queue { + struct fcoe_cqe *cq; + dma_addr_t cq_dma; + u32 cq_mem_size; + u32 cq_cons_idx; /* Completion queue consumer index */ + u32 cq_prod_idx; + + void *cq_pbl; + dma_addr_t cq_pbl_dma; + u32 cq_pbl_size; +}; + +/* I/O tracing entry */ +#define QEDF_IO_TRACE_SIZE 2048 +struct qedf_io_log { +#define QEDF_IO_TRACE_REQ 0 +#define QEDF_IO_TRACE_RSP 1 + uint8_t direction; + uint16_t task_id; + uint32_t port_id; /* Remote port fabric ID */ + int lun; + char op; /* SCSI CDB */ + uint8_t lba[4]; + unsigned int bufflen; /* SCSI buffer length */ + unsigned int sg_count; /* Number of SG elements */ + int result; /* Result passed back to mid-layer */ + unsigned long jiffies; /* Time stamp when I/O logged */ + int refcount; /* Reference count for task id */ + unsigned int req_cpu; /* CPU that the task is queued on */ + unsigned int int_cpu; /* Interrupt CPU that the task is received on */ + unsigned int rsp_cpu; /* CPU that task is returned on */ + u8 sge_type; /* Did we take the slow, single or fast SGE path */ +}; + +/* Number of entries in BDQ */ +#define QEDF_BDQ_SIZE 256 +#define QEDF_BDQ_BUF_SIZE 2072 + +/* DMA coherent buffers for BDQ */ +struct qedf_bdq_buf { + void *buf_addr; + dma_addr_t buf_dma; +}; + +/* Main adapter struct */ +struct qedf_ctx { + struct qedf_dbg_ctx dbg_ctx; + struct fcoe_ctlr ctlr; + struct fc_lport *lport; + u8 data_src_addr[ETH_ALEN]; +#define QEDF_LINK_DOWN 0 +#define QEDF_LINK_UP 1 + atomic_t link_state; +#define QEDF_DCBX_PENDING 0 +#define QEDF_DCBX_DONE 1 + atomic_t dcbx; + uint16_t max_scsi_xid; + uint16_t max_els_xid; +#define QEDF_NULL_VLAN_ID -1 +#define QEDF_FALLBACK_VLAN 1002 +#define QEDF_DEFAULT_PRIO 3 + int vlan_id; + uint vlan_hw_insert:1; + struct qed_dev *cdev; + struct qed_dev_fcoe_info dev_info; + struct qed_int_info int_info; + uint16_t last_command; + spinlock_t hba_lock; + struct pci_dev *pdev; + u64 wwnn; + u64 wwpn; + u8 __aligned(16) 
mac[ETH_ALEN]; + struct list_head fcports; + atomic_t num_offloads; + unsigned int curr_conn_id; + struct workqueue_struct *ll2_recv_wq; + struct workqueue_struct *link_update_wq; + struct delayed_work link_update; + struct delayed_work link_recovery; + struct completion flogi_compl; + struct completion fipvlan_compl; + + /* + * Used to tell if we're in the window where we are waiting for + * the link to come back up before informting fcoe that the link is + * done. + */ + atomic_t link_down_tmo_valid; +#define QEDF_TIMER_INTERVAL (1 * HZ) + struct timer_list timer; /* One second book keeping timer */ +#define QEDF_DRAIN_ACTIVE 1 +#define QEDF_LL2_STARTED 2 +#define QEDF_UNLOADING 3 +#define QEDF_GRCDUMP_CAPTURE 4 +#define QEDF_IN_RECOVERY 5 +#define QEDF_DBG_STOP_IO 6 + unsigned long flags; /* Miscellaneous state flags */ + int fipvlan_retries; + u8 num_queues; + struct global_queue **global_queues; + /* Pointer to array of queue structures */ + struct qedf_glbl_q_params *p_cpuq; + /* Physical address of array of queue structures */ + dma_addr_t hw_p_cpuq; + + struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE]; + void *bdq_pbl; + dma_addr_t bdq_pbl_dma; + size_t bdq_pbl_mem_size; + void *bdq_pbl_list; + dma_addr_t bdq_pbl_list_dma; + u8 bdq_pbl_list_num_entries; + void __iomem *bdq_primary_prod; + void __iomem *bdq_secondary_prod; + uint16_t bdq_prod_idx; + + /* Structure for holding all the fastpath for this qedf_ctx */ + struct qedf_fastpath *fp_array; + struct qed_fcoe_tid tasks; + struct qedf_cmd_mgr *cmd_mgr; + /* Holds the PF parameters we pass to qed to start he FCoE function */ + struct qed_pf_params pf_params; + /* Used to time middle path ELS and TM commands */ + struct workqueue_struct *timer_work_queue; + +#define QEDF_IO_WORK_MIN 64 + mempool_t *io_mempool; + struct workqueue_struct *dpc_wq; + + u32 slow_sge_ios; + u32 fast_sge_ios; + u32 single_sge_ios; + + uint8_t *grcdump; + uint32_t grcdump_size; + + struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE]; + spinlock_t io_trace_lock; + uint16_t io_trace_idx; + + bool stop_io_on_error; + + u32 flogi_cnt; + u32 flogi_failed; + + /* Used for fc statistics */ + u64 input_requests; + u64 output_requests; + u64 control_requests; + u64 packet_aborts; + u64 alloc_failures; +}; + +struct io_bdt { + struct qedf_ioreq *io_req; + struct fcoe_sge *bd_tbl; + dma_addr_t bd_tbl_dma; + u16 bd_valid; +}; + +struct qedf_cmd_mgr { + struct qedf_ctx *qedf; + u16 idx; + struct io_bdt **io_bdt_pool; +#define FCOE_PARAMS_NUM_TASKS 4096 + struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS]; + spinlock_t lock; + atomic_t free_list_cnt; +}; + +/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info + * Usage: + * + * void *ptr; + * ptr = qedf_get_task_mem(&qedf->tasks, 128); + */ +static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid) +{ + return (void *)(info->blocks[tid / info->num_tids_per_block] + + (tid % info->num_tids_per_block) * info->size); +} + +static inline void qedf_stop_all_io(struct qedf_ctx *qedf) +{ + set_bit(QEDF_DBG_STOP_IO, &qedf->flags); +} + +/* + * Externs + */ +#define QEDF_DEFAULT_LOG_MASK 0x3CFB6 +extern const struct qed_fcoe_ops *qed_ops; +extern uint qedf_dump_frames; +extern uint qedf_io_tracing; +extern uint qedf_stop_io_on_error; +extern uint qedf_link_down_tmo; +#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */ +extern bool qedf_retry_delay; +extern uint qedf_debug; + +extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf); +extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr); +extern int 
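The qedf_get_task_mem() helper above resolves a firmware task id (TID) into its per-task context memory, which qed hands out as an array of fixed-size blocks: the block index is tid / num_tids_per_block and the byte offset inside that block is (tid % num_tids_per_block) * size. A small worked sketch of the same arithmetic, using made-up block parameters (qed fills in the real qed_fcoe_tid values at start-up):

static void demo_task_lookup(struct qed_fcoe_tid *info)
{
	/*
	 * Suppose info->num_tids_per_block == 64 and info->size == 256.
	 * Then tid 130 lands in blocks[130 / 64] == blocks[2], at byte
	 * offset (130 % 64) * 256 == 512 within that block.
	 */
	void *task_ctx = qedf_get_task_mem(info, 130);

	(void)task_ctx;	/* a real caller casts this to its task context type */
}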
qedf_queuecommand(struct Scsi_Host *host, + struct scsi_cmnd *sc_cmd); +extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); +extern void qedf_update_src_mac(struct fc_lport *lport, u8 *addr); +extern u8 *qedf_get_src_mac(struct fc_lport *lport); +extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb); +extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf); +extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req); +extern void qedf_process_warning_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern void qedf_process_error_detect(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun); +extern void qedf_release_cmd(struct kref *ref); +extern int qedf_initiate_abts(struct qedf_ioreq *io_req, + bool return_scsi_cmd_on_abts); +extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req); +extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, + u8 cmd_type); + +extern struct device_attribute *qedf_host_attrs[]; +extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + unsigned int timer_msec); +extern int qedf_init_mp_req(struct qedf_ioreq *io_req); +extern void qedf_init_mp_task(struct qedf_ioreq *io_req, + struct fcoe_task_context *task_ctx); +extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, + u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset); +extern void qedf_ring_doorbell(struct qedf_rport *fcport); +extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *els_req); +extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req); +extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp); +extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req, + bool return_scsi_cmd_on_abts); +extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags); +extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req); +extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe); +extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + int result); +extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id); +extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf); +extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf); +extern void qedf_capture_grc_dump(struct qedf_ctx *qedf); +extern void qedf_wait_for_upload(struct qedf_ctx *qedf); +extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, + struct fcoe_cqe *cqe); +extern void qedf_restart_rport(struct qedf_rport *fcport); +extern int qedf_send_rec(struct qedf_ioreq *orig_io_req); +extern int qedf_post_io_req(struct qedf_rport *fcport, + struct qedf_ioreq *io_req); +extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern int qedf_send_flogi(struct qedf_ctx *qedf); +extern void qedf_fp_io_handler(struct work_struct *work); + +#define FCOE_WORD_TO_BYTE 4 +#define QEDF_MAX_TASK_NUM 0xFFFF + +struct fip_vlan { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + } desc; +}; + +/* SQ/CQ Sizes */ +#define 
GBL_RSVD_TASKS 16 +#define NUM_TASKS_PER_CONNECTION 1024 +#define NUM_RW_TASKS_PER_CONNECTION 512 +#define FCOE_PARAMS_CQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS + +#define FCOE_PARAMS_CMDQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS +#define SQ_NUM_ENTRIES NUM_TASKS_PER_CONNECTION + +#define QEDF_FCOE_PARAMS_GL_RQ_PI 0 +#define QEDF_FCOE_PARAMS_GL_CMD_PI 1 + +#define QEDF_READ (1 << 1) +#define QEDF_WRITE (1 << 0) +#define MAX_FIBRE_LUNS 0xffffffff + +#define QEDF_MAX_NUM_CQS 8 + +/* + * PCI function probe defines + */ +/* Probe/remove called during normal PCI probe */ +#define QEDF_MODE_NORMAL 0 +/* Probe/remove called from qed error recovery */ +#define QEDF_MODE_RECOVERY 1 + +#define SUPPORTED_25000baseKR_Full (1<<27) +#define SUPPORTED_50000baseKR2_Full (1<<28) +#define SUPPORTED_100000baseKR4_Full (1<<29) +#define SUPPORTED_100000baseCR4_Full (1<<30) + +#endif diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c new file mode 100644 index 000000000000..47720611ad2c --- /dev/null +++ b/drivers/scsi/qedf/qedf_attr.c @@ -0,0 +1,165 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include "qedf.h" + +static ssize_t +qedf_fcoe_mac_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_lport *lport = shost_priv(class_to_shost(dev)); + u32 port_id; + u8 lport_src_id[3]; + u8 fcoe_mac[6]; + + port_id = fc_host_port_id(lport->host); + lport_src_id[2] = (port_id & 0x000000FF); + lport_src_id[1] = (port_id & 0x0000FF00) >> 8; + lport_src_id[0] = (port_id & 0x00FF0000) >> 16; + fc_fcoe_set_mac(fcoe_mac, lport_src_id); + + return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac); +} + +static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL); + +struct device_attribute *qedf_host_attrs[] = { + &dev_attr_fcoe_mac, + NULL, +}; + +extern const struct qed_fcoe_ops *qed_ops; + +inline bool qedf_is_vport(struct qedf_ctx *qedf) +{ + return (!(qedf->lport->vport == NULL)); +} + +/* Get base qedf for physical port from vport */ +static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf) +{ + struct fc_lport *lport; + struct fc_lport *base_lport; + + if (!(qedf_is_vport(qedf))) + return NULL; + + lport = qedf->lport; + base_lport = shost_priv(vport_to_shost(lport->vport)); + return (struct qedf_ctx *)(lport_priv(base_lport)); +} + +void qedf_capture_grc_dump(struct qedf_ctx *qedf) +{ + struct qedf_ctx *base_qedf; + + /* Make sure we use the base qedf to take the GRC dump */ + if (qedf_is_vport(qedf)) + base_qedf = qedf_get_base_qedf(qedf); + else + base_qedf = qedf; + + if (test_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags)) { + QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_INFO, + "GRC Dump already captured.\n"); + return; + } + + + qedf_get_grc_dump(base_qedf->cdev, qed_ops->common, + &base_qedf->grcdump, &base_qedf->grcdump_size); + QEDF_ERR(&(base_qedf->dbg_ctx), "GRC Dump captured.\n"); + set_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags); + qedf_uevent_emit(base_qedf->lport->host, QEDF_UEVENT_CODE_GRCDUMP, + NULL); +} + +static ssize_t +qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + ssize_t ret = 0; + struct fc_lport *lport = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qedf_ctx *qedf = lport_priv(lport); + + if 
(test_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags)) { + ret = memory_read_from_buffer(buf, count, &off, + qedf->grcdump, qedf->grcdump_size); + } else { + QEDF_ERR(&(qedf->dbg_ctx), "GRC Dump not captured!\n"); + } + + return ret; +} + +static ssize_t +qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + struct fc_lport *lport = NULL; + struct qedf_ctx *qedf = NULL; + long reading; + int ret = 0; + char msg[40]; + + if (off != 0) + return ret; + + + lport = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + qedf = lport_priv(lport); + + buf[1] = 0; + ret = kstrtol(buf, 10, &reading); + if (ret) { + QEDF_ERR(&(qedf->dbg_ctx), "Invalid input, err(%d)\n", ret); + return ret; + } + + memset(msg, 0, sizeof(msg)); + switch (reading) { + case 0: + memset(qedf->grcdump, 0, qedf->grcdump_size); + clear_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags); + break; + case 1: + qedf_capture_grc_dump(qedf); + break; + } + + return count; +} + +static struct bin_attribute sysfs_grcdump_attr = { + .attr = { + .name = "grcdump", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = qedf_sysfs_read_grcdump, + .write = qedf_sysfs_write_grcdump, +}; + +static struct sysfs_bin_attrs bin_file_entries[] = { + {"grcdump", &sysfs_grcdump_attr}, + {NULL}, +}; + +void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf) +{ + qedf_create_sysfs_attr(qedf->lport->host, bin_file_entries); +} + +void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf) +{ + qedf_remove_sysfs_attr(qedf->lport->host, bin_file_entries); +} diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c new file mode 100644 index 000000000000..e023f5d0dc12 --- /dev/null +++ b/drivers/scsi/qedf/qedf_dbg.c @@ -0,0 +1,195 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include "qedf_dbg.h" +#include <linux/vmalloc.h> + +void +qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (likely(qedf) && likely(qedf->pdev)) + pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), + nfunc, line, qedf->host_no, &vaf); + else + pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + + va_end(va); +} + +void +qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedf_debug & QEDF_LOG_WARN)) + goto ret; + + if (likely(qedf) && likely(qedf->pdev)) + pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), + nfunc, line, qedf->host_no, &vaf); + else + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + +ret: + va_end(va); +} + +void +qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...) 
+{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedf_debug & QEDF_LOG_NOTICE)) + goto ret; + + if (likely(qedf) && likely(qedf->pdev)) + pr_notice("[%s]:[%s:%d]:%d: %pV", + dev_name(&(qedf->pdev->dev)), nfunc, line, + qedf->host_no, &vaf); + else + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + +ret: + va_end(va); +} + +void +qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + u32 level, const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedf_debug & level)) + goto ret; + + if (likely(qedf) && likely(qedf->pdev)) + pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), + nfunc, line, qedf->host_no, &vaf); + else + pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + +ret: + va_end(va); +} + +int +qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len) +{ + *buf = vmalloc(len); + if (!(*buf)) + return -ENOMEM; + + memset(*buf, 0, len); + return 0; +} + +void +qedf_free_grc_dump_buf(uint8_t **buf) +{ + vfree(*buf); + *buf = NULL; +} + +int +qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common, + u8 **buf, uint32_t *grcsize) +{ + if (!*buf) + return -EINVAL; + + return common->dbg_grc(cdev, *buf, grcsize); +} + +void +qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg) +{ + char event_string[40]; + char *envp[] = {event_string, NULL}; + + memset(event_string, 0, sizeof(event_string)); + switch (code) { + case QEDF_UEVENT_CODE_GRCDUMP: + if (msg) + strncpy(event_string, msg, strlen(msg)); + else + sprintf(event_string, "GRCDUMP=%u", shost->host_no); + break; + default: + /* do nothing */ + break; + } + + kobject_uevent_env(&shost->shost_gendev.kobj, KOBJ_CHANGE, envp); +} + +int +qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + int ret = 0; + + for (; iter->name; iter++) { + ret = sysfs_create_bin_file(&shost->shost_gendev.kobj, + iter->attr); + if (ret) + pr_err("Unable to create sysfs %s attr, err(%d).\n", + iter->name, ret); + } + return ret; +} + +void +qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + for (; iter->name; iter++) + sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr); +} diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h new file mode 100644 index 000000000000..23bd70628a2f --- /dev/null +++ b/drivers/scsi/qedf/qedf_dbg.h @@ -0,0 +1,154 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
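qedf publishes the captured GRC dump as a binary sysfs file: a bin_attribute whose ->read() copies out of the in-memory dump buffer with memory_read_from_buffer(), registered against the Scsi_Host kobject by qedf_create_sysfs_attr() above via sysfs_create_bin_file(). A minimal stand-alone sketch of that pattern; the buffer and attribute names here are hypothetical.

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static u8 demo_blob[64];	/* stand-in for a captured dump buffer */

static ssize_t demo_blob_read(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *ba, char *buf,
			      loff_t off, size_t count)
{
	/* Copies at most count bytes starting at off, like the grcdump read. */
	return memory_read_from_buffer(buf, count, &off, demo_blob,
				       sizeof(demo_blob));
}

static struct bin_attribute demo_blob_attr = {
	.attr = { .name = "demo_blob", .mode = S_IRUSR },
	.size = sizeof(demo_blob),
	.read = demo_blob_read,
};

static int demo_register(struct kobject *kobj)
{
	/* e.g. &shost->shost_gendev.kobj, as in the driver above */
	return sysfs_create_bin_file(kobj, &demo_blob_attr);
}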
+ */ +#ifndef _QEDF_DBG_H_ +#define _QEDF_DBG_H_ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/string.h> +#include <linux/version.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <scsi/scsi_transport.h> +#include <linux/fs.h> + +#include <linux/qed/common_hsi.h> +#include <linux/qed/qed_if.h> + +extern uint qedf_debug; + +/* Debug print level definitions */ +#define QEDF_LOG_DEFAULT 0x1 /* Set default logging mask */ +#define QEDF_LOG_INFO 0x2 /* + * Informational logs, + * MAC address, WWPN, WWNN + */ +#define QEDF_LOG_DISC 0x4 /* Init, discovery, rport */ +#define QEDF_LOG_LL2 0x8 /* LL2, VLAN logs */ +#define QEDF_LOG_CONN 0x10 /* Connection setup, cleanup */ +#define QEDF_LOG_EVT 0x20 /* Events, link, mtu */ +#define QEDF_LOG_TIMER 0x40 /* Timer events */ +#define QEDF_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */ +#define QEDF_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */ +#define QEDF_LOG_UNSOL 0x200 /* unsolicited event logs */ +#define QEDF_LOG_IO 0x400 /* scsi cmd, completion */ +#define QEDF_LOG_MQ 0x800 /* Multi Queue logs */ +#define QEDF_LOG_BSG 0x1000 /* BSG logs */ +#define QEDF_LOG_DEBUGFS 0x2000 /* debugFS logs */ +#define QEDF_LOG_LPORT 0x4000 /* lport logs */ +#define QEDF_LOG_ELS 0x8000 /* ELS logs */ +#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */ +#define QEDF_LOG_SESS 0x20000 /* Conection setup, cleanup */ +#define QEDF_LOG_TID 0x80000 /* + * FW TID context acquire + * free + */ +#define QEDF_TRACK_TID 0x100000 /* + * Track TID state. To be + * enabled only at module load + * and not run-time. + */ +#define QEDF_TRACK_CMD_LIST 0x300000 /* + * Track active cmd list nodes, + * done with reference to TID, + * hence TRACK_TID also enabled. + */ +#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */ +#define QEDF_LOG_WARN 0x80000000 /* Warning logs */ + +/* Debug context structure */ +struct qedf_dbg_ctx { + unsigned int host_no; + struct pci_dev *pdev; +#ifdef CONFIG_DEBUG_FS + struct dentry *bdf_dentry; +#endif +}; + +#define QEDF_ERR(pdev, fmt, ...) \ + qedf_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDF_WARN(pdev, fmt, ...) \ + qedf_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDF_NOTICE(pdev, fmt, ...) \ + qedf_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDF_INFO(pdev, level, fmt, ...) 
\ + qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \ + ## __VA_ARGS__) + +extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...); +extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *, ...); +extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, + u32 line, const char *, ...); +extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + u32 info, const char *fmt, ...); + +/* GRC Dump related defines */ + +struct Scsi_Host; + +#define QEDF_UEVENT_CODE_GRCDUMP 0 + +struct sysfs_bin_attrs { + char *name; + struct bin_attribute *attr; +}; + +extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len); +extern void qedf_free_grc_dump_buf(uint8_t **buf); +extern int qedf_get_grc_dump(struct qed_dev *cdev, + const struct qed_common_ops *common, uint8_t **buf, + uint32_t *grcsize); +extern void qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg); +extern int qedf_create_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); +extern void qedf_remove_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); + +#ifdef CONFIG_DEBUG_FS +/* DebugFS related code */ +struct qedf_list_of_funcs { + char *oper_str; + ssize_t (*oper_func)(struct qedf_dbg_ctx *qedf); +}; + +struct qedf_debugfs_ops { + char *name; + struct qedf_list_of_funcs *qedf_funcs; +}; + +#define qedf_dbg_fileops(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = simple_open, \ + .read = drv##_dbg_##ops##_cmd_read, \ + .write = drv##_dbg_##ops##_cmd_write \ +} + +/* Used for debugfs sequential files */ +#define qedf_dbg_fileops_seq(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = drv##_dbg_##ops##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +extern void qedf_dbg_host_init(struct qedf_dbg_ctx *qedf, + struct qedf_debugfs_ops *dops, + struct file_operations *fops); +extern void qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf); +extern void qedf_dbg_init(char *drv_name); +extern void qedf_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS */ + +#endif /* _QEDF_DBG_H_ */ diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c new file mode 100644 index 000000000000..cb08b625c594 --- /dev/null +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -0,0 +1,460 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
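The QEDF_WARN/QEDF_NOTICE/QEDF_INFO macros above only print when the corresponding bit (or, for QEDF_INFO, the caller-supplied level) is set in the module-wide qedf_debug mask, while QEDF_ERR is unconditional. A short illustration, assuming the QEDF_LOG_* values and struct qedf_ctx defined earlier in this series:

static void demo_logging(struct qedf_ctx *qedf)
{
	/* Enable only discovery and ELS logging categories. */
	qedf_debug = QEDF_LOG_DISC | QEDF_LOG_ELS;

	/* Emitted: QEDF_LOG_ELS is set in qedf_debug. */
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, "ELS tracing enabled\n");

	/* Suppressed: QEDF_LOG_LL2 is not set in qedf_debug. */
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, "LL2 tracing suppressed\n");
}

The same mask can be changed at run time through the "debug" debugfs node added later in this patch.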
+ */ +#ifdef CONFIG_DEBUG_FS + +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <linux/module.h> + +#include "qedf.h" +#include "qedf_dbg.h" + +static struct dentry *qedf_dbg_root; + +/** + * qedf_dbg_host_init - setup the debugfs file for the pf + * @pf: the pf that is starting up + **/ +void +qedf_dbg_host_init(struct qedf_dbg_ctx *qedf, + struct qedf_debugfs_ops *dops, + struct file_operations *fops) +{ + char host_dirname[32]; + struct dentry *file_dentry = NULL; + + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n"); + /* create pf dir */ + sprintf(host_dirname, "host%u", qedf->host_no); + qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root); + if (!qedf->bdf_dentry) + return; + + /* create debugfs files */ + while (dops) { + if (!(dops->name)) + break; + + file_dentry = debugfs_create_file(dops->name, 0600, + qedf->bdf_dentry, qedf, + fops); + if (!file_dentry) { + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, + "Debugfs entry %s creation failed\n", + dops->name); + debugfs_remove_recursive(qedf->bdf_dentry); + return; + } + dops++; + fops++; + } +} + +/** + * qedf_dbg_host_exit - clear out the pf's debugfs entries + * @pf: the pf that is stopping + **/ +void +qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf) +{ + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host " + "entry\n"); + /* remove debugfs entries of this PF */ + debugfs_remove_recursive(qedf->bdf_dentry); + qedf->bdf_dentry = NULL; +} + +/** + * qedf_dbg_init - start up debugfs for the driver + **/ +void +qedf_dbg_init(char *drv_name) +{ + QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Creating debugfs root node\n"); + + /* create qed dir in root of debugfs. NULL means debugfs root */ + qedf_dbg_root = debugfs_create_dir(drv_name, NULL); + if (!qedf_dbg_root) + QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Init of debugfs " + "failed\n"); +} + +/** + * qedf_dbg_exit - clean out the driver's debugfs entries + **/ +void +qedf_dbg_exit(void) +{ + QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Destroying debugfs root " + "entry\n"); + + /* remove qed dir in root of debugfs */ + debugfs_remove_recursive(qedf_dbg_root); + qedf_dbg_root = NULL; +} + +struct qedf_debugfs_ops qedf_debugfs_ops[] = { + { "fp_int", NULL }, + { "io_trace", NULL }, + { "debug", NULL }, + { "stop_io_on_error", NULL}, + { "driver_stats", NULL}, + { "clear_stats", NULL}, + { "offload_stats", NULL}, + /* This must be last */ + { NULL, NULL } +}; + +DECLARE_PER_CPU(struct qedf_percpu_iothread_s, qedf_percpu_iothreads); + +static ssize_t +qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + size_t cnt = 0; + int id; + struct qedf_fastpath *fp = NULL; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + + cnt = sprintf(buffer, "\nFastpath I/O completions\n\n"); + + for (id = 0; id < qedf->num_queues; id++) { + fp = &(qedf->fp_array[id]); + if (fp->sb_id == QEDF_SB_ID_NULL) + continue; + cnt += sprintf((buffer + cnt), "#%d: %lu\n", id, + fp->completions); + } + + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static ssize_t +qedf_dbg_fp_int_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + if (!count || *ppos) + return 0; + + return count; +} + +static ssize_t +qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + int cnt; + struct 
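qedf_dbg_host_init() above creates one debugfs directory per host and populates it from a table of file_operations; the read-only nodes in this driver (io_trace, driver_stats, offload_stats) use the seq_file single_open() pattern. A minimal stand-alone sketch of that combination, with hypothetical names:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *demo_dbg_root;

static int demo_stats_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "demo statistics would be printed here\n");
	return 0;
}

static int demo_stats_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries the pointer passed to debugfs_create_file() */
	return single_open(file, demo_stats_show, inode->i_private);
}

static const struct file_operations demo_stats_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_stats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static void demo_debugfs_init(void)
{
	demo_dbg_root = debugfs_create_dir("demo", NULL);
	debugfs_create_file("stats", 0444, demo_dbg_root, NULL,
			    &demo_stats_fops);
}

static void demo_debugfs_exit(void)
{
	debugfs_remove_recursive(demo_dbg_root);
	demo_dbg_root = NULL;
}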
qedf_dbg_ctx *qedf = + (struct qedf_dbg_ctx *)filp->private_data; + + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n"); + cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug); + + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static ssize_t +qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + uint32_t val; + void *kern_buf; + int rval; + struct qedf_dbg_ctx *qedf = + (struct qedf_dbg_ctx *)filp->private_data; + + if (!count || *ppos) + return 0; + + kern_buf = memdup_user(buffer, count); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + rval = kstrtouint(kern_buf, 10, &val); + kfree(kern_buf); + if (rval) + return rval; + + if (val == 1) + qedf_debug = QEDF_DEFAULT_LOG_MASK; + else + qedf_debug = val; + + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val); + return count; +} + +static ssize_t +qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int cnt; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + cnt = sprintf(buffer, "%s\n", + qedf->stop_io_on_error ? "true" : "false"); + + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static ssize_t +qedf_dbg_stop_io_on_error_cmd_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + void *kern_buf; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, + dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + + if (!count || *ppos) + return 0; + + kern_buf = memdup_user(buffer, 6); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + if (strncmp(kern_buf, "false", 5) == 0) + qedf->stop_io_on_error = false; + else if (strncmp(kern_buf, "true", 4) == 0) + qedf->stop_io_on_error = true; + else if (strncmp(kern_buf, "now", 3) == 0) + /* Trigger from user to stop all I/O on this host */ + set_bit(QEDF_DBG_STOP_IO, &qedf->flags); + + kfree(kern_buf); + return count; +} + +static int +qedf_io_trace_show(struct seq_file *s, void *unused) +{ + int i, idx = 0; + struct qedf_ctx *qedf = s->private; + struct qedf_dbg_ctx *qedf_dbg = &qedf->dbg_ctx; + struct qedf_io_log *io_log; + unsigned long flags; + + if (!qedf_io_tracing) { + seq_puts(s, "I/O tracing not enabled.\n"); + goto out; + } + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + + spin_lock_irqsave(&qedf->io_trace_lock, flags); + idx = qedf->io_trace_idx; + for (i = 0; i < QEDF_IO_TRACE_SIZE; i++) { + io_log = &qedf->io_trace_buf[idx]; + seq_printf(s, "%d:", io_log->direction); + seq_printf(s, "0x%x:", io_log->task_id); + seq_printf(s, "0x%06x:", io_log->port_id); + seq_printf(s, "%d:", io_log->lun); + seq_printf(s, "0x%02x:", io_log->op); + seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0], + io_log->lba[1], io_log->lba[2], io_log->lba[3]); + seq_printf(s, "%d:", io_log->bufflen); + seq_printf(s, "%d:", io_log->sg_count); + seq_printf(s, "0x%08x:", io_log->result); + seq_printf(s, "%lu:", io_log->jiffies); + seq_printf(s, "%d:", io_log->refcount); + seq_printf(s, "%d:", io_log->req_cpu); + seq_printf(s, "%d:", io_log->int_cpu); + seq_printf(s, "%d:", io_log->rsp_cpu); + seq_printf(s, "%d\n", io_log->sge_type); + + idx++; + if (idx == QEDF_IO_TRACE_SIZE) + idx = 0; + } + 
spin_unlock_irqrestore(&qedf->io_trace_lock, flags); + +out: + return 0; +} + +static int +qedf_dbg_io_trace_open(struct inode *inode, struct file *file) +{ + struct qedf_dbg_ctx *qedf_dbg = inode->i_private; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + return single_open(file, qedf_io_trace_show, qedf); +} + +static int +qedf_driver_stats_show(struct seq_file *s, void *unused) +{ + struct qedf_ctx *qedf = s->private; + struct qedf_rport *fcport; + struct fc_rport_priv *rdata; + + seq_printf(s, "cmg_mgr free io_reqs: %d\n", + atomic_read(&qedf->cmd_mgr->free_list_cnt)); + seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios); + seq_printf(s, "single SGEs: %d\n", qedf->single_sge_ios); + seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios); + + seq_puts(s, "Offloaded ports:\n\n"); + + rcu_read_lock(); + list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { + rdata = fcport->rdata; + if (rdata == NULL) + continue; + seq_printf(s, "%06x: free_sqes: %d, num_active_ios: %d\n", + rdata->ids.port_id, atomic_read(&fcport->free_sqes), + atomic_read(&fcport->num_active_ios)); + } + rcu_read_unlock(); + + return 0; +} + +static int +qedf_dbg_driver_stats_open(struct inode *inode, struct file *file) +{ + struct qedf_dbg_ctx *qedf_dbg = inode->i_private; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + return single_open(file, qedf_driver_stats_show, qedf); +} + +static ssize_t +qedf_dbg_clear_stats_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int cnt = 0; + + /* Essentially a read stub */ + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static ssize_t +qedf_dbg_clear_stats_cmd_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, + dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Clearing stat counters.\n"); + + if (!count || *ppos) + return 0; + + /* Clear stat counters exposed by 'stats' node */ + qedf->slow_sge_ios = 0; + qedf->single_sge_ios = 0; + qedf->fast_sge_ios = 0; + + return count; +} + +static int +qedf_offload_stats_show(struct seq_file *s, void *unused) +{ + struct qedf_ctx *qedf = s->private; + struct qed_fcoe_stats *fw_fcoe_stats; + + fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL); + if (!fw_fcoe_stats) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " + "fw_fcoe_stats.\n"); + goto out; + } + + /* Query firmware for offload stats */ + qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); + + seq_printf(s, "fcoe_rx_byte_cnt=%llu\n" + "fcoe_rx_data_pkt_cnt=%llu\n" + "fcoe_rx_xfer_pkt_cnt=%llu\n" + "fcoe_rx_other_pkt_cnt=%llu\n" + "fcoe_silent_drop_pkt_cmdq_full_cnt=%u\n" + "fcoe_silent_drop_pkt_crc_error_cnt=%u\n" + "fcoe_silent_drop_pkt_task_invalid_cnt=%u\n" + "fcoe_silent_drop_total_pkt_cnt=%u\n" + "fcoe_silent_drop_pkt_rq_full_cnt=%u\n" + "fcoe_tx_byte_cnt=%llu\n" + "fcoe_tx_data_pkt_cnt=%llu\n" + "fcoe_tx_xfer_pkt_cnt=%llu\n" + "fcoe_tx_other_pkt_cnt=%llu\n", + fw_fcoe_stats->fcoe_rx_byte_cnt, + fw_fcoe_stats->fcoe_rx_data_pkt_cnt, + fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt, + fw_fcoe_stats->fcoe_rx_other_pkt_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_cmdq_full_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_task_invalid_cnt, + fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt, + 
fw_fcoe_stats->fcoe_silent_drop_pkt_rq_full_cnt, + fw_fcoe_stats->fcoe_tx_byte_cnt, + fw_fcoe_stats->fcoe_tx_data_pkt_cnt, + fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt, + fw_fcoe_stats->fcoe_tx_other_pkt_cnt); + + kfree(fw_fcoe_stats); +out: + return 0; +} + +static int +qedf_dbg_offload_stats_open(struct inode *inode, struct file *file) +{ + struct qedf_dbg_ctx *qedf_dbg = inode->i_private; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + return single_open(file, qedf_offload_stats_show, qedf); +} + + +const struct file_operations qedf_dbg_fops[] = { + qedf_dbg_fileops(qedf, fp_int), + qedf_dbg_fileops_seq(qedf, io_trace), + qedf_dbg_fileops(qedf, debug), + qedf_dbg_fileops(qedf, stop_io_on_error), + qedf_dbg_fileops_seq(qedf, driver_stats), + qedf_dbg_fileops(qedf, clear_stats), + qedf_dbg_fileops_seq(qedf, offload_stats), + /* This must be last */ + { NULL, NULL }, +}; + +#else /* CONFIG_DEBUG_FS */ +void qedf_dbg_host_init(struct qedf_dbg_ctx *); +void qedf_dbg_host_exit(struct qedf_dbg_ctx *); +void qedf_dbg_init(char *); +void qedf_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c new file mode 100644 index 000000000000..59f3e5c73a13 --- /dev/null +++ b/drivers/scsi/qedf/qedf_els.c @@ -0,0 +1,949 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include "qedf.h" + +/* It's assumed that the lock is held when calling this function. */ +static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, + void *data, uint32_t data_len, + void (*cb_func)(struct qedf_els_cb_arg *cb_arg), + struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec) +{ + struct qedf_ctx *qedf = fcport->qedf; + struct fc_lport *lport = qedf->lport; + struct qedf_ioreq *els_req; + struct qedf_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + struct fcoe_task_context *task; + int rc = 0; + uint32_t did, sid; + uint16_t xid; + uint32_t start_time = jiffies / HZ; + uint32_t current_time; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n"); + + rc = fc_remote_port_chkready(fcport->rport); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op); + rc = -EAGAIN; + goto els_err; + } + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n", + op); + rc = -EAGAIN; + goto els_err; + } + + if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op); + rc = -EINVAL; + goto els_err; + } + +retry_els: + els_req = qedf_alloc_cmd(fcport, QEDF_ELS); + if (!els_req) { + current_time = jiffies / HZ; + if ((current_time - start_time) > 10) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "els: Failed els 0x%x\n", op); + rc = -ENOMEM; + goto els_err; + } + mdelay(20 * USEC_PER_MSEC); + goto retry_els; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = " + "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg, + els_req->xid); + els_req->sc_cmd = NULL; + els_req->cmd_type = QEDF_ELS; + els_req->fcport = fcport; + els_req->cb_func = cb_func; + cb_arg->io_req = els_req; + cb_arg->op = op; + els_req->cb_arg = cb_arg; + els_req->data_xfer_len = data_len; + + /* Record which cpu this request is associated with */ + els_req->cpu = smp_processor_id(); + + 
mp_req = (struct qedf_mp_req *)&(els_req->mp_req); + rc = qedf_init_mp_req(els_req); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n"); + kref_put(&els_req->refcount, qedf_release_cmd); + goto els_err; + } else { + rc = 0; + } + + /* Fill ELS Payload */ + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { + memcpy(mp_req->req_buf, data, data_len); + } else { + QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + kref_put(&els_req->refcount, qedf_release_cmd); + rc = -EINVAL; + } + + if (rc) + goto els_err; + + /* Fill FC header */ + fc_hdr = &(mp_req->req_fc_hdr); + + did = fcport->rdata->ids.port_id; + sid = fcport->sid; + + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did, + FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + + /* Obtain exchange id */ + xid = els_req->xid; + + /* Initialize task context for this IO request */ + task = qedf_get_task_mem(&qedf->tasks, xid); + qedf_init_mp_task(els_req, task); + + /* Put timer on original I/O request */ + if (timer_msec) + qedf_cmd_timer_set(qedf, els_req, timer_msec); + + qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0); + + /* Ring doorbell */ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS " + "req\n"); + qedf_ring_doorbell(fcport); +els_err: + return rc; +} + +void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *els_req) +{ + struct fcoe_task_context *task_ctx; + struct scsi_cmnd *sc_cmd; + uint16_t xid; + struct fcoe_cqe_midpath_info *mp_info; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x" + " cmd_type = %d.\n", els_req->xid, els_req->cmd_type); + + /* Kill the ELS timer */ + cancel_delayed_work(&els_req->timeout_work); + + xid = els_req->xid; + task_ctx = qedf_get_task_mem(&qedf->tasks, xid); + sc_cmd = els_req->sc_cmd; + + /* Get ELS response length from CQE */ + mp_info = &cqe->cqe_info.midpath_info; + els_req->mp_req.resp_len = mp_info->data_placement_size; + + /* Parse ELS response */ + if ((els_req->cb_func) && (els_req->cb_arg)) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + kref_put(&els_req->refcount, qedf_release_cmd); +} + +static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *orig_io_req; + struct qedf_ioreq *rrq_req; + struct qedf_ctx *qedf; + int refcount; + + rrq_req = cb_arg->io_req; + qedf = rrq_req->fcport->qedf; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n"); + + orig_io_req = cb_arg->aborted_io_req; + + if (!orig_io_req) + goto out_free; + + if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO && + rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) + cancel_delayed_work_sync(&orig_io_req->timeout_work); + + refcount = kref_read(&orig_io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p," + " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n", + orig_io_req, orig_io_req->xid, rrq_req->xid, refcount); + + /* This should return the aborted io_req to the command pool */ + if (orig_io_req) + kref_put(&orig_io_req->refcount, qedf_release_cmd); + +out_free: + kfree(cb_arg); +} + +/* Assumes kref is already held by caller */ +int qedf_send_rrq(struct qedf_ioreq *aborted_io_req) +{ + + struct fc_els_rrq rrq; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_els_cb_arg *cb_arg = NULL; + struct qedf_ctx *qedf; + uint32_t sid; + uint32_t r_a_tov; + int rc; + + if (!aborted_io_req) { + QEDF_ERR(NULL, "abort_io_req is NULL.\n"); 
+ return -EINVAL; + } + + fcport = aborted_io_req->fcport; + + /* Check that fcport is still offloaded */ + if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return -EINVAL; + } + + if (!fcport->qedf) { + QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); + return -EINVAL; + } + + qedf = fcport->qedf; + lport = qedf->lport; + sid = fcport->sid; + r_a_tov = lport->r_a_tov; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig " + "io = %p, orig_xid = 0x%x\n", aborted_io_req, + aborted_io_req->xid); + memset(&rrq, 0, sizeof(rrq)); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "RRQ\n"); + rc = -ENOMEM; + goto rrq_err; + } + + cb_arg->aborted_io_req = aborted_io_req; + + rrq.rrq_cmd = ELS_RRQ; + hton24(rrq.rrq_s_id, sid); + rrq.rrq_ox_id = htons(aborted_io_req->xid); + rrq.rrq_rx_id = + htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id); + + rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq), + qedf_rrq_compl, cb_arg, r_a_tov); + +rrq_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io " + "req 0x%x\n", aborted_io_req->xid); + kfree(cb_arg); + kref_put(&aborted_io_req->refcount, qedf_release_cmd); + } + return rc; +} + +static void qedf_process_l2_frame_compl(struct qedf_rport *fcport, + struct fc_frame *fp, + u16 l2_oxid) +{ + struct fc_lport *lport = fcport->qedf->lport; + struct fc_frame_header *fh; + u32 crc; + + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + + /* Set the OXID we return to what libfc used */ + if (l2_oxid != FC_XID_UNKNOWN) + fh->fh_ox_id = htons(l2_oxid); + + /* Setup header fields */ + fh->fh_r_ctl = FC_RCTL_ELS_REP; + fh->fh_type = FC_TYPE_ELS; + /* Last sequence, end sequence */ + fh->fh_f_ctl[0] = 0x98; + hton24(fh->fh_d_id, lport->port_id); + hton24(fh->fh_s_id, fcport->rdata->ids.port_id); + fh->fh_rx_id = 0xffff; + + /* Set frame attributes */ + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + + /* Send completed request to libfc */ + fc_exch_recv(lport, fp); +} + +/* + * In instances where an ELS command times out we may need to restart the + * rport by logging out and then logging back in. + */ +void qedf_restart_rport(struct qedf_rport *fcport) +{ + struct fc_lport *lport; + struct fc_rport_priv *rdata; + u32 port_id; + + if (!fcport) + return; + + rdata = fcport->rdata; + if (rdata) { + lport = fcport->qedf->lport; + port_id = rdata->ids.port_id; + QEDF_ERR(&(fcport->qedf->dbg_ctx), + "LOGO port_id=%x.\n", port_id); + fc_rport_logoff(rdata); + /* Recreate the rport and log back in */ + rdata = fc_rport_create(lport, port_id); + if (rdata) + fc_rport_login(rdata); + } +} + +static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *els_req; + struct qedf_rport *fcport; + struct qedf_mp_req *mp_req; + struct fc_frame *fp; + struct fc_frame_header *fh, *mp_fc_hdr; + void *resp_buf, *fc_payload; + u32 resp_len; + u16 l2_oxid; + + l2_oxid = cb_arg->l2_oxid; + els_req = cb_arg->io_req; + + if (!els_req) { + QEDF_ERR(NULL, "els_req is NULL.\n"); + goto free_arg; + } + + /* + * If we are flushing the command just free the cb_arg as none of the + * response data will be valid. 
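The abort/RRQ/SRR/REC paths in this file keep the original qedf_ioreq alive across the asynchronous ELS exchange by holding a kref while the request is outstanding and dropping it in the completion handler, where qedf_release_cmd() returns the command to its pool. A minimal sketch of that lifetime pattern, with illustrative names (the driver's own release path does not kfree(), it recycles the command):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_req {
	struct kref refcount;
	/* ... per-request state ... */
};

static void demo_release(struct kref *ref)
{
	struct demo_req *req = container_of(ref, struct demo_req, refcount);

	kfree(req);
}

static struct demo_req *demo_alloc(void)
{
	struct demo_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (req)
		kref_init(&req->refcount);	/* refcount starts at 1 */
	return req;
}

static void demo_issue_async(struct demo_req *req)
{
	kref_get(&req->refcount);	/* held until the completion runs */
}

static void demo_complete_async(struct demo_req *req)
{
	kref_put(&req->refcount, demo_release);	/* drop the async reference */
}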
+ */ + if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) + goto free_arg; + + fcport = els_req->fcport; + mp_req = &(els_req->mp_req); + mp_fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + /* + * If a middle path ELS command times out, don't try to return + * the command but rather do any internal cleanup and then libfc + * timeout the command and clean up its internal resources. + */ + if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) { + /* + * If ADISC times out, libfc will timeout the exchange and then + * try to send a PLOGI which will timeout since the session is + * still offloaded. Force libfc to logout the session which + * will offload the connection and allow the PLOGI response to + * flow over the LL2 path. + */ + if (cb_arg->op == ELS_ADISC) + qedf_restart_rport(fcport); + return; + } + + if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is " + "beyond page size.\n"); + goto free_arg; + } + + fp = fc_frame_alloc(fcport->qedf->lport, resp_len); + if (!fp) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), + "fc_frame_alloc failure.\n"); + return; + } + + /* Copy frame header from firmware into fp */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); + + /* Copy payload from firmware into fp */ + fc_payload = fc_frame_payload_get(fp, resp_len); + memcpy(fc_payload, resp_buf, resp_len); + + QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, + "Completing OX_ID 0x%x back to libfc.\n", l2_oxid); + qedf_process_l2_frame_compl(fcport, fp, l2_oxid); + +free_arg: + kfree(cb_arg); +} + +int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp) +{ + struct fc_els_adisc *adisc; + struct fc_frame_header *fh; + struct fc_lport *lport = fcport->qedf->lport; + struct qedf_els_cb_arg *cb_arg = NULL; + struct qedf_ctx *qedf; + uint32_t r_a_tov = lport->r_a_tov; + int rc; + + qedf = fcport->qedf; + fh = fc_frame_header_get(fp); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "ADISC\n"); + rc = -ENOMEM; + goto adisc_err; + } + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid); + + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + + rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc), + qedf_l2_els_compl, cb_arg, r_a_tov); + +adisc_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n"); + kfree(cb_arg); + } + return rc; +} + +static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *orig_io_req; + struct qedf_ioreq *srr_req; + struct qedf_mp_req *mp_req; + struct fc_frame_header *mp_fc_hdr, *fh; + struct fc_frame *fp; + void *resp_buf, *fc_payload; + u32 resp_len; + struct fc_lport *lport; + struct qedf_ctx *qedf; + int refcount; + u8 opcode; + + srr_req = cb_arg->io_req; + qedf = srr_req->fcport->qedf; + lport = qedf->lport; + + orig_io_req = cb_arg->aborted_io_req; + + if (!orig_io_req) + goto out_free; + + clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); + + if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO && + srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) + cancel_delayed_work_sync(&orig_io_req->timeout_work); + + refcount = kref_read(&orig_io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p," + " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n", + orig_io_req, 
orig_io_req->xid, srr_req->xid, refcount); + + /* If a SRR times out, simply free resources */ + if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) + goto out_free; + + /* Normalize response data into struct fc_frame */ + mp_req = &(srr_req->mp_req); + mp_fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + fp = fc_frame_alloc(lport, resp_len); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), + "fc_frame_alloc failure.\n"); + goto out_free; + } + + /* Copy frame header from firmware into fp */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); + + /* Copy payload from firmware into fp */ + fc_payload = fc_frame_payload_get(fp, resp_len); + memcpy(fc_payload, resp_buf, resp_len); + + opcode = fc_frame_payload_op(fp); + switch (opcode) { + case ELS_LS_ACC: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "SRR success.\n"); + break; + case ELS_LS_RJT: + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, + "SRR rejected.\n"); + qedf_initiate_abts(orig_io_req, true); + break; + } + + fc_frame_free(fp); +out_free: + /* Put reference for original command since SRR completed */ + kref_put(&orig_io_req->refcount, qedf_release_cmd); + kfree(cb_arg); +} + +static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl) +{ + struct fcp_srr srr; + struct qedf_ctx *qedf; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_els_cb_arg *cb_arg = NULL; + u32 sid, r_a_tov; + int rc; + + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); + return -EINVAL; + } + + fcport = orig_io_req->fcport; + + /* Check that fcport is still offloaded */ + if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return -EINVAL; + } + + if (!fcport->qedf) { + QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); + return -EINVAL; + } + + /* Take reference until SRR command completion */ + kref_get(&orig_io_req->refcount); + + qedf = fcport->qedf; + lport = qedf->lport; + sid = fcport->sid; + r_a_tov = lport->r_a_tov; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, " + "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid); + memset(&srr, 0, sizeof(srr)); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "SRR\n"); + rc = -ENOMEM; + goto srr_err; + } + + cb_arg->aborted_io_req = orig_io_req; + + srr.srr_op = ELS_SRR; + srr.srr_ox_id = htons(orig_io_req->xid); + srr.srr_rx_id = htons(orig_io_req->rx_id); + srr.srr_rel_off = htonl(offset); + srr.srr_r_ctl = r_ctl; + + rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr), + qedf_srr_compl, cb_arg, r_a_tov); + +srr_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req" + "=0x%x\n", orig_io_req->xid); + kfree(cb_arg); + /* If we fail to queue SRR, send ABTS to orig_io */ + qedf_initiate_abts(orig_io_req, true); + kref_put(&orig_io_req->refcount, qedf_release_cmd); + } else + /* Tell other threads that SRR is in progress */ + set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); + + return rc; +} + +static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req, + u32 offset, u8 r_ctl) +{ + struct qedf_rport *fcport; + unsigned long flags; + struct qedf_els_cb_arg *cb_arg; + + fcport = orig_io_req->fcport; + + QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, + "Doing sequence cleanup for xid=0x%x offset=%u.\n", + orig_io_req->xid, offset); + + cb_arg = 
kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg " + "for sequence cleanup\n"); + return; + } + + /* Get reference for cleanup request */ + kref_get(&orig_io_req->refcount); + + orig_io_req->cmd_type = QEDF_SEQ_CLEANUP; + cb_arg->offset = offset; + cb_arg->r_ctl = r_ctl; + orig_io_req->cb_arg = cb_arg; + + qedf_cmd_timer_set(fcport->qedf, orig_io_req, + QEDF_CLEANUP_TIMEOUT * HZ); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + qedf_add_to_sq(fcport, orig_io_req->xid, 0, + FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset); + qedf_ring_doorbell(fcport); + + spin_unlock_irqrestore(&fcport->rport_lock, flags); +} + +void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) +{ + int rc; + struct qedf_els_cb_arg *cb_arg; + + cb_arg = io_req->cb_arg; + + /* If we timed out just free resources */ + if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) + goto free; + + /* Kill the timer we put on the request */ + cancel_delayed_work_sync(&io_req->timeout_work); + + rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl); + if (rc) + QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will " + "abort, xid=0x%x.\n", io_req->xid); +free: + kfree(cb_arg); + kref_put(&io_req->refcount, qedf_release_cmd); +} + +static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req) +{ + struct qedf_rport *fcport; + struct qedf_ioreq *new_io_req; + unsigned long flags; + bool rc = false; + + fcport = orig_io_req->fcport; + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL.\n"); + goto out; + } + + if (!orig_io_req->sc_cmd) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for " + "xid=0x%x.\n", orig_io_req->xid); + goto out; + } + + new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); + if (!new_io_req) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new " + "io_req.\n"); + goto out; + } + + new_io_req->sc_cmd = orig_io_req->sc_cmd; + + /* + * This keeps the sc_cmd struct from being returned to the tape + * driver and being requeued twice. We do need to put a reference + * for the original I/O request since we will not do a SCSI completion + * for it. + */ + orig_io_req->sc_cmd = NULL; + kref_put(&orig_io_req->refcount, qedf_release_cmd); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + /* kref for new command released in qedf_post_io_req on error */ + if (qedf_post_io_req(fcport, new_io_req)) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n"); + /* Return SQE to pool */ + atomic_inc(&fcport->free_sqes); + } else { + QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, + "Reissued SCSI command from orig_xid=0x%x on " + "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid); + /* + * Abort the original I/O but do not return SCSI command as + * it has been reissued on another OX_ID. 
+ */ + spin_unlock_irqrestore(&fcport->rport_lock, flags); + qedf_initiate_abts(orig_io_req, false); + goto out; + } + + spin_unlock_irqrestore(&fcport->rport_lock, flags); +out: + return rc; +} + + +static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *orig_io_req; + struct qedf_ioreq *rec_req; + struct qedf_mp_req *mp_req; + struct fc_frame_header *mp_fc_hdr, *fh; + struct fc_frame *fp; + void *resp_buf, *fc_payload; + u32 resp_len; + struct fc_lport *lport; + struct qedf_ctx *qedf; + int refcount; + enum fc_rctl r_ctl; + struct fc_els_ls_rjt *rjt; + struct fc_els_rec_acc *acc; + u8 opcode; + u32 offset, e_stat; + struct scsi_cmnd *sc_cmd; + bool srr_needed = false; + + rec_req = cb_arg->io_req; + qedf = rec_req->fcport->qedf; + lport = qedf->lport; + + orig_io_req = cb_arg->aborted_io_req; + + if (!orig_io_req) + goto out_free; + + if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO && + rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) + cancel_delayed_work_sync(&orig_io_req->timeout_work); + + refcount = kref_read(&orig_io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p," + " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n", + orig_io_req, orig_io_req->xid, rec_req->xid, refcount); + + /* If a REC times out, free resources */ + if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) + goto out_free; + + /* Normalize response data into struct fc_frame */ + mp_req = &(rec_req->mp_req); + mp_fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + acc = resp_buf = mp_req->resp_buf; + + fp = fc_frame_alloc(lport, resp_len); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), + "fc_frame_alloc failure.\n"); + goto out_free; + } + + /* Copy frame header from firmware into fp */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); + + /* Copy payload from firmware into fp */ + fc_payload = fc_frame_payload_get(fp, resp_len); + memcpy(fc_payload, resp_buf, resp_len); + + opcode = fc_frame_payload_op(fp); + if (opcode == ELS_LS_RJT) { + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Received LS_RJT for REC: er_reason=0x%x, " + "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan); + /* + * The following response(s) mean that we need to reissue the + * request on another exchange. We need to do this without + * informing the upper layers lest it cause an application + * error. 
+ */ + if ((rjt->er_reason == ELS_RJT_LOGIC || + rjt->er_reason == ELS_RJT_UNAB) && + rjt->er_explan == ELS_EXPL_OXID_RXID) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Handle CMD LOST case.\n"); + qedf_requeue_io_req(orig_io_req); + } + } else if (opcode == ELS_LS_ACC) { + offset = ntohl(acc->reca_fc4value); + e_stat = ntohl(acc->reca_e_stat); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n", + offset, e_stat); + if (e_stat & ESB_ST_SEQ_INIT) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Target has the seq init\n"); + goto out_free_frame; + } + sc_cmd = orig_io_req->sc_cmd; + if (!sc_cmd) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "sc_cmd is NULL for xid=0x%x.\n", + orig_io_req->xid); + goto out_free_frame; + } + /* SCSI write case */ + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + if (offset == orig_io_req->data_xfer_len) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "WRITE - response lost.\n"); + r_ctl = FC_RCTL_DD_CMD_STATUS; + srr_needed = true; + offset = 0; + } else { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "WRITE - XFER_RDY/DATA lost.\n"); + r_ctl = FC_RCTL_DD_DATA_DESC; + /* Use data from warning CQE instead of REC */ + offset = orig_io_req->tx_buf_off; + } + /* SCSI read case */ + } else { + if (orig_io_req->rx_buf_off == + orig_io_req->data_xfer_len) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "READ - response lost.\n"); + srr_needed = true; + r_ctl = FC_RCTL_DD_CMD_STATUS; + offset = 0; + } else { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "READ - DATA lost.\n"); + /* + * For read case we always set the offset to 0 + * for sequence recovery task. + */ + offset = 0; + r_ctl = FC_RCTL_DD_SOL_DATA; + } + } + + if (srr_needed) + qedf_send_srr(orig_io_req, offset, r_ctl); + else + qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl); + } + +out_free_frame: + fc_frame_free(fp); +out_free: + /* Put reference for original command since REC completed */ + kref_put(&orig_io_req->refcount, qedf_release_cmd); + kfree(cb_arg); +} + +/* Assumes kref is already held by caller */ +int qedf_send_rec(struct qedf_ioreq *orig_io_req) +{ + + struct fc_els_rec rec; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_els_cb_arg *cb_arg = NULL; + struct qedf_ctx *qedf; + uint32_t sid; + uint32_t r_a_tov; + int rc; + + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); + return -EINVAL; + } + + fcport = orig_io_req->fcport; + + /* Check that fcport is still offloaded */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return -EINVAL; + } + + if (!fcport->qedf) { + QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); + return -EINVAL; + } + + /* Take reference until REC command completion */ + kref_get(&orig_io_req->refcount); + + qedf = fcport->qedf; + lport = qedf->lport; + sid = fcport->sid; + r_a_tov = lport->r_a_tov; + + memset(&rec, 0, sizeof(rec)); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "REC\n"); + rc = -ENOMEM; + goto rec_err; + } + + cb_arg->aborted_io_req = orig_io_req; + + rec.rec_cmd = ELS_REC; + hton24(rec.rec_s_id, sid); + rec.rec_ox_id = htons(orig_io_req->xid); + rec.rec_rx_id = + htons(orig_io_req->task->tstorm_st_context.read_write.rx_id); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, " + "orig_xid=0x%x rx_id=0x%x\n", orig_io_req, + orig_io_req->xid, rec.rec_rx_id); + rc = 
qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec), + qedf_rec_compl, cb_arg, r_a_tov); + +rec_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req" + "=0x%x\n", orig_io_req->xid); + kfree(cb_arg); + kref_put(&orig_io_req->refcount, qedf_release_cmd); + } + return rc; +} diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c new file mode 100644 index 000000000000..868d423380d1 --- /dev/null +++ b/drivers/scsi/qedf/qedf_fip.c @@ -0,0 +1,269 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include "qedf.h" + +extern const struct qed_fcoe_ops *qed_ops; +/* + * FIP VLAN functions that will eventually move to libfcoe. + */ + +void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) +{ + struct sk_buff *skb; + char *eth_fr; + int fr_len; + struct fip_vlan *vlan; +#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) + static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; + + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + if (!skb) + return; + + fr_len = sizeof(*vlan); + eth_fr = (char *)skb->data; + vlan = (struct fip_vlan *)eth_fr; + + memset(vlan, 0, sizeof(*vlan)); + ether_addr_copy(vlan->eth.h_source, qedf->mac); + ether_addr_copy(vlan->eth.h_dest, my_fcoe_all_fcfs); + vlan->eth.h_proto = htons(ETH_P_FIP); + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + vlan->fip.fip_op = htons(FIP_OP_VLAN); + vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + ether_addr_copy(vlan->desc.mac.fd_mac, qedf->mac); + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + put_unaligned_be64(qedf->lport->wwnn, &vlan->desc.wwnn.fd_wwn); + + skb_put(skb, sizeof(*vlan)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Sending FIP VLAN " + "request."); + + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_WARN(&(qedf->dbg_ctx), "Cannot send vlan request " + "because link is not up.\n"); + + kfree_skb(skb); + return; + } + qed_ops->ll2->start_xmit(qedf->cdev, skb); +} + +static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, + struct sk_buff *skb) +{ + struct fip_header *fiph; + struct fip_desc *desc; + u16 vid = 0; + ssize_t rlen; + size_t dlen; + + fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2); + + rlen = ntohs(fiph->fip_dl_len) * 4; + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_VLAN: + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, " + "vid=0x%x.\n", vid); + + if (vid > 0 && qedf->vlan_id != vid) { + qedf_set_vlan_id(qedf, vid); + + /* Inform waiter that it's ok to call fcoe_ctlr_link up() */ + complete(&qedf->fipvlan_compl); + } +} + +void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr); 
+ struct ethhdr *eth_hdr; + struct vlan_ethhdr *vlan_hdr; + struct fip_header *fiph; + u16 op, vlan_tci = 0; + u8 sub; + + if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { + QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); + kfree_skb(skb); + return; + } + + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (!qedf->vlan_hw_insert) { + vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, sizeof(*vlan_hdr) + - sizeof(*eth_hdr)); + memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); + vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; + vlan_hdr->h_vlan_TCI = vlan_tci = htons(qedf->vlan_id); + } + + /* Update eth_hdr since we added a VLAN tag */ + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: " + "dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub, + ntohs(vlan_tci)); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, false); + + qed_ops->ll2->start_xmit(qedf->cdev, skb); +} + +/* Process incoming FIP frames. */ +void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) +{ + struct ethhdr *eth_hdr; + struct fip_header *fiph; + struct fip_desc *desc; + struct fip_mac_desc *mp; + struct fip_wwn_desc *wp; + struct fip_vn_desc *vp; + size_t rlen, dlen; + uint32_t cvl_port_id; + __u8 cvl_mac[ETH_ALEN]; + u16 op; + u8 sub; + + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame received: " + "skb=%p fiph=%p source=%pM op=%x sub=%x", skb, fiph, + eth_hdr->h_source, op, sub); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, false); + + /* Handle FIP VLAN resp in the driver */ + if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { + qedf_fcoe_process_vlan_resp(qedf, skb); + qedf->vlan_hw_insert = 0; + kfree_skb(skb); + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual " + "link received.\n"); + + /* Check that an FCF has been selected by fcoe */ + if (qedf->ctlr.sel_fcf == NULL) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Dropping CVL since FCF has not been selected " + "yet."); + return; + } + + cvl_port_id = 0; + memset(cvl_mac, 0, ETH_ALEN); + /* + * We need to loop through the CVL descriptors to determine + * if we want to reset the fcoe link + */ + rlen = ntohs(fiph->fip_dl_len) * FIP_BPW; + desc = (struct fip_desc *)(fiph + 1); + while (rlen >= sizeof(*desc)) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_MAC: + mp = (struct fip_mac_desc *)desc; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "fd_mac=%pM.\n", __func__, mp->fd_mac); + ether_addr_copy(cvl_mac, mp->fd_mac); + break; + case FIP_DT_NAME: + wp = (struct fip_wwn_desc *)desc; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "fc_wwpn=%016llx.\n", + get_unaligned_be64(&wp->fd_wwn)); + break; + case FIP_DT_VN_ID: + vp = (struct fip_vn_desc *)desc; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id)); + cvl_port_id = ntoh24(vp->fd_fc_id); + break; + default: + /* Ignore anything else */ + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } 
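+ /*
+  * Descriptor walk complete.  The link is only bounced below when the
+  * CVL named both our local port_id and the MAC of the currently
+  * selected FCF; otherwise the clear-virtual-link frame is ignored.
+  */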
+ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id, + cvl_mac); + if (cvl_port_id == qedf->lport->port_id && + ether_addr_equal(cvl_mac, + qedf->ctlr.sel_fcf->fcf_mac)) { + fcoe_ctlr_link_down(&qedf->ctlr); + qedf_wait_for_upload(qedf); + fcoe_ctlr_link_up(&qedf->ctlr); + } + kfree_skb(skb); + } else { + /* Everything else is handled by libfcoe */ + __skb_pull(skb, ETH_HLEN); + fcoe_ctlr_recv(&qedf->ctlr, skb); + } +} + +void qedf_update_src_mac(struct fc_lport *lport, u8 *addr) +{ + struct qedf_ctx *qedf = lport_priv(lport); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Setting data_src_addr=%pM.\n", addr); + ether_addr_copy(qedf->data_src_addr, addr); +} + +u8 *qedf_get_src_mac(struct fc_lport *lport) +{ + u8 mac[ETH_ALEN]; + u8 port_id[3]; + struct qedf_ctx *qedf = lport_priv(lport); + + /* We need to use the lport port_id to create the data_src_addr */ + if (is_zero_ether_addr(qedf->data_src_addr)) { + hton24(port_id, lport->port_id); + fc_fcoe_set_mac(mac, port_id); + qedf->ctlr.update_mac(lport, mac); + } + return qedf->data_src_addr; +} diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h new file mode 100644 index 000000000000..dfd65dec2874 --- /dev/null +++ b/drivers/scsi/qedf/qedf_hsi.h @@ -0,0 +1,422 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#ifndef __QEDF_HSI__ +#define __QEDF_HSI__ +/* + * Add include to common target + */ +#include <linux/qed/common_hsi.h> + +/* + * Add include to common storage target + */ +#include <linux/qed/storage_common.h> + +/* + * Add include to common fcoe target for both eCore and protocol driver + */ +#include <linux/qed/fcoe_common.h> + + +/* + * FCoE CQ element ABTS information + */ +struct fcoe_abts_info { + u8 r_ctl /* R_CTL in the ABTS response frame */; + u8 reserved0; + __le16 rx_id; + __le32 reserved2[2]; + __le32 fc_payload[3] /* ABTS FC payload response frame */; +}; + + +/* + * FCoE class type + */ +enum fcoe_class_type { + FCOE_TASK_CLASS_TYPE_3, + FCOE_TASK_CLASS_TYPE_2, + MAX_FCOE_CLASS_TYPE +}; + + +/* + * FCoE CMDQ element control information + */ +struct fcoe_cmdqe_control { + __le16 conn_id; + u8 num_additional_cmdqes; + u8 cmdType; + /* true for ABTS request cmdqe. 
used in Target mode */ +#define FCOE_CMDQE_CONTROL_ABTSREQCMD_MASK 0x1 +#define FCOE_CMDQE_CONTROL_ABTSREQCMD_SHIFT 0 +#define FCOE_CMDQE_CONTROL_RESERVED1_MASK 0x7F +#define FCOE_CMDQE_CONTROL_RESERVED1_SHIFT 1 + u8 reserved2[4]; +}; + +/* + * FCoE control + payload CMDQ element + */ +struct fcoe_cmdqe { + struct fcoe_cmdqe_control hdr; + u8 fc_header[24]; + __le32 fcp_cmd_payload[8]; +}; + + + +/* + * FCP RSP flags + */ +struct fcoe_fcp_rsp_flags { + u8 flags; +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_MASK 0x7 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 +}; + +/* + * FCoE CQ element response information + */ +struct fcoe_cqe_rsp_info { + struct fcoe_fcp_rsp_flags rsp_flags; + u8 scsi_status_code; + __le16 retry_delay_timer; + __le32 fcp_resid; + __le32 fcp_sns_len; + __le32 fcp_rsp_len; + __le16 rx_id; + u8 fw_error_flags; +#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_MASK 0x1 /* FW detected underrun */ +#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_SHIFT 0 +#define FCOE_CQE_RSP_INFO_RESREVED_MASK 0x7F +#define FCOE_CQE_RSP_INFO_RESREVED_SHIFT 1 + u8 reserved; + __le32 fw_residual /* Residual bytes calculated by FW */; +}; + +/* + * FCoE CQ element Target completion information + */ +struct fcoe_cqe_target_info { + __le16 rx_id; + __le16 reserved0; + __le32 reserved1[5]; +}; + +/* + * FCoE error/warning reporting entry + */ +struct fcoe_err_report_entry { + __le32 err_warn_bitmap_lo /* Error bitmap lower 32 bits */; + __le32 err_warn_bitmap_hi /* Error bitmap higher 32 bits */; + /* Buffer offset the beginning of the Sequence last transmitted */ + __le32 tx_buf_off; + /* Buffer offset from the beginning of the Sequence last received */ + __le32 rx_buf_off; + __le16 rx_id /* RX_ID of the associated task */; + __le16 reserved1; + __le32 reserved2; +}; + +/* + * FCoE CQ element middle path information + */ +struct fcoe_cqe_midpath_info { + __le32 data_placement_size; + __le16 rx_id; + __le16 reserved0; + __le32 reserved1[4]; +}; + +/* + * FCoE CQ element unsolicited information + */ +struct fcoe_unsolic_info { + /* BD information: Physical address and opaque data */ + struct scsi_bd bd_info; + __le16 conn_id /* Connection ID the frame is associated to */; + __le16 pkt_len /* Packet length */; + u8 reserved1[4]; +}; + +/* + * FCoE warning reporting entry + */ +struct fcoe_warning_report_entry { + /* BD information: Physical address and opaque data */ + struct scsi_bd bd_info; + /* Buffer offset the beginning of the Sequence last transmitted */ + __le32 buf_off; + __le16 rx_id /* RX_ID of the associated task */; + __le16 reserved1; +}; + +/* + * FCoE CQ element information + */ +union fcoe_cqe_info { + struct fcoe_cqe_rsp_info rsp_info /* Response completion information */; + /* Target completion information */ + struct fcoe_cqe_target_info target_info; + /* Error completion information */ + struct fcoe_err_report_entry err_info; + struct fcoe_abts_info abts_info /* ABTS completion information */; + /* Middle path completion information */ + struct fcoe_cqe_midpath_info midpath_info; 
+ /* Unsolicited packet completion information */ + struct fcoe_unsolic_info unsolic_info; + /* Warning completion information (Rec Tov expiration) */ + struct fcoe_warning_report_entry warn_info; +}; + +/* + * FCoE CQ element + */ +struct fcoe_cqe { + __le32 cqe_data; + /* The task identifier (OX_ID) to be completed */ +#define FCOE_CQE_TASK_ID_MASK 0xFFFF +#define FCOE_CQE_TASK_ID_SHIFT 0 + /* + * The CQE type: 0x0 Indicating on a pending work request completion. + * 0x1 - Indicating on an unsolicited event notification. use enum + * fcoe_cqe_type (use enum fcoe_cqe_type) + */ +#define FCOE_CQE_CQE_TYPE_MASK 0xF +#define FCOE_CQE_CQE_TYPE_SHIFT 16 +#define FCOE_CQE_RESERVED0_MASK 0xFFF +#define FCOE_CQE_RESERVED0_SHIFT 20 + __le16 reserved1; + __le16 fw_cq_prod; + union fcoe_cqe_info cqe_info; +}; + +/* + * FCoE CQE type + */ +enum fcoe_cqe_type { + /* solicited response on a R/W or middle-path SQE */ + FCOE_GOOD_COMPLETION_CQE_TYPE, + FCOE_UNSOLIC_CQE_TYPE /* unsolicited packet, RQ consumed */, + FCOE_ERROR_DETECTION_CQE_TYPE /* timer expiration, validation error */, + FCOE_WARNING_CQE_TYPE /* rec_tov or rr_tov timer expiration */, + FCOE_EXCH_CLEANUP_CQE_TYPE /* task cleanup completed */, + FCOE_ABTS_CQE_TYPE /* ABTS received and task cleaned */, + FCOE_DUMMY_CQE_TYPE /* just increment SQ CONS */, + /* Task was completed wight after sending a pkt to the target */ + FCOE_LOCAL_COMP_CQE_TYPE, + MAX_FCOE_CQE_TYPE +}; + + +/* + * FCoE device type + */ +enum fcoe_device_type { + FCOE_TASK_DEV_TYPE_DISK, + FCOE_TASK_DEV_TYPE_TAPE, + MAX_FCOE_DEVICE_TYPE +}; + + + + +/* + * FCoE fast path error codes + */ +enum fcoe_fp_error_warning_code { + FCOE_ERROR_CODE_XFER_OOO_RO /* XFER error codes */, + FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED, + FCOE_ERROR_CODE_XFER_NULL_BURST_LEN, + FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS, + FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE, + FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE, + FCOE_ERROR_CODE_XFER_PEND_XFER_SET, + FCOE_ERROR_CODE_XFER_OPENED_SEQ, + FCOE_ERROR_CODE_XFER_FCTL, + FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET /* FCP RSP error codes */, + FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD, + FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD, + FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE, + FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET, + FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ, + FCOE_ERROR_CODE_FCP_RSP_FCTL, + FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET, + FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET, + FCOE_ERROR_CODE_DATA_OOO_RO /* FCP DATA error codes */, + FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE, + FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS, + FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET, + FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET, + FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET, + FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET, + FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ, + FCOE_ERROR_CODE_DATA_FCTL_INITIATIR, + FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE /* Middle path error codes */, + FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET, + FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET, + FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET, + FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET, + FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL, + FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY, + FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL, + FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD /* Common error codes */, + FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE, + FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH, + FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT, + FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH, + FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES, + 
FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR, + FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG, + FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED, + FCOE_ERROR_CODE_COMMON_TASK_DDF_RCTL_INFO_FIELD, + FCOE_ERROR_CODE_COMMON_TASK_INVALID_RCTL, + FCOE_ERROR_CODE_COMMON_TASK_RCTL_GENERAL_MISMATCH, + FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION /* Timer error codes */, + FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION /* Timer error codes */, + FCOE_ERROR_CODE_RR_TOV_TIMER_EXPIRATION /* Timer error codes */, + /* ABTSrsp pckt arrived unexpected */ + FCOE_ERROR_CODE_ABTS_REPLY_UNEXPECTED, + FCOE_ERROR_CODE_TARGET_MODE_FCP_RSP, + FCOE_ERROR_CODE_TARGET_MODE_FCP_XFER, + FCOE_ERROR_CODE_TARGET_MODE_DATA_TASK_TYPE_NOT_WRITE, + FCOE_ERROR_CODE_DATA_FCTL_TARGET, + FCOE_ERROR_CODE_TARGET_DATA_SIZE_NO_MATCH_XFER, + FCOE_ERROR_CODE_TARGET_DIF_CRC_CHECKSUM_ERROR, + FCOE_ERROR_CODE_TARGET_DIF_REF_TAG_ERROR, + FCOE_ERROR_CODE_TARGET_DIF_APP_TAG_ERROR, + MAX_FCOE_FP_ERROR_WARNING_CODE +}; + + +/* + * FCoE RESPQ element + */ +struct fcoe_respqe { + __le16 ox_id /* OX_ID that is located in the FCP_RSP FC header */; + __le16 rx_id /* RX_ID that is located in the FCP_RSP FC header */; + __le32 additional_info; +/* PARAM that is located in the FCP_RSP FC header */ +#define FCOE_RESPQE_PARAM_MASK 0xFFFFFF +#define FCOE_RESPQE_PARAM_SHIFT 0 +/* Indication whther its Target-auto-rsp mode or not */ +#define FCOE_RESPQE_TARGET_AUTO_RSP_MASK 0xFF +#define FCOE_RESPQE_TARGET_AUTO_RSP_SHIFT 24 +}; + + +/* + * FCoE slow path error codes + */ +enum fcoe_sp_error_code { + /* Error codes for Error Reporting in slow path flows */ + FCOE_ERROR_CODE_SLOW_PATH_TOO_MANY_FUNCS, + FCOE_ERROR_SLOW_PATH_CODE_NO_LICENSE, + MAX_FCOE_SP_ERROR_CODE +}; + + +/* + * FCoE SQE request type + */ +enum fcoe_sqe_request_type { + SEND_FCOE_CMD, + SEND_FCOE_MIDPATH, + SEND_FCOE_ABTS_REQUEST, + FCOE_EXCHANGE_CLEANUP, + FCOE_SEQUENCE_RECOVERY, + SEND_FCOE_XFER_RDY, + SEND_FCOE_RSP, + SEND_FCOE_RSP_WITH_SENSE_DATA, + SEND_FCOE_TARGET_DATA, + SEND_FCOE_INITIATOR_DATA, + /* + * Xfer Continuation (==1) ready to be sent. Previous XFERs data + * received successfully. 
+ */ + SEND_FCOE_XFER_CONTINUATION_RDY, + SEND_FCOE_TARGET_ABTS_RSP, + MAX_FCOE_SQE_REQUEST_TYPE +}; + + +/* + * FCoE task TX state + */ +enum fcoe_task_tx_state { + /* Initiate state after driver has initialized the task */ + FCOE_TASK_TX_STATE_NORMAL, + /* Updated by TX path after complete transmitting unsolicited packet */ + FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED, + /* + * Updated by TX path after start processing the task requesting the + * cleanup/abort operation + */ + FCOE_TASK_TX_STATE_CLEAN_REQ, + FCOE_TASK_TX_STATE_ABTS /* Updated by TX path during abort procedure */, + /* Updated by TX path during exchange cleanup procedure */ + FCOE_TASK_TX_STATE_EXCLEANUP, + /* + * Updated by TX path during exchange cleanup continuation task + * procedure + */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_CONT, + /* Updated by TX path during exchange cleanup first xfer procedure */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE, + /* Updated by TX path during exchange cleanup read task in Target */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_READ_OR_RSP, + /* Updated by TX path during target exchange cleanup procedure */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_LAST_CYCLE, + /* Updated by TX path during sequence recovery procedure */ + FCOE_TASK_TX_STATE_SEQRECOVERY, + MAX_FCOE_TASK_TX_STATE +}; + + +/* + * FCoE task type + */ +enum fcoe_task_type { + FCOE_TASK_TYPE_WRITE_INITIATOR, + FCOE_TASK_TYPE_READ_INITIATOR, + FCOE_TASK_TYPE_MIDPATH, + FCOE_TASK_TYPE_UNSOLICITED, + FCOE_TASK_TYPE_ABTS, + FCOE_TASK_TYPE_EXCHANGE_CLEANUP, + FCOE_TASK_TYPE_SEQUENCE_CLEANUP, + FCOE_TASK_TYPE_WRITE_TARGET, + FCOE_TASK_TYPE_READ_TARGET, + FCOE_TASK_TYPE_RSP, + FCOE_TASK_TYPE_RSP_SENSE_DATA, + FCOE_TASK_TYPE_ABTS_TARGET, + FCOE_TASK_TYPE_ENUM_SIZE, + MAX_FCOE_TASK_TYPE +}; + +struct scsi_glbl_queue_entry { + /* Start physical address for the RQ (receive queue) PBL. */ + struct regpair rq_pbl_addr; + /* Start physical address for the CQ (completion queue) PBL. */ + struct regpair cq_pbl_addr; + /* Start physical address for the CMDQ (command queue) PBL. */ + struct regpair cmdq_pbl_addr; +}; + +#endif /* __QEDF_HSI__ */ diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c new file mode 100644 index 000000000000..ee0dcf9d3aba --- /dev/null +++ b/drivers/scsi/qedf/qedf_io.c @@ -0,0 +1,2282 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include <linux/spinlock.h> +#include <linux/vmalloc.h> +#include "qedf.h" +#include <scsi/scsi_tcq.h> + +void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + unsigned int timer_msec) +{ + queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work, + msecs_to_jiffies(timer_msec)); +} + +static void qedf_cmd_timeout(struct work_struct *work) +{ + + struct qedf_ioreq *io_req = + container_of(work, struct qedf_ioreq, timeout_work.work); + struct qedf_ctx *qedf = io_req->fcport->qedf; + struct qedf_rport *fcport = io_req->fcport; + u8 op = 0; + + switch (io_req->cmd_type) { + case QEDF_ABTS: + QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n", + io_req->xid); + /* Cleanup timed out ABTS */ + qedf_initiate_cleanup(io_req, true); + complete(&io_req->abts_done); + + /* + * Need to call kref_put for reference taken when initiate_abts + * was called since abts_compl won't be called now that we've + * cleaned up the task. 
+ */ + kref_put(&io_req->refcount, qedf_release_cmd); + + /* + * Now that the original I/O and the ABTS are complete see + * if we need to reconnect to the target. + */ + qedf_restart_rport(fcport); + break; + case QEDF_ELS: + kref_get(&io_req->refcount); + /* + * Don't attempt to clean an ELS timeout as any subseqeunt + * ABTS or cleanup requests just hang. For now just free + * the resources of the original I/O and the RRQ + */ + QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n", + io_req->xid); + io_req->event = QEDF_IOREQ_EV_ELS_TMO; + /* Call callback function to complete command */ + if (io_req->cb_func && io_req->cb_arg) { + op = io_req->cb_arg->op; + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + qedf_initiate_cleanup(io_req, true); + kref_put(&io_req->refcount, qedf_release_cmd); + break; + case QEDF_SEQ_CLEANUP: + QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, " + "xid=0x%x.\n", io_req->xid); + qedf_initiate_cleanup(io_req, true); + io_req->event = QEDF_IOREQ_EV_ELS_TMO; + qedf_process_seq_cleanup_compl(qedf, NULL, io_req); + break; + default: + break; + } +} + +void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr) +{ + struct io_bdt *bdt_info; + struct qedf_ctx *qedf = cmgr->qedf; + size_t bd_tbl_sz; + u16 min_xid = QEDF_MIN_XID; + u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); + int num_ios; + int i; + struct qedf_ioreq *io_req; + + num_ios = max_xid - min_xid + 1; + + /* Free fcoe_bdt_ctx structures */ + if (!cmgr->io_bdt_pool) + goto free_cmd_pool; + + bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + if (bdt_info->bd_tbl) { + dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz, + bdt_info->bd_tbl, bdt_info->bd_tbl_dma); + bdt_info->bd_tbl = NULL; + } + } + + /* Destroy io_bdt pool */ + for (i = 0; i < num_ios; i++) { + kfree(cmgr->io_bdt_pool[i]); + cmgr->io_bdt_pool[i] = NULL; + } + + kfree(cmgr->io_bdt_pool); + cmgr->io_bdt_pool = NULL; + +free_cmd_pool: + + for (i = 0; i < num_ios; i++) { + io_req = &cmgr->cmds[i]; + /* Make sure we free per command sense buffer */ + if (io_req->sense_buffer) + dma_free_coherent(&qedf->pdev->dev, + QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer, + io_req->sense_buffer_dma); + cancel_delayed_work_sync(&io_req->rrq_work); + } + + /* Free command manager itself */ + vfree(cmgr); +} + +static void qedf_handle_rrq(struct work_struct *work) +{ + struct qedf_ioreq *io_req = + container_of(work, struct qedf_ioreq, rrq_work.work); + + qedf_send_rrq(io_req); + +} + +struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) +{ + struct qedf_cmd_mgr *cmgr; + struct io_bdt *bdt_info; + struct qedf_ioreq *io_req; + u16 xid; + int i; + int num_ios; + u16 min_xid = QEDF_MIN_XID; + u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); + + /* Make sure num_queues is already set before calling this function */ + if (!qedf->num_queues) { + QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n"); + return NULL; + } + + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { + QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and " + "max_xid 0x%x.\n", min_xid, max_xid); + return NULL; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid " + "0x%x.\n", min_xid, max_xid); + + num_ios = max_xid - min_xid + 1; + + cmgr = vzalloc(sizeof(struct qedf_cmd_mgr)); + if (!cmgr) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n"); + return NULL; + } + + cmgr->qedf = qedf; + spin_lock_init(&cmgr->lock); + + /* + * Initialize list of qedf_ioreq. 
+ */ + xid = QEDF_MIN_XID; + + for (i = 0; i < num_ios; i++) { + io_req = &cmgr->cmds[i]; + INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout); + + io_req->xid = xid++; + + INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq); + + /* Allocate DMA memory to hold sense buffer */ + io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma, + GFP_KERNEL); + if (!io_req->sense_buffer) + goto mem_err; + } + + /* Allocate pool of io_bdts - one for each qedf_ioreq */ + cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *), + GFP_KERNEL); + + if (!cmgr->io_bdt_pool) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n"); + goto mem_err; + } + + for (i = 0; i < num_ios; i++) { + cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt), + GFP_KERNEL); + if (!cmgr->io_bdt_pool[i]) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc " + "io_bdt_pool[%d].\n", i); + goto mem_err; + } + } + + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge), + &bdt_info->bd_tbl_dma, GFP_KERNEL); + if (!bdt_info->bd_tbl) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc " + "bdt_tbl[%d].\n", i); + goto mem_err; + } + } + atomic_set(&cmgr->free_list_cnt, num_ios); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "cmgr->free_list_cnt=%d.\n", + atomic_read(&cmgr->free_list_cnt)); + + return cmgr; + +mem_err: + qedf_cmd_mgr_free(cmgr); + return NULL; +} + +struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) +{ + struct qedf_ctx *qedf = fcport->qedf; + struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr; + struct qedf_ioreq *io_req = NULL; + struct io_bdt *bd_tbl; + u16 xid; + uint32_t free_sqes; + int i; + unsigned long flags; + + free_sqes = atomic_read(&fcport->free_sqes); + + if (!free_sqes) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Returning NULL, free_sqes=%d.\n ", + free_sqes); + goto out_failed; + } + + /* Limit the number of outstanding R/W tasks */ + if ((atomic_read(&fcport->num_active_ios) >= + NUM_RW_TASKS_PER_CONNECTION)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Returning NULL, num_active_ios=%d.\n", + atomic_read(&fcport->num_active_ios)); + goto out_failed; + } + + /* Limit global TIDs certain tasks */ + if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Returning NULL, free_list_cnt=%d.\n", + atomic_read(&cmd_mgr->free_list_cnt)); + goto out_failed; + } + + spin_lock_irqsave(&cmd_mgr->lock, flags); + for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { + io_req = &cmd_mgr->cmds[cmd_mgr->idx]; + cmd_mgr->idx++; + if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS) + cmd_mgr->idx = 0; + + /* Check to make sure command was previously freed */ + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) + break; + } + + if (i == FCOE_PARAMS_NUM_TASKS) { + spin_unlock_irqrestore(&cmd_mgr->lock, flags); + goto out_failed; + } + + set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + spin_unlock_irqrestore(&cmd_mgr->lock, flags); + + atomic_inc(&fcport->num_active_ios); + atomic_dec(&fcport->free_sqes); + xid = io_req->xid; + atomic_dec(&cmd_mgr->free_list_cnt); + + io_req->cmd_mgr = cmd_mgr; + io_req->fcport = fcport; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + if (bd_tbl == 
NULL) { + QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid); + kref_put(&io_req->refcount, qedf_release_cmd); + goto out_failed; + } + bd_tbl->io_req = io_req; + io_req->cmd_type = cmd_type; + + /* Reset sequence offset data */ + io_req->rx_buf_off = 0; + io_req->tx_buf_off = 0; + io_req->rx_id = 0xffff; /* No OX_ID */ + + return io_req; + +out_failed: + /* Record failure for stats and return NULL to caller */ + qedf->alloc_failures++; + return NULL; +} + +static void qedf_free_mp_resc(struct qedf_ioreq *io_req) +{ + struct qedf_mp_req *mp_req = &(io_req->mp_req); + struct qedf_ctx *qedf = io_req->fcport->qedf; + uint64_t sz = sizeof(struct fcoe_sge); + + /* clear tm flags */ + mp_req->tm_flags = 0; + if (mp_req->mp_req_bd) { + dma_free_coherent(&qedf->pdev->dev, sz, + mp_req->mp_req_bd, mp_req->mp_req_bd_dma); + mp_req->mp_req_bd = NULL; + } + if (mp_req->mp_resp_bd) { + dma_free_coherent(&qedf->pdev->dev, sz, + mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma); + mp_req->mp_resp_bd = NULL; + } + if (mp_req->req_buf) { + dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + mp_req->req_buf, mp_req->req_buf_dma); + mp_req->req_buf = NULL; + } + if (mp_req->resp_buf) { + dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + mp_req->resp_buf, mp_req->resp_buf_dma); + mp_req->resp_buf = NULL; + } +} + +void qedf_release_cmd(struct kref *ref) +{ + struct qedf_ioreq *io_req = + container_of(ref, struct qedf_ioreq, refcount); + struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr; + struct qedf_rport *fcport = io_req->fcport; + + if (io_req->cmd_type == QEDF_ELS || + io_req->cmd_type == QEDF_TASK_MGMT_CMD) + qedf_free_mp_resc(io_req); + + atomic_inc(&cmd_mgr->free_list_cnt); + atomic_dec(&fcport->num_active_ios); + if (atomic_read(&fcport->num_active_ios) < 0) + QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); + + /* Increment task retry identifier now that the request is released */ + io_req->task_retry_identifier++; + + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); +} + +static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len, + int bd_index) +{ + struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; + int frag_size, sg_frags; + + sg_frags = 0; + while (sg_len) { + if (sg_len > QEDF_BD_SPLIT_SZ) + frag_size = QEDF_BD_SPLIT_SZ; + else + frag_size = sg_len; + bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr); + bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr); + bd[bd_index + sg_frags].size = (uint16_t)frag_size; + + addr += (u64)frag_size; + sg_frags++; + sg_len -= frag_size; + } + return sg_frags; +} + +static int qedf_map_sg(struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct Scsi_Host *host = sc->device->host; + struct fc_lport *lport = shost_priv(host); + struct qedf_ctx *qedf = lport_priv(lport); + struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int sg_count = 0; + int bd_count = 0; + int sg_frags; + unsigned int sg_len; + u64 addr, end_addr; + int i; + + sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + + sg = scsi_sglist(sc); + + /* + * New condition to send single SGE as cached-SGL with length less + * than 64k. 
+ */ + if ((sg_count == 1) && (sg_dma_len(sg) <= + QEDF_MAX_SGLEN_FOR_CACHESGL)) { + sg_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + + bd[bd_count].sge_addr.lo = (addr & 0xffffffff); + bd[bd_count].sge_addr.hi = (addr >> 32); + bd[bd_count].size = (u16)sg_len; + + return ++bd_count; + } + + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + end_addr = (u64)(addr + sg_len); + + /* + * First s/g element in the list so check if the end_addr + * is paged aligned. Also check to make sure the length is + * at least page size. + */ + if ((i == 0) && (sg_count > 1) && + ((end_addr % QEDF_PAGE_SIZE) || + sg_len < QEDF_PAGE_SIZE)) + io_req->use_slowpath = true; + /* + * Last s/g element so check if the start address is paged + * aligned. + */ + else if ((i == (sg_count - 1)) && (sg_count > 1) && + (addr % QEDF_PAGE_SIZE)) + io_req->use_slowpath = true; + /* + * Intermediate s/g element so check if start and end address + * is page aligned. + */ + else if ((i != 0) && (i != (sg_count - 1)) && + ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE))) + io_req->use_slowpath = true; + + if (sg_len > QEDF_MAX_BD_LEN) { + sg_frags = qedf_split_bd(io_req, addr, sg_len, + bd_count); + } else { + sg_frags = 1; + bd[bd_count].sge_addr.lo = U64_LO(addr); + bd[bd_count].sge_addr.hi = U64_HI(addr); + bd[bd_count].size = (uint16_t)sg_len; + } + + bd_count += sg_frags; + byte_count += sg_len; + } + + if (byte_count != scsi_bufflen(sc)) + QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != " + "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count, + scsi_bufflen(sc), io_req->xid); + + return bd_count; +} + +static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; + int bd_count; + + if (scsi_sg_count(sc)) { + bd_count = qedf_map_sg(io_req); + if (bd_count == 0) + return -ENOMEM; + } else { + bd_count = 0; + bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0; + bd[0].size = 0; + } + io_req->bd_tbl->bd_valid = bd_count; + + return 0; +} + +static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req, + struct fcp_cmnd *fcp_cmnd) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + + /* fcp_cmnd is 32 bytes */ + memset(fcp_cmnd, 0, FCP_CMND_LEN); + + /* 8 bytes: SCSI LUN info */ + int_to_scsilun(sc_cmd->device->lun, + (struct scsi_lun *)&fcp_cmnd->fc_lun); + + /* 4 bytes: flag info */ + fcp_cmnd->fc_pri_ta = 0; + fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; + fcp_cmnd->fc_flags = io_req->io_req_flags; + fcp_cmnd->fc_cmdref = 0; + + /* Populate data direction */ + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + fcp_cmnd->fc_flags |= FCP_CFL_WRDATA; + else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) + fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; + + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; + + /* 16 bytes: CDB information */ + memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); + + /* 4 bytes: FCP data length */ + fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); + +} + +static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, + struct qedf_ioreq *io_req, u32 *ptu_invalidate, + struct fcoe_task_context *task_ctx) +{ + enum fcoe_task_type task_type; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct io_bdt *bd_tbl = io_req->bd_tbl; + union fcoe_data_desc_ctx *data_desc; + u32 *fcp_cmnd; + u32 tmp_fcp_cmnd[8]; + int cnt, i; + int bd_count; + struct qedf_ctx *qedf = fcport->qedf; + uint16_t cq_idx = smp_processor_id() % qedf->num_queues; + u8 
tmp_sgl_mode = 0; + u8 mst_sgl_mode = 0; + + memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + io_req->task = task_ctx; + + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + task_type = FCOE_TASK_TYPE_WRITE_INITIATOR; + else + task_type = FCOE_TASK_TYPE_READ_INITIATOR; + + /* Y Storm context */ + task_ctx->ystorm_st_context.expect_first_xfer = 1; + task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len; + /* Check if this is required */ + task_ctx->ystorm_st_context.ox_id = io_req->xid; + task_ctx->ystorm_st_context.task_rety_identifier = + io_req->task_retry_identifier; + + /* T Storm ag context */ + SET_FIELD(task_ctx->tstorm_ag_context.flags0, + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE); + task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid; + + /* T Storm st context */ + SET_FIELD(task_ctx->tstorm_st_context.read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, + 1); + task_ctx->tstorm_st_context.read_write.rx_id = 0xffff; + + task_ctx->tstorm_st_context.read_only.dev_type = + FCOE_TASK_DEV_TYPE_DISK; + task_ctx->tstorm_st_context.read_only.conf_supported = 0; + task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid; + + /* Completion queue for response. */ + task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx; + task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size = + io_req->data_xfer_len; + task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val = + lport->e_d_tov; + + task_ctx->ustorm_ag_context.global_cq_num = cq_idx; + io_req->fp_idx = cq_idx; + + bd_count = bd_tbl->bd_valid; + if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) { + /* Setup WRITE task */ + struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl; + + task_ctx->ystorm_st_context.task_type = + FCOE_TASK_TYPE_WRITE_INITIATOR; + data_desc = &task_ctx->ystorm_st_context.data_desc; + + if (io_req->use_slowpath) { + SET_FIELD(task_ctx->ystorm_st_context.sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, + FCOE_SLOW_SGL); + data_desc->slow.base_sgl_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + data_desc->slow.base_sgl_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + data_desc->slow.remainder_num_sges = bd_count; + data_desc->slow.curr_sge_off = 0; + data_desc->slow.curr_sgl_index = 0; + qedf->slow_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SLOW_SGE; + } else { + SET_FIELD(task_ctx->ystorm_st_context.sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, + (bd_count <= 4) ? 
(enum fcoe_sgl_mode)bd_count : + FCOE_MUL_FAST_SGES); + + if (bd_count == 1) { + data_desc->single_sge.sge_addr.lo = + fcoe_bd_tbl->sge_addr.lo; + data_desc->single_sge.sge_addr.hi = + fcoe_bd_tbl->sge_addr.hi; + data_desc->single_sge.size = + fcoe_bd_tbl->size; + data_desc->single_sge.is_valid_sge = 0; + qedf->single_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SINGLE_SGE; + } else { + data_desc->fast.sgl_start_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + data_desc->fast.sgl_start_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + data_desc->fast.sgl_byte_offset = + data_desc->fast.sgl_start_addr.lo & + (QEDF_PAGE_SIZE - 1); + if (data_desc->fast.sgl_byte_offset > 0) + QEDF_ERR(&(qedf->dbg_ctx), + "byte_offset=%u for xid=0x%x.\n", + io_req->xid, + data_desc->fast.sgl_byte_offset); + data_desc->fast.task_reuse_cnt = + io_req->reuse_count; + io_req->reuse_count++; + if (io_req->reuse_count == QEDF_MAX_REUSE) { + *ptu_invalidate = 1; + io_req->reuse_count = 0; + } + qedf->fast_sge_ios++; + io_req->sge_type = QEDF_IOREQ_FAST_SGE; + } + } + + /* T Storm context */ + task_ctx->tstorm_st_context.read_only.task_type = + FCOE_TASK_TYPE_WRITE_INITIATOR; + + /* M Storm context */ + tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE); + SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, + FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE, + tmp_sgl_mode); + + } else { + /* Setup READ task */ + + /* M Storm context */ + struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl; + + data_desc = &task_ctx->mstorm_st_context.fp.data_desc; + task_ctx->mstorm_st_context.fp.data_2_trns_rem = + io_req->data_xfer_len; + + if (io_req->use_slowpath) { + SET_FIELD( + task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, + FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE, + FCOE_SLOW_SGL); + data_desc->slow.base_sgl_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + data_desc->slow.base_sgl_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + data_desc->slow.remainder_num_sges = + bd_count; + data_desc->slow.curr_sge_off = 0; + data_desc->slow.curr_sgl_index = 0; + qedf->slow_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SLOW_SGE; + } else { + SET_FIELD( + task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, + FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE, + (bd_count <= 4) ? 
(enum fcoe_sgl_mode)bd_count : + FCOE_MUL_FAST_SGES); + + if (bd_count == 1) { + data_desc->single_sge.sge_addr.lo = + fcoe_bd_tbl->sge_addr.lo; + data_desc->single_sge.sge_addr.hi = + fcoe_bd_tbl->sge_addr.hi; + data_desc->single_sge.size = + fcoe_bd_tbl->size; + data_desc->single_sge.is_valid_sge = 0; + qedf->single_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SINGLE_SGE; + } else { + data_desc->fast.sgl_start_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + data_desc->fast.sgl_start_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + data_desc->fast.sgl_byte_offset = 0; + data_desc->fast.task_reuse_cnt = + io_req->reuse_count; + io_req->reuse_count++; + if (io_req->reuse_count == QEDF_MAX_REUSE) { + *ptu_invalidate = 1; + io_req->reuse_count = 0; + } + qedf->fast_sge_ios++; + io_req->sge_type = QEDF_IOREQ_FAST_SGE; + } + } + + /* Y Storm context */ + task_ctx->ystorm_st_context.expect_first_xfer = 0; + task_ctx->ystorm_st_context.task_type = + FCOE_TASK_TYPE_READ_INITIATOR; + + /* T Storm context */ + task_ctx->tstorm_st_context.read_only.task_type = + FCOE_TASK_TYPE_READ_INITIATOR; + mst_sgl_mode = GET_FIELD( + task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, + FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE); + SET_FIELD(task_ctx->tstorm_st_context.read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE, + mst_sgl_mode); + } + + /* fill FCP_CMND IU */ + fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque; + qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); + + /* Swap fcp_cmnd since FC is big endian */ + cnt = sizeof(struct fcp_cmnd) / sizeof(u32); + + for (i = 0; i < cnt; i++) { + *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]); + fcp_cmnd++; + } + + /* M Storm context - Sense buffer */ + task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo = + U64_LO(io_req->sense_buffer_dma); + task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi = + U64_HI(io_req->sense_buffer_dma); +} + +void qedf_init_mp_task(struct qedf_ioreq *io_req, + struct fcoe_task_context *task_ctx) +{ + struct qedf_mp_req *mp_req = &(io_req->mp_req); + struct qedf_rport *fcport = io_req->fcport; + struct qedf_ctx *qedf = io_req->fcport->qedf; + struct fc_frame_header *fc_hdr; + enum fcoe_task_type task_type = 0; + union fcoe_data_desc_ctx *data_desc; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task " + "for cmd_type = %d\n", io_req->cmd_type); + + qedf->control_requests++; + + /* Obtain task_type */ + if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) || + (io_req->cmd_type == QEDF_ELS)) { + task_type = FCOE_TASK_TYPE_MIDPATH; + } else if (io_req->cmd_type == QEDF_ABTS) { + task_type = FCOE_TASK_TYPE_ABTS; + } + + memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task_ctx; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n", + task_type); + + /* YSTORM only */ + { + /* Initialize YSTORM task context */ + struct fcoe_tx_mid_path_params *task_fc_hdr = + &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path; + memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params)); + task_ctx->ystorm_st_context.task_rety_identifier = + io_req->task_retry_identifier; + + /* Init SGL parameters */ + if ((task_type == FCOE_TASK_TYPE_MIDPATH) || + (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { + data_desc = &task_ctx->ystorm_st_context.data_desc; + data_desc->slow.base_sgl_addr.lo = + U64_LO(mp_req->mp_req_bd_dma); + data_desc->slow.base_sgl_addr.hi = + U64_HI(mp_req->mp_req_bd_dma); + 
data_desc->slow.remainder_num_sges = 1;
+ data_desc->slow.curr_sge_off = 0;
+ data_desc->slow.curr_sgl_index = 0;
+ }
+
+ fc_hdr = &(mp_req->req_fc_hdr);
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ fc_hdr->fh_ox_id = io_req->xid;
+ fc_hdr->fh_rx_id = htons(0xffff);
+ } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
+ fc_hdr->fh_rx_id = io_req->xid;
+ }
+
+ /* Fill FC Header into middle path buffer */
+ task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
+ task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
+ task_fc_hdr->type = fc_hdr->fh_type;
+ task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
+ task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
+ task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
+ task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
+
+ task_ctx->ystorm_st_context.data_2_trns_rem =
+ io_req->data_xfer_len;
+ task_ctx->ystorm_st_context.task_type = task_type;
+ }
+
+ /* TSTORM ONLY */
+ {
+ task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
+ task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
+ /* Always send middle-path responses on CQ #0 */
+ task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
+ io_req->fp_idx = 0;
+ SET_FIELD(task_ctx->tstorm_ag_context.flags0,
+ TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
+ PROTOCOLID_FCOE);
+ task_ctx->tstorm_st_context.read_only.task_type = task_type;
+ SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
+ 1);
+ task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
+ }
+
+ /* MSTORM only */
+ {
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ /* Initialize task context */
+ data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
+
+ /* Set cache sges address and length */
+ data_desc->slow.base_sgl_addr.lo =
+ U64_LO(mp_req->mp_resp_bd_dma);
+ data_desc->slow.base_sgl_addr.hi =
+ U64_HI(mp_req->mp_resp_bd_dma);
+ data_desc->slow.remainder_num_sges = 1;
+ data_desc->slow.curr_sge_off = 0;
+ data_desc->slow.curr_sgl_index = 0;
+
+ /*
+ * Also need to fill in non-fastpath response address
+ * for middle path commands.
+ */
+ task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
+ U64_LO(mp_req->mp_resp_bd_dma);
+ task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
+ U64_HI(mp_req->mp_resp_bd_dma);
+ }
+ }
+
+ /* USTORM ONLY */
+ {
+ task_ctx->ustorm_ag_context.global_cq_num = 0;
+ }
+
+ /* I/O stats.
Middle path commands always use slow SGEs */ + qedf->slow_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SLOW_SGE; +} + +void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate, + enum fcoe_task_type req_type, u32 offset) +{ + struct fcoe_wqe *sqe; + uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); + + sqe = &fcport->sq[fcport->sq_prod_idx]; + + fcport->sq_prod_idx++; + fcport->fw_sq_prod_idx++; + if (fcport->sq_prod_idx == total_sqe) + fcport->sq_prod_idx = 0; + + switch (req_type) { + case FCOE_TASK_TYPE_WRITE_INITIATOR: + case FCOE_TASK_TYPE_READ_INITIATOR: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD); + if (ptu_invalidate) + SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1); + break; + case FCOE_TASK_TYPE_MIDPATH: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH); + break; + case FCOE_TASK_TYPE_ABTS: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, + SEND_FCOE_ABTS_REQUEST); + break; + case FCOE_TASK_TYPE_EXCHANGE_CLEANUP: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, + FCOE_EXCHANGE_CLEANUP); + break; + case FCOE_TASK_TYPE_SEQUENCE_CLEANUP: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, + FCOE_SEQUENCE_RECOVERY); + /* NOTE: offset param only used for sequence recovery */ + sqe->additional_info_union.seq_rec_updated_offset = offset; + break; + case FCOE_TASK_TYPE_UNSOLICITED: + break; + default: + break; + } + + sqe->task_id = xid; + + /* Make sure SQ data is coherent */ + wmb(); + +} + +void qedf_ring_doorbell(struct qedf_rport *fcport) +{ + struct fcoe_db_data dbell = { 0 }; + + dbell.agg_flags = 0; + + dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT; + dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT; + dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD << + FCOE_DB_DATA_AGG_VAL_SEL_SHIFT; + + dbell.sq_prod = fcport->fw_sq_prod_idx; + writel(*(u32 *)&dbell, fcport->p_doorbell); + /* Make sure SQ index is updated so f/w prcesses requests in order */ + wmb(); + mmiowb(); +} + +static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, + int8_t direction) +{ + struct qedf_ctx *qedf = fcport->qedf; + struct qedf_io_log *io_log; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + unsigned long flags; + uint8_t op; + + spin_lock_irqsave(&qedf->io_trace_lock, flags); + + io_log = &qedf->io_trace_buf[qedf->io_trace_idx]; + io_log->direction = direction; + io_log->task_id = io_req->xid; + io_log->port_id = fcport->rdata->ids.port_id; + io_log->lun = sc_cmd->device->lun; + io_log->op = op = sc_cmd->cmnd[0]; + io_log->lba[0] = sc_cmd->cmnd[2]; + io_log->lba[1] = sc_cmd->cmnd[3]; + io_log->lba[2] = sc_cmd->cmnd[4]; + io_log->lba[3] = sc_cmd->cmnd[5]; + io_log->bufflen = scsi_bufflen(sc_cmd); + io_log->sg_count = scsi_sg_count(sc_cmd); + io_log->result = sc_cmd->result; + io_log->jiffies = jiffies; + io_log->refcount = kref_read(&io_req->refcount); + + if (direction == QEDF_IO_TRACE_REQ) { + /* For requests we only care abot the submission CPU */ + io_log->req_cpu = io_req->cpu; + io_log->int_cpu = 0; + io_log->rsp_cpu = 0; + } else if (direction == QEDF_IO_TRACE_RSP) { + io_log->req_cpu = io_req->cpu; + io_log->int_cpu = io_req->int_cpu; + io_log->rsp_cpu = smp_processor_id(); + } + + io_log->sge_type = io_req->sge_type; + + qedf->io_trace_idx++; + if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE) + qedf->io_trace_idx = 0; + + spin_unlock_irqrestore(&qedf->io_trace_lock, flags); +} + +int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct Scsi_Host 
*host = sc_cmd->device->host; + struct fc_lport *lport = shost_priv(host); + struct qedf_ctx *qedf = lport_priv(lport); + struct fcoe_task_context *task_ctx; + u16 xid; + enum fcoe_task_type req_type = 0; + u32 ptu_invalidate = 0; + + /* Initialize rest of io_req fileds */ + io_req->data_xfer_len = scsi_bufflen(sc_cmd); + sc_cmd->SCp.ptr = (char *)io_req; + io_req->use_slowpath = false; /* Assume fast SGL by default */ + + /* Record which cpu this request is associated with */ + io_req->cpu = smp_processor_id(); + + if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { + req_type = FCOE_TASK_TYPE_READ_INITIATOR; + io_req->io_req_flags = QEDF_READ; + qedf->input_requests++; + } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + req_type = FCOE_TASK_TYPE_WRITE_INITIATOR; + io_req->io_req_flags = QEDF_WRITE; + qedf->output_requests++; + } else { + io_req->io_req_flags = 0; + qedf->control_requests++; + } + + xid = io_req->xid; + + /* Build buffer descriptor list for firmware from sg list */ + if (qedf_build_bd_list_from_sg(io_req)) { + QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n"); + kref_put(&io_req->refcount, qedf_release_cmd); + return -EAGAIN; + } + + /* Get the task context */ + task_ctx = qedf_get_task_mem(&qedf->tasks, xid); + if (!task_ctx) { + QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n", + xid); + kref_put(&io_req->refcount, qedf_release_cmd); + return -EINVAL; + } + + qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx); + + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); + kref_put(&io_req->refcount, qedf_release_cmd); + } + + /* Obtain free SQ entry */ + qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0); + + /* Ring doorbell */ + qedf_ring_doorbell(fcport); + + if (qedf_io_tracing && io_req->sc_cmd) + qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ); + + return false; +} + +int +qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport = shost_priv(host); + struct qedf_ctx *qedf = lport_priv(lport); + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct qedf_rport *fcport = rport->dd_data; + struct qedf_ioreq *io_req; + int rc = 0; + int rval; + unsigned long flags = 0; + + + if (test_bit(QEDF_UNLOADING, &qedf->flags) || + test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { + sc_cmd->result = DID_NO_CONNECT << 16; + sc_cmd->scsi_done(sc_cmd); + return 0; + } + + rval = fc_remote_port_chkready(rport); + if (rval) { + sc_cmd->result = rval; + sc_cmd->scsi_done(sc_cmd); + return 0; + } + + /* Retry command if we are doing a qed drain operation */ + if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + if (lport->state != LPORT_ST_READY || + atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + /* rport and tgt are allocated together, so tgt should be non-NULL */ + fcport = (struct qedf_rport *)&rp[1]; + + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + /* + * Session is not offloaded yet. Let SCSI-ml retry + * the command. 
+ */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + if (fcport->retry_delay_timestamp) { + if (time_after(jiffies, fcport->retry_delay_timestamp)) { + fcport->retry_delay_timestamp = 0; + } else { + /* If retry_delay timer is active, flow off the ML */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + } + + io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); + if (!io_req) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + io_req->sc_cmd = sc_cmd; + + /* Take fcport->rport_lock for posting to fcport send queue */ + spin_lock_irqsave(&fcport->rport_lock, flags); + if (qedf_post_io_req(fcport, io_req)) { + QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n"); + /* Return SQE to pool */ + atomic_inc(&fcport->free_sqes); + rc = SCSI_MLQUEUE_HOST_BUSY; + } + spin_unlock_irqrestore(&fcport->rport_lock, flags); + +exit_qcmd: + return rc; +} + +static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req, + struct fcoe_cqe_rsp_info *fcp_rsp) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct qedf_ctx *qedf = io_req->fcport->qedf; + u8 rsp_flags = fcp_rsp->rsp_flags.flags; + int fcp_sns_len = 0; + int fcp_rsp_len = 0; + uint8_t *rsp_info, *sense_data; + + io_req->fcp_status = FC_GOOD; + io_req->fcp_resid = 0; + if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER | + FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)) + io_req->fcp_resid = fcp_rsp->fcp_resid; + + io_req->scsi_comp_flags = rsp_flags; + CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status = + fcp_rsp->scsi_status_code; + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) + fcp_rsp_len = fcp_rsp->fcp_rsp_len; + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) + fcp_sns_len = fcp_rsp->fcp_sns_len; + + io_req->fcp_rsp_len = fcp_rsp_len; + io_req->fcp_sns_len = fcp_sns_len; + rsp_info = sense_data = io_req->sense_buffer; + + /* fetch fcp_rsp_code */ + if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { + /* Only for task management function */ + io_req->fcp_rsp_code = rsp_info[3]; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "fcp_rsp_code = %d\n", io_req->fcp_rsp_code); + /* Adjust sense-data location. 
*/ + sense_data += fcp_rsp_len; + } + + if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Truncating sense buffer\n"); + fcp_sns_len = SCSI_SENSE_BUFFERSIZE; + } + + memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (fcp_sns_len) + memcpy(sc_cmd->sense_buffer, sense_data, + fcp_sns_len); +} + +static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + + if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { + dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + io_req->bd_tbl->bd_valid = 0; + } +} + +void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + u16 xid, rval; + struct fcoe_task_context *task_ctx; + struct scsi_cmnd *sc_cmd; + struct fcoe_cqe_rsp_info *fcp_rsp; + struct qedf_rport *fcport; + int refcount; + u16 scope, qualifier = 0; + u8 fw_residual_flag = 0; + + if (!io_req) + return; + if (!cqe) + return; + + xid = io_req->xid; + task_ctx = qedf_get_task_mem(&qedf->tasks, xid); + sc_cmd = io_req->sc_cmd; + fcp_rsp = &cqe->cqe_info.rsp_info; + + if (!sc_cmd) { + QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); + return; + } + + if (!sc_cmd->SCp.ptr) { + QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in " + "another context.\n"); + return; + } + + if (!sc_cmd->request) { + QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, " + "sc_cmd=%p.\n", sc_cmd); + return; + } + + if (!sc_cmd->request->special) { + QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so " + "request not valid, sc_cmd=%p.\n", sc_cmd); + return; + } + + if (!sc_cmd->request->q) { + QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request " + "is not valid, sc_cmd=%p.\n", sc_cmd); + return; + } + + fcport = io_req->fcport; + + qedf_parse_fcp_rsp(io_req, fcp_rsp); + + qedf_unmap_sg_list(qedf, io_req); + + /* Check for FCP transport error */ + if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) { + QEDF_ERR(&(qedf->dbg_ctx), + "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d " + "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len, + io_req->fcp_rsp_code); + sc_cmd->result = DID_BUS_BUSY << 16; + goto out; + } + + fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags, + FCOE_CQE_RSP_INFO_FW_UNDERRUN); + if (fw_residual_flag) { + QEDF_ERR(&(qedf->dbg_ctx), + "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x " + "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid, + fcp_rsp->rsp_flags.flags, io_req->fcp_resid, + cqe->cqe_info.rsp_info.fw_residual); + + if (io_req->cdb_status == 0) + sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status; + else + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + + /* Abort the command since we did not get all the data */ + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); + sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status; + } + + /* + * Set resid to the whole buffer length so we won't try to resue + * any previously data. 
+ */ + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + goto out; + } + + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good I/O completion */ + sc_cmd->result = DID_OK << 16; + } else { + refcount = kref_read(&io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "%d:0:%d:%d xid=0x%0x op=0x%02x " + "lba=%02x%02x%02x%02x cdb_status=%d " + "fcp_resid=0x%x refcount=%d.\n", + qedf->lport->host->host_no, sc_cmd->device->id, + sc_cmd->device->lun, io_req->xid, + sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3], + sc_cmd->cmnd[4], sc_cmd->cmnd[5], + io_req->cdb_status, io_req->fcp_resid, + refcount); + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + + if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || + io_req->cdb_status == SAM_STAT_BUSY) { + /* + * Check whether we need to set retry_delay at + * all based on retry_delay module parameter + * and the status qualifier. + */ + + /* Upper 2 bits */ + scope = fcp_rsp->retry_delay_timer & 0xC000; + /* Lower 14 bits */ + qualifier = fcp_rsp->retry_delay_timer & 0x3FFF; + + if (qedf_retry_delay && + scope > 0 && qualifier > 0 && + qualifier <= 0x3FEF) { + /* Check we don't go over the max */ + if (qualifier > QEDF_RETRY_DELAY_MAX) + qualifier = + QEDF_RETRY_DELAY_MAX; + fcport->retry_delay_timestamp = + jiffies + (qualifier * HZ / 10); + } + } + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + default: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n", + io_req->fcp_status); + break; + } + +out: + if (qedf_io_tracing) + qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP); + + io_req->sc_cmd = NULL; + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); + kref_put(&io_req->refcount, qedf_release_cmd); +} + +/* Return a SCSI command in some other context besides a normal completion */ +void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + int result) +{ + u16 xid; + struct scsi_cmnd *sc_cmd; + int refcount; + + if (!io_req) + return; + + xid = io_req->xid; + sc_cmd = io_req->sc_cmd; + + if (!sc_cmd) { + QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); + return; + } + + if (!sc_cmd->SCp.ptr) { + QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in " + "another context.\n"); + return; + } + + qedf_unmap_sg_list(qedf, io_req); + + sc_cmd->result = result << 16; + refcount = kref_read(&io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing " + "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, " + "allowed=%d retries=%d refcount=%d.\n", + qedf->lport->host->host_no, sc_cmd->device->id, + sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0], + sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4], + sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries, + refcount); + + /* + * Set resid to the whole buffer length so we won't try to resue any + * previously read data + */ + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + + if (qedf_io_tracing) + qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP); + + io_req->sc_cmd = NULL; + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); + kref_put(&io_req->refcount, qedf_release_cmd); +} + +/* + * Handle warning type CQE completions. This is mainly used for REC timer + * popping. 
+ */ +void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + int rval, i; + struct qedf_rport *fcport = io_req->fcport; + u64 err_warn_bit_map; + u8 err_warn = 0xff; + + if (!cqe) + return; + + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, " + "xid=0x%x\n", io_req->xid); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), + "err_warn_bitmap=%08x:%08x\n", + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " + "rx_buff_off=%08x, rx_id=%04x\n", + le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_id)); + + /* Normalize the error bitmap value to an just an unsigned int */ + err_warn_bit_map = (u64) + ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) | + (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo; + for (i = 0; i < 64; i++) { + if (err_warn_bit_map & (u64)((u64)1 << i)) { + err_warn = i; + break; + } + } + + /* Check if REC TOV expired if this is a tape device */ + if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { + if (err_warn == + FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) { + QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n"); + if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) { + io_req->rx_buf_off = + cqe->cqe_info.err_info.rx_buf_off; + io_req->tx_buf_off = + cqe->cqe_info.err_info.tx_buf_off; + io_req->rx_id = cqe->cqe_info.err_info.rx_id; + rval = qedf_send_rec(io_req); + /* + * We only want to abort the io_req if we + * can't queue the REC command as we want to + * keep the exchange open for recovery. + */ + if (rval) + goto send_abort; + } + return; + } + } + +send_abort: + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); +} + +/* Cleanup a command when we receive an error detection completion */ +void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + int rval; + + if (!cqe) + return; + + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, " + "xid=0x%x\n", io_req->xid); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), + "err_warn_bitmap=%08x:%08x\n", + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " + "rx_buff_off=%08x, rx_id=%04x\n", + le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_id)); + + if (qedf->stop_io_on_error) { + qedf_stop_all_io(qedf); + return; + } + + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); +} + +static void qedf_flush_els_req(struct qedf_ctx *qedf, + struct qedf_ioreq *els_req) +{ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid, + kref_read(&els_req->refcount)); + + /* + * Need to distinguish this from a timeout when calling the + * els_req->cb_func. 
+ */ + els_req->event = QEDF_IOREQ_EV_ELS_FLUSH; + + /* Cancel the timer */ + cancel_delayed_work_sync(&els_req->timeout_work); + + /* Call callback function to complete command */ + if (els_req->cb_func && els_req->cb_arg) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + /* Release kref for original initiate_els */ + kref_put(&els_req->refcount, qedf_release_cmd); +} + +/* A value of -1 for lun is a wild card that means flush all + * active SCSI I/Os for the target. + */ +void qedf_flush_active_ios(struct qedf_rport *fcport, int lun) +{ + struct qedf_ioreq *io_req; + struct qedf_ctx *qedf; + struct qedf_cmd_mgr *cmd_mgr; + int i, rc; + + if (!fcport) + return; + + qedf = fcport->qedf; + cmd_mgr = qedf->cmd_mgr; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n"); + + for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { + io_req = &cmd_mgr->cmds[i]; + + if (!io_req) + continue; + if (io_req->fcport != fcport) + continue; + if (io_req->cmd_type == QEDF_ELS) { + rc = kref_get_unless_zero(&io_req->refcount); + if (!rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "Could not get kref for io_req=0x%p.\n", + io_req); + continue; + } + qedf_flush_els_req(qedf, io_req); + /* + * Release the kref and go back to the top of the + * loop. + */ + goto free_cmd; + } + + if (!io_req->sc_cmd) + continue; + if (lun > 0) { + if (io_req->sc_cmd->device->lun != + (u64)lun) + continue; + } + + /* + * Use kref_get_unless_zero in the unlikely case the command + * we're about to flush was completed in the normal SCSI path + */ + rc = kref_get_unless_zero(&io_req->refcount); + if (!rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for " + "io_req=0x%p\n", io_req); + continue; + } + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Cleanup xid=0x%x.\n", io_req->xid); + + /* Cleanup task and return I/O mid-layer */ + qedf_initiate_cleanup(io_req, true); + +free_cmd: + kref_put(&io_req->refcount, qedf_release_cmd); + } +} + +/* + * Initiate a ABTS middle path command. Note that we don't have to initialize + * the task context for an ABTS task. 
+ */ +int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) +{ + struct fc_lport *lport; + struct qedf_rport *fcport = io_req->fcport; + struct fc_rport_priv *rdata = fcport->rdata; + struct qedf_ctx *qedf = fcport->qedf; + u16 xid; + u32 r_a_tov = 0; + int rc = 0; + unsigned long flags; + + r_a_tov = rdata->r_a_tov; + lport = qedf->lport; + + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n"); + rc = 1; + goto abts_err; + } + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); + rc = 1; + goto abts_err; + } + + if (atomic_read(&qedf->link_down_tmo_valid) > 0) { + QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n"); + rc = 1; + goto abts_err; + } + + /* Ensure room on SQ */ + if (!atomic_read(&fcport->free_sqes)) { + QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); + rc = 1; + goto abts_err; + } + + + kref_get(&io_req->refcount); + + xid = io_req->xid; + qedf->control_requests++; + qedf->packet_aborts++; + + /* Set the return CPU to be the same as the request one */ + io_req->cpu = smp_processor_id(); + + /* Set the command type to abort */ + io_req->cmd_type = QEDF_ABTS; + io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; + + set_bit(QEDF_CMD_IN_ABORT, &io_req->flags); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = " + "0x%x\n", xid); + + qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + /* Add ABTS to send queue */ + qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0); + + /* Ring doorbell */ + qedf_ring_doorbell(fcport); + + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + return rc; +abts_err: + /* + * If the ABTS task fails to queue then we need to cleanup the + * task at the firmware. + */ + qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts); + return rc; +} + +void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + uint32_t r_ctl; + uint16_t xid; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = " + "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type); + + cancel_delayed_work(&io_req->timeout_work); + + xid = io_req->xid; + r_ctl = cqe->cqe_info.abts_info.r_ctl; + + switch (r_ctl) { + case FC_RCTL_BA_ACC: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, + "ABTS response - ACC Send RRQ after R_A_TOV\n"); + io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS; + /* + * Dont release this cmd yet. 
It will be relesed + * after we get RRQ response + */ + kref_get(&io_req->refcount); + queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work, + msecs_to_jiffies(qedf->lport->r_a_tov)); + break; + /* For error cases let the cleanup return the command */ + case FC_RCTL_BA_RJT: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, + "ABTS response - RJT\n"); + io_req->event = QEDF_IOREQ_EV_ABORT_FAILED; + break; + default: + QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n"); + break; + } + + clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); + + if (io_req->sc_cmd) { + if (io_req->return_scsi_cmd_on_abts) + qedf_scsi_done(qedf, io_req, DID_ERROR); + } + + /* Notify eh_abort handler that ABTS is complete */ + complete(&io_req->abts_done); + + kref_put(&io_req->refcount, qedf_release_cmd); +} + +int qedf_init_mp_req(struct qedf_ioreq *io_req) +{ + struct qedf_mp_req *mp_req; + struct fcoe_sge *mp_req_bd; + struct fcoe_sge *mp_resp_bd; + struct qedf_ctx *qedf = io_req->fcport->qedf; + dma_addr_t addr; + uint64_t sz; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n"); + + mp_req = (struct qedf_mp_req *)&(io_req->mp_req); + memset(mp_req, 0, sizeof(struct qedf_mp_req)); + + if (io_req->cmd_type != QEDF_ELS) { + mp_req->req_len = sizeof(struct fcp_cmnd); + io_req->data_xfer_len = mp_req->req_len; + } else + mp_req->req_len = io_req->data_xfer_len; + + mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + &mp_req->req_buf_dma, GFP_KERNEL); + if (!mp_req->req_buf) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL); + if (!mp_req->resp_buf) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp " + "buffer\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + /* Allocate and map mp_req_bd and mp_resp_bd */ + sz = sizeof(struct fcoe_sge); + mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, + &mp_req->mp_req_bd_dma, GFP_KERNEL); + if (!mp_req->mp_req_bd) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, + &mp_req->mp_resp_bd_dma, GFP_KERNEL); + if (!mp_req->mp_resp_bd) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + /* Fill bd table */ + addr = mp_req->req_buf_dma; + mp_req_bd = mp_req->mp_req_bd; + mp_req_bd->sge_addr.lo = U64_LO(addr); + mp_req_bd->sge_addr.hi = U64_HI(addr); + mp_req_bd->size = QEDF_PAGE_SIZE; + + /* + * MP buffer is either a task mgmt command or an ELS. + * So the assumption is that it consumes a single bd + * entry in the bd table + */ + mp_resp_bd = mp_req->mp_resp_bd; + addr = mp_req->resp_buf_dma; + mp_resp_bd->sge_addr.lo = U64_LO(addr); + mp_resp_bd->sge_addr.hi = U64_HI(addr); + mp_resp_bd->size = QEDF_PAGE_SIZE; + + return 0; +} + +/* + * Last ditch effort to clear the port if it's stuck. Used only after a + * cleanup task times out. + */ +static void qedf_drain_request(struct qedf_ctx *qedf) +{ + if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n"); + return; + } + + /* Set bit to return all queuecommand requests as busy */ + set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); + + /* Call qed drain request for function. 
Should be synchronous */ + qed_ops->common->drain(qedf->cdev); + + /* Settle time for CQEs to be returned */ + msleep(100); + + /* Unplug and continue */ + clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); +} + +/* + * Returns SUCCESS if the cleanup task does not timeout, otherwise return + * FAILURE. + */ +int qedf_initiate_cleanup(struct qedf_ioreq *io_req, + bool return_scsi_cmd_on_abts) +{ + struct qedf_rport *fcport; + struct qedf_ctx *qedf; + uint16_t xid; + struct fcoe_task_context *task; + int tmo = 0; + int rc = SUCCESS; + unsigned long flags; + + fcport = io_req->fcport; + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL.\n"); + return SUCCESS; + } + + qedf = fcport->qedf; + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + return SUCCESS; + } + + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || + test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in " + "cleanup processing or already completed.\n", + io_req->xid); + return SUCCESS; + } + + /* Ensure room on SQ */ + if (!atomic_read(&fcport->free_sqes)) { + QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); + return FAILED; + } + + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n", + io_req->xid); + + /* Cleanup cmds re-use the same TID as the original I/O */ + xid = io_req->xid; + io_req->cmd_type = QEDF_CLEANUP; + io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; + + /* Set the return CPU to be the same as the request one */ + io_req->cpu = smp_processor_id(); + + set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + + task = qedf_get_task_mem(&qedf->tasks, xid); + + init_completion(&io_req->tm_done); + + /* Obtain free SQ entry */ + spin_lock_irqsave(&fcport->rport_lock, flags); + qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0); + + /* Ring doorbell */ + qedf_ring_doorbell(fcport); + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + tmo = wait_for_completion_timeout(&io_req->tm_done, + QEDF_CLEANUP_TIMEOUT * HZ); + + if (!tmo) { + rc = FAILED; + /* Timeout case */ + QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, " + "xid=%x.\n", io_req->xid); + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + /* Issue a drain request if cleanup task times out */ + QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n"); + qedf_drain_request(qedf); + } + + if (io_req->sc_cmd) { + if (io_req->return_scsi_cmd_on_abts) + qedf_scsi_done(qedf, io_req, DID_ERROR); + } + + if (rc == SUCCESS) + io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS; + else + io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED; + + return rc; +} + +void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n", + io_req->xid); + + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + + /* Complete so we can finish cleaning up the I/O */ + complete(&io_req->tm_done); +} + +static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, + uint8_t tm_flags) +{ + struct qedf_ioreq *io_req; + struct qedf_mp_req *tm_req; + struct fcoe_task_context *task; + struct fc_frame_header *fc_hdr; + struct fcp_cmnd *fcp_cmnd; + struct qedf_ctx *qedf = fcport->qedf; + int rc = 0; + uint16_t xid; + uint32_t sid, did; + int tmo = 0; + unsigned long flags; + + if (!sc_cmd) { + QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n"); + return FAILED; + } + + if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n"); + rc = 
FAILED; + return FAILED; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x " + "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags); + + io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD); + if (!io_req) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF"); + rc = -EAGAIN; + goto reset_tmf_err; + } + + /* Initialize rest of io_req fields */ + io_req->sc_cmd = sc_cmd; + io_req->fcport = fcport; + io_req->cmd_type = QEDF_TASK_MGMT_CMD; + + /* Set the return CPU to be the same as the request one */ + io_req->cpu = smp_processor_id(); + + tm_req = (struct qedf_mp_req *)&(io_req->mp_req); + + rc = qedf_init_mp_req(io_req); + if (rc == FAILED) { + QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init " + "failed\n"); + kref_put(&io_req->refcount, qedf_release_cmd); + goto reset_tmf_err; + } + + /* Set TM flags */ + io_req->io_req_flags = 0; + tm_req->tm_flags = tm_flags; + + /* Default is to return a SCSI command when an error occurs */ + io_req->return_scsi_cmd_on_abts = true; + + /* Fill FCP_CMND */ + qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); + fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; + memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN); + fcp_cmnd->fc_dl = 0; + + /* Fill FC header */ + fc_hdr = &(tm_req->req_fc_hdr); + sid = fcport->sid; + did = fcport->rdata->ids.port_id; + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did, + FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + /* Obtain exchange id */ + xid = io_req->xid; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = " + "0x%x\n", xid); + + /* Initialize task context for this IO request */ + task = qedf_get_task_mem(&qedf->tasks, xid); + qedf_init_mp_task(io_req, task); + + init_completion(&io_req->tm_done); + + /* Obtain free SQ entry */ + spin_lock_irqsave(&fcport->rport_lock, flags); + qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0); + + /* Ring doorbell */ + qedf_ring_doorbell(fcport); + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + tmo = wait_for_completion_timeout(&io_req->tm_done, + QEDF_TM_TIMEOUT * HZ); + + if (!tmo) { + rc = FAILED; + QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n"); + } else { + /* Check TMF response code */ + if (io_req->fcp_rsp_code == 0) + rc = SUCCESS; + else + rc = FAILED; + } + + if (tm_flags == FCP_TMF_LUN_RESET) + qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun); + else + qedf_flush_active_ios(fcport, -1); + + kref_put(&io_req->refcount, qedf_release_cmd); + + if (rc != SUCCESS) { + QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n"); + rc = FAILED; + } else { + QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n"); + rc = SUCCESS; + } +reset_tmf_err: + return rc; +} + +int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; + struct qedf_ctx *qedf; + struct fc_lport *lport; + int rc = SUCCESS; + int rval; + + rval = fc_remote_port_chkready(rport); + + if (rval) { + QEDF_ERR(NULL, "device_reset rport not ready\n"); + rc = FAILED; + goto tmf_err; + } + + if (fcport == NULL) { + QEDF_ERR(NULL, "device_reset: rport is NULL\n"); + rc = FAILED; + goto tmf_err; + } + + qedf = fcport->qedf; + lport = qedf->lport; + + if (test_bit(QEDF_UNLOADING, &qedf->flags) || + test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { + rc = SUCCESS; + goto tmf_err; + } + + if (lport->state != LPORT_ST_READY || 
!(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); + rc = FAILED; + goto tmf_err; + } + + rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags); + +tmf_err: + return rc; +} + +void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + struct fcoe_cqe_rsp_info *fcp_rsp; + struct fcoe_cqe_midpath_info *mp_info; + + + /* Get TMF response length from CQE */ + mp_info = &cqe->cqe_info.midpath_info; + io_req->mp_req.resp_len = mp_info->data_placement_size; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, + "Response len is %d.\n", io_req->mp_req.resp_len); + + fcp_rsp = &cqe->cqe_info.rsp_info; + qedf_parse_fcp_rsp(io_req, fcp_rsp); + + io_req->sc_cmd = NULL; + complete(&io_req->tm_done); +} + +void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, + struct fcoe_cqe *cqe) +{ + unsigned long flags; + uint16_t tmp; + uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len; + u32 payload_len, crc; + struct fc_frame_header *fh; + struct fc_frame *fp; + struct qedf_io_work *io_work; + u32 bdq_idx; + void *bdq_addr; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, + "address.hi=%x address.lo=%x opaque_data.hi=%x " + "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n", + le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi), + le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo), + le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi), + le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo), + qedf->bdq_prod_idx, pktlen); + + bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo); + if (bdq_idx >= QEDF_BDQ_SIZE) { + QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n", + bdq_idx); + goto increment_prod; + } + + bdq_addr = qedf->bdq[bdq_idx].buf_addr; + if (!bdq_addr) { + QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping " + "unsolicited packet.\n"); + goto increment_prod; + } + + if (qedf_dump_frames) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, + "BDQ frame is at addr=%p.\n", bdq_addr); + print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1, + (void *)bdq_addr, pktlen, false); + } + + /* Allocate frame */ + payload_len = pktlen - sizeof(struct fc_frame_header); + fp = fc_frame_alloc(qedf->lport, payload_len); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n"); + goto increment_prod; + } + + /* Copy data from BDQ buffer into fc_frame struct */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, (void *)bdq_addr, pktlen); + + /* Initialize the frame so libfc sees it as a valid frame */ + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = qedf->lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + + /* + * We need to return the frame back up to libfc in a non-atomic + * context + */ + io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); + if (!io_work) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "work for I/O completion.\n"); + fc_frame_free(fp); + goto increment_prod; + } + memset(io_work, 0, sizeof(struct qedf_io_work)); + + INIT_WORK(&io_work->work, qedf_fp_io_handler); + + /* Copy contents of CQE for deferred processing */ + memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); + + io_work->qedf = qedf; + io_work->fp = fp; + + queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work); +increment_prod: + spin_lock_irqsave(&qedf->hba_lock, flags); + + /* Increment producer to let f/w know we've handled the frame */ + qedf->bdq_prod_idx++; + + /* Producer index wraps at uint16_t 
boundary */ + if (qedf->bdq_prod_idx == 0xffff) + qedf->bdq_prod_idx = 0; + + writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); + tmp = readw(qedf->bdq_primary_prod); + writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); + tmp = readw(qedf->bdq_secondary_prod); + + spin_unlock_irqrestore(&qedf->hba_lock, flags); +} diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c new file mode 100644 index 000000000000..d9d7a86b5f8b --- /dev/null +++ b/drivers/scsi/qedf/qedf_main.c @@ -0,0 +1,3336 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/highmem.h> +#include <linux/crc32.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/kthread.h> +#include <scsi/libfc.h> +#include <scsi/scsi_host.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/cpu.h> +#include "qedf.h" + +const struct qed_fcoe_ops *qed_ops; + +static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id); +static void qedf_remove(struct pci_dev *pdev); + +extern struct qedf_debugfs_ops qedf_debugfs_ops; +extern struct file_operations qedf_dbg_fops; + +/* + * Driver module parameters. + */ +static unsigned int qedf_dev_loss_tmo = 60; +module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO); +MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached " + "remote ports (default 60)"); + +uint qedf_debug = QEDF_LOG_INFO; +module_param_named(debug, qedf_debug, uint, S_IRUGO); +MODULE_PARM_DESC(qedf_debug, " Debug mask. Pass '1' to enable default debugging" + " mask"); + +static uint qedf_fipvlan_retries = 30; +module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO); +MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt " + "before giving up (default 30)"); + +static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN; +module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO); +MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails " + "(default 1002)."); + +static uint qedf_default_prio = QEDF_DEFAULT_PRIO; +module_param_named(default_prio, qedf_default_prio, int, S_IRUGO); +MODULE_PARM_DESC(default_prio, " Default 802.1q priority for FIP and FCoE" + " traffic (default 3)."); + +uint qedf_dump_frames; +module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames " + "(default off)"); + +static uint qedf_queue_depth; +module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO); +MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered " + "by the qedf driver. Default is 0 (use OS default)."); + +uint qedf_io_tracing; +module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions " + "into trace buffer. (default off)."); + +static uint qedf_max_lun = MAX_FIBRE_LUNS; +module_param_named(max_lun, qedf_max_lun, int, S_IRUGO); +MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver " + "supports. 
(default 0xffffffff)"); + +uint qedf_link_down_tmo; +module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO); +MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the " + "link is down by N seconds."); + +bool qedf_retry_delay; +module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry " + "delay handling (default off)."); + +static uint qedf_dp_module; +module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO); +MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed " + "qed module during probe."); + +static uint qedf_dp_level; +module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO); +MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module " + "during probe (0-3: 0 more verbose)."); + +struct workqueue_struct *qedf_io_wq; + +static struct fcoe_percpu_s qedf_global; +static DEFINE_SPINLOCK(qedf_global_lock); + +static struct kmem_cache *qedf_io_work_cache; + +void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id) +{ + qedf->vlan_id = vlan_id; + qedf->vlan_id |= qedf_default_prio << VLAN_PRIO_SHIFT; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x " + "prio=%d.\n", vlan_id, qedf_default_prio); +} + +/* Returns true if we have a valid vlan, false otherwise */ +static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf) +{ + int rc; + + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n"); + return false; + } + + while (qedf->fipvlan_retries--) { + if (qedf->vlan_id > 0) + return true; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Retry %d.\n", qedf->fipvlan_retries); + init_completion(&qedf->fipvlan_compl); + qedf_fcoe_send_vlan_req(qedf); + rc = wait_for_completion_timeout(&qedf->fipvlan_compl, + 1 * HZ); + if (rc > 0) { + fcoe_ctlr_link_up(&qedf->ctlr); + return true; + } + } + + return false; +} + +static void qedf_handle_link_update(struct work_struct *work) +{ + struct qedf_ctx *qedf = + container_of(work, struct qedf_ctx, link_update.work); + int rc; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n"); + + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { + rc = qedf_initiate_fipvlan_req(qedf); + if (rc) + return; + /* + * If we get here then we never received a repsonse to our + * fip vlan request so set the vlan_id to the default and + * tell FCoE that the link is up + */ + QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN " + "response, falling back to default VLAN %d.\n", + qedf_fallback_vlan); + qedf_set_vlan_id(qedf, QEDF_FALLBACK_VLAN); + + /* + * Zero out data_src_addr so we'll update it with the new + * lport port_id + */ + eth_zero_addr(qedf->data_src_addr); + fcoe_ctlr_link_up(&qedf->ctlr); + } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { + /* + * If we hit here and link_down_tmo_valid is still 1 it means + * that link_down_tmo timed out so set it to 0 to make sure any + * other readers have accurate state. 
+ */ + atomic_set(&qedf->link_down_tmo_valid, 0); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Calling fcoe_ctlr_link_down().\n"); + fcoe_ctlr_link_down(&qedf->ctlr); + qedf_wait_for_upload(qedf); + /* Reset the number of FIP VLAN retries */ + qedf->fipvlan_retries = qedf_fipvlan_retries; + } +} + +static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + struct qedf_ctx *qedf = lport_priv(lport); + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + return; + } + + /* + * If ERR_PTR is set then don't try to stat anything as it will cause + * a crash when we access fp. + */ + if (IS_ERR(fp)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "fp has IS_ERR() set.\n"); + goto skip_stat; + } + + /* Log stats for FLOGI reject */ + if (fc_frame_payload_op(fp) == ELS_LS_RJT) + qedf->flogi_failed++; + + /* Complete flogi_compl so we can proceed to sending ADISCs */ + complete(&qedf->flogi_compl); + +skip_stat: + /* Report response to libfc */ + fc_lport_flogi_resp(seq, fp, lport); +} + +static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout) +{ + struct qedf_ctx *qedf = lport_priv(lport); + + /* + * Intercept FLOGI for statistic purposes. Note we use the resp + * callback to tell if this is really a flogi. + */ + if (resp == fc_lport_flogi_resp) { + qedf->flogi_cnt++; + return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, + arg, timeout); + } + + return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); +} + +int qedf_send_flogi(struct qedf_ctx *qedf) +{ + struct fc_lport *lport; + struct fc_frame *fp; + + lport = qedf->lport; + + if (!lport->tt.elsct_send) + return -EINVAL; + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n"); + return -ENOMEM; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Sending FLOGI to reestablish session with switch.\n"); + lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, + ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov); + + init_completion(&qedf->flogi_compl); + + return 0; +} + +struct qedf_tmp_rdata_item { + struct fc_rport_priv *rdata; + struct list_head list; +}; + +/* + * This function is called if link_down_tmo is in use. If we get a link up and + * link_down_tmo has not expired then use just FLOGI/ADISC to recover our + * sessions with targets. Otherwise, just call fcoe_ctlr_link_up(). + */ +static void qedf_link_recovery(struct work_struct *work) +{ + struct qedf_ctx *qedf = + container_of(work, struct qedf_ctx, link_recovery.work); + struct qedf_rport *fcport; + struct fc_rport_priv *rdata; + struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item; + bool rc; + int retries = 30; + int rval, i; + struct list_head rdata_login_list; + + INIT_LIST_HEAD(&rdata_login_list); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Link down tmo did not expire.\n"); + + /* + * Essentially reset the fcoe_ctlr here without affecting the state + * of the libfc structs. 
+ */
+ qedf->ctlr.state = FIP_ST_LINK_WAIT;
+ fcoe_ctlr_link_down(&qedf->ctlr);
+
+ /*
+ * Bring the link up before we send the fipvlan request so libfcoe
+ * can select a new fcf in parallel
+ */
+ fcoe_ctlr_link_up(&qedf->ctlr);
+
+ /* Since the link went down and up, verify which vlan we're on */
+ qedf->fipvlan_retries = qedf_fipvlan_retries;
+ rc = qedf_initiate_fipvlan_req(qedf);
+ if (!rc)
+ return;
+
+ /*
+ * We need to wait for an FCF to be selected due to the
+ * fcoe_ctlr_link_up, otherwise the FLOGI will be rejected.
+ */
+ while (retries > 0) {
+ if (qedf->ctlr.sel_fcf) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "FCF reselected, proceeding with FLOGI.\n");
+ break;
+ }
+ msleep(500);
+ retries--;
+ }
+
+ if (retries < 1) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
+ "FCF selection.\n");
+ return;
+ }
+
+ rval = qedf_send_flogi(qedf);
+ if (rval)
+ return;
+
+ /* Wait for FLOGI completion before proceeding with sending ADISCs */
+ i = wait_for_completion_timeout(&qedf->flogi_compl,
+ qedf->lport->r_a_tov);
+ if (i == 0) {
+ QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
+ return;
+ }
+
+ /*
+ * Call lport->tt.rport_login which will cause libfc to send an
+ * ADISC since the rport is in state ready.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+ rdata = fcport->rdata;
+ if (rdata == NULL)
+ continue;
+ rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
+ GFP_ATOMIC);
+ if (!rdata_item)
+ continue;
+ if (kref_get_unless_zero(&rdata->kref)) {
+ rdata_item->rdata = rdata;
+ list_add(&rdata_item->list, &rdata_login_list);
+ } else
+ kfree(rdata_item);
+ }
+ rcu_read_unlock();
+ /*
+ * Do the fc_rport_login outside of the rcu lock so we don't take a
+ * mutex in an atomic context.
+ */
+ list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
+ list) {
+ list_del(&rdata_item->list);
+ fc_rport_login(rdata_item->rdata);
+ kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
+ kfree(rdata_item);
+ }
+}
+
+static void qedf_update_link_speed(struct qedf_ctx *qedf,
+ struct qed_link_output *link)
+{
+ struct fc_lport *lport = qedf->lport;
+
+ lport->link_speed = FC_PORTSPEED_UNKNOWN;
+ lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
+
+ /* Set fc_host link speed */
+ switch (link->speed) {
+ case 10000:
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ break;
+ case 25000:
+ lport->link_speed = FC_PORTSPEED_25GBIT;
+ break;
+ case 40000:
+ lport->link_speed = FC_PORTSPEED_40GBIT;
+ break;
+ case 50000:
+ lport->link_speed = FC_PORTSPEED_50GBIT;
+ break;
+ case 100000:
+ lport->link_speed = FC_PORTSPEED_100GBIT;
+ break;
+ default:
+ lport->link_speed = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+
+ /*
+ * Set supported link speed by querying the supported
+ * capabilities of the link.
+ */ + if (link->supported_caps & SUPPORTED_10000baseKR_Full) + lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; + if (link->supported_caps & SUPPORTED_25000baseKR_Full) + lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; + if (link->supported_caps & SUPPORTED_40000baseLR4_Full) + lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; + if (link->supported_caps & SUPPORTED_50000baseKR2_Full) + lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; + if (link->supported_caps & SUPPORTED_100000baseKR4_Full) + lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; + fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; +} + +static void qedf_link_update(void *dev, struct qed_link_output *link) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)dev; + + if (link->link_up) { + QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n", + link->speed / 1000); + + /* Cancel any pending link down work */ + cancel_delayed_work(&qedf->link_update); + + atomic_set(&qedf->link_state, QEDF_LINK_UP); + qedf_update_link_speed(qedf, link); + + if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { + QEDF_ERR(&(qedf->dbg_ctx), "DCBx done.\n"); + if (atomic_read(&qedf->link_down_tmo_valid) > 0) + queue_delayed_work(qedf->link_update_wq, + &qedf->link_recovery, 0); + else + queue_delayed_work(qedf->link_update_wq, + &qedf->link_update, 0); + atomic_set(&qedf->link_down_tmo_valid, 0); + } + + } else { + QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n"); + + atomic_set(&qedf->link_state, QEDF_LINK_DOWN); + atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); + /* + * Flag that we're waiting for the link to come back up before + * informing the fcoe layer of the event. + */ + if (qedf_link_down_tmo > 0) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Starting link down tmo.\n"); + atomic_set(&qedf->link_down_tmo_valid, 1); + } + qedf->vlan_id = 0; + qedf_update_link_speed(qedf, link); + queue_delayed_work(qedf->link_update_wq, &qedf->link_update, + qedf_link_down_tmo * HZ); + } +} + + +static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)dev; + + QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe " + "prio=%d.\n", get->operational.valid, get->operational.enabled, + get->operational.app_prio.fcoe); + + if (get->operational.enabled && get->operational.valid) { + /* If DCBX was already negotiated on link up then just exit */ + if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "DCBX already set on link up.\n"); + return; + } + + atomic_set(&qedf->dcbx, QEDF_DCBX_DONE); + + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { + if (atomic_read(&qedf->link_down_tmo_valid) > 0) + queue_delayed_work(qedf->link_update_wq, + &qedf->link_recovery, 0); + else + queue_delayed_work(qedf->link_update_wq, + &qedf->link_update, 0); + atomic_set(&qedf->link_down_tmo_valid, 0); + } + } + +} + +static u32 qedf_get_login_failures(void *cookie) +{ + struct qedf_ctx *qedf; + + qedf = (struct qedf_ctx *)cookie; + return qedf->flogi_failed; +} + +static struct qed_fcoe_cb_ops qedf_cb_ops = { + { + .link_update = qedf_link_update, + .dcbx_aen = qedf_dcbx_handler, + } +}; + +/* + * Various transport templates. 
+ */ + +static struct scsi_transport_template *qedf_fc_transport_template; +static struct scsi_transport_template *qedf_fc_vport_transport_template; + +/* + * SCSI EH handlers + */ +static int qedf_eh_abort(struct scsi_cmnd *sc_cmd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_ctx *qedf; + struct qedf_ioreq *io_req; + int rc = FAILED; + int rval; + + if (fc_remote_port_chkready(rport)) { + QEDF_ERR(NULL, "rport not ready\n"); + goto out; + } + + lport = shost_priv(sc_cmd->device->host); + qedf = (struct qedf_ctx *)lport_priv(lport); + + if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n"); + goto out; + } + + fcport = (struct qedf_rport *)&rp[1]; + + io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; + if (!io_req) { + QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n"); + rc = SUCCESS; + goto out; + } + + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || + test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || + test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in " + "cleanup or abort processing or already " + "completed.\n", io_req->xid); + rc = SUCCESS; + goto out; + } + + QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x " + "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx); + + if (qedf->stop_io_on_error) { + qedf_stop_all_io(qedf); + rc = SUCCESS; + goto out; + } + + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); + goto out; + } + + wait_for_completion(&io_req->abts_done); + + if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS || + io_req->event == QEDF_IOREQ_EV_ABORT_FAILED || + io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) { + /* + * If we get a reponse to the abort this is success from + * the perspective that all references to the command have + * been removed from the driver and firmware + */ + rc = SUCCESS; + } else { + /* If the abort and cleanup failed then return a failure */ + rc = FAILED; + } + + if (rc == SUCCESS) + QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n", + io_req->xid); + else + QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n", + io_req->xid); + +out: + return rc; +} + +static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd) +{ + QEDF_ERR(NULL, "TARGET RESET Issued..."); + return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); +} + +static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd) +{ + QEDF_ERR(NULL, "LUN RESET Issued...\n"); + return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); +} + +void qedf_wait_for_upload(struct qedf_ctx *qedf) +{ + while (1) { + if (atomic_read(&qedf->num_offloads)) + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Waiting for all uploads to complete.\n"); + else + break; + msleep(500); + } +} + +/* Reset the host by gracefully logging out and then logging back in */ +static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport; + struct qedf_ctx *qedf; + + lport = shost_priv(sc_cmd->device->host); + + if (lport->vport) { + QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n"); + return SUCCESS; + } + + qedf = (struct qedf_ctx *)lport_priv(lport); + + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN || + test_bit(QEDF_UNLOADING, &qedf->flags) || + test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) + return FAILED; + + 
QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued..."); + + /* For host reset, essentially do a soft link up/down */ + atomic_set(&qedf->link_state, QEDF_LINK_DOWN); + atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); + queue_delayed_work(qedf->link_update_wq, &qedf->link_update, + 0); + qedf_wait_for_upload(qedf); + atomic_set(&qedf->link_state, QEDF_LINK_UP); + qedf->vlan_id = 0; + queue_delayed_work(qedf->link_update_wq, &qedf->link_update, + 0); + + return SUCCESS; +} + +static int qedf_slave_configure(struct scsi_device *sdev) +{ + if (qedf_queue_depth) { + scsi_change_queue_depth(sdev, qedf_queue_depth); + } + + return 0; +} + +static struct scsi_host_template qedf_host_template = { + .module = THIS_MODULE, + .name = QEDF_MODULE_NAME, + .this_id = -1, + .cmd_per_lun = 3, + .use_clustering = ENABLE_CLUSTERING, + .max_sectors = 0xffff, + .queuecommand = qedf_queuecommand, + .shost_attrs = qedf_host_attrs, + .eh_abort_handler = qedf_eh_abort, + .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */ + .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */ + .eh_host_reset_handler = qedf_eh_host_reset, + .slave_configure = qedf_slave_configure, + .dma_boundary = QED_HW_DMA_BOUNDARY, + .sg_tablesize = QEDF_MAX_BDS_PER_CMD, + .can_queue = FCOE_PARAMS_NUM_TASKS, +}; + +static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen) +{ + int rc; + + spin_lock(&qedf_global_lock); + rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global); + spin_unlock(&qedf_global_lock); + + return rc; +} + +static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id) +{ + struct qedf_rport *fcport; + struct fc_rport_priv *rdata; + + rcu_read_lock(); + list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { + rdata = fcport->rdata; + if (rdata == NULL) + continue; + if (rdata->ids.port_id == port_id) { + rcu_read_unlock(); + return fcport; + } + } + rcu_read_unlock(); + + /* Return NULL to caller to let them know fcport was not found */ + return NULL; +} + +/* Transmits an ELS frame over an offloaded session */ +static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp) +{ + struct fc_frame_header *fh; + int rc = 0; + + fh = fc_frame_header_get(fp); + if ((fh->fh_type == FC_TYPE_ELS) && + (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + switch (fc_frame_payload_op(fp)) { + case ELS_ADISC: + qedf_send_adisc(fcport, fp); + rc = 1; + break; + } + } + + return rc; +} + +/** + * qedf_xmit - qedf FCoE frame transmit function + * + */ +static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_lport *base_lport; + struct qedf_ctx *qedf; + struct ethhdr *eh; + struct fcoe_crc_eof *cp; + struct sk_buff *skb; + struct fc_frame_header *fh; + struct fcoe_hdr *hp; + u8 sof, eof; + u32 crc; + unsigned int hlen, tlen, elen; + int wlen; + struct fc_stats *stats; + struct fc_lport *tmp_lport; + struct fc_lport *vn_port = NULL; + struct qedf_rport *fcport; + int rc; + u16 vlan_tci = 0; + + qedf = (struct qedf_ctx *)lport_priv(lport); + + fh = fc_frame_header_get(fp); + skb = fp_skb(fp); + + /* Filter out traffic to other NPIV ports on the same host */ + if (lport->vport) + base_lport = shost_priv(vport_to_shost(lport->vport)); + else + base_lport = lport; + + /* Flag if the destination is the base port */ + if (base_lport->port_id == ntoh24(fh->fh_d_id)) { + vn_port = base_lport; + } else { + /* Got through the list of vports attached to the base_lport + * and see if we have a match with the destination address. 
+ */ + list_for_each_entry(tmp_lport, &base_lport->vports, list) { + if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) { + vn_port = tmp_lport; + break; + } + } + } + if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) { + struct fc_rport_priv *rdata = NULL; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id)); + kfree_skb(skb); + rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); + if (rdata) + rdata->retries = lport->max_rport_retry_count; + return -EINVAL; + } + /* End NPIV filtering */ + + if (!qedf->ctlr.sel_fcf) { + kfree_skb(skb); + return 0; + } + + if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { + QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); + kfree_skb(skb); + return 0; + } + + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n"); + kfree_skb(skb); + return 0; + } + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb)) + return 0; + } + + /* Check to see if this needs to be sent on an offloaded session */ + fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); + + if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + rc = qedf_xmit_l2_frame(fcport, fp); + /* + * If the frame was successfully sent over the middle path + * then do not try to also send it over the LL2 path + */ + if (rc) + return 0; + } + + sof = fr_sof(fp); + eof = fr_eof(fp); + + elen = sizeof(struct ethhdr); + hlen = sizeof(struct fcoe_hdr); + tlen = sizeof(struct fcoe_crc_eof); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; + + skb->ip_summed = CHECKSUM_NONE; + crc = fcoe_fc_crc(fp); + + /* copy port crc and eof to the skb buff */ + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag; + + if (qedf_get_paged_crc_eof(skb, tlen)) { + kfree_skb(skb); + return -ENOMEM; + } + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; + } else { + cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); + } + + memset(cp, 0, sizeof(*cp)); + cp->fcoe_eof = eof; + cp->fcoe_crc32 = cpu_to_le32(~crc); + if (skb_is_nonlinear(skb)) { + kunmap_atomic(cp); + cp = NULL; + } + + + /* adjust skb network/transport offsets to match mac/fcoe/port */ + skb_push(skb, elen + hlen); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->mac_len = elen; + skb->protocol = htons(ETH_P_FCOE); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); + + /* fill up mac and fcoe headers */ + eh = eth_hdr(skb); + eh->h_proto = htons(ETH_P_FCOE); + if (qedf->ctlr.map_dest) + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); + else + /* insert GW address */ + ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr); + + /* Set the source MAC address */ + fc_fcoe_set_mac(eh->h_source, fh->fh_s_id); + + hp = (struct fcoe_hdr *)(eh + 1); + memset(hp, 0, sizeof(*hp)); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); + hp->fcoe_sof = sof; + + /*update tx stats */ + stats = per_cpu_ptr(lport->stats, get_cpu()); + stats->TxFrames++; + stats->TxWords += wlen; + put_cpu(); + + /* Get VLAN ID from skb for printing purposes */ + __vlan_hwaccel_get_tag(skb, &vlan_tci); + + /* send down to lld */ + fr_dev(fp) = lport; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: " + "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n", + ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type, + vlan_tci); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, + 
1, skb->data, skb->len, false); + qed_ops->ll2->start_xmit(qedf->cdev, skb); + + return 0; +} + +static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) +{ + int rval = 0; + u32 *pbl; + dma_addr_t page; + int num_pages; + + /* Calculate appropriate queue and PBL sizes */ + fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); + fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE); + fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) * + sizeof(void *); + fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; + + fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, + &fcport->sq_dma, GFP_KERNEL); + if (!fcport->sq) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send " + "queue.\n"); + rval = 1; + goto out; + } + memset(fcport->sq, 0, fcport->sq_mem_size); + + fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, + fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); + if (!fcport->sq_pbl) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send " + "queue PBL.\n"); + rval = 1; + goto out_free_sq; + } + memset(fcport->sq_pbl, 0, fcport->sq_pbl_size); + + /* Create PBL */ + num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE; + page = fcport->sq_dma; + pbl = (u32 *)fcport->sq_pbl; + + while (num_pages--) { + *pbl = U64_LO(page); + pbl++; + *pbl = U64_HI(page); + pbl++; + page += QEDF_PAGE_SIZE; + } + + return rval; + +out_free_sq: + dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq, + fcport->sq_dma); +out: + return rval; +} + +static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) +{ + if (fcport->sq_pbl) + dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, + fcport->sq_pbl, fcport->sq_pbl_dma); + if (fcport->sq) + dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, + fcport->sq, fcport->sq_dma); +} + +static int qedf_offload_connection(struct qedf_ctx *qedf, + struct qedf_rport *fcport) +{ + struct qed_fcoe_params_offload conn_info; + u32 port_id; + u8 lport_src_id[3]; + int rval; + uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe)); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection " + "portid=%06x.\n", fcport->rdata->ids.port_id); + rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle, + &fcport->fw_cid, &fcport->p_doorbell); + if (rval) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection " + "for portid=%06x.\n", fcport->rdata->ids.port_id); + rval = 1; /* For some reason qed returns 0 on failure here */ + goto out; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x " + "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id, + fcport->fw_cid, fcport->handle); + + memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload)); + + /* Fill in the offload connection info */ + conn_info.sq_pbl_addr = fcport->sq_pbl_dma; + + conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl); + conn_info.sq_next_page_addr = + (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8)); + + /* Need to use our FCoE MAC for the offload session */ + port_id = fc_host_port_id(qedf->lport->host); + lport_src_id[2] = (port_id & 0x000000FF); + lport_src_id[1] = (port_id & 0x0000FF00) >> 8; + lport_src_id[0] = (port_id & 0x00FF0000) >> 16; + fc_fcoe_set_mac(conn_info.src_mac, lport_src_id); + + ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); + + conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size; + conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20; + conn_info.rec_tov_timer_val = 3; /* I think this is what 
E3 was */ + conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size; + + /* Set VLAN data */ + conn_info.vlan_tag = qedf->vlan_id << + FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT; + conn_info.vlan_tag |= + qedf_default_prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT; + conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK << + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT); + + /* Set host port source id */ + port_id = fc_host_port_id(qedf->lport->host); + fcport->sid = port_id; + conn_info.s_id.addr_hi = (port_id & 0x000000FF); + conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8; + conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16; + + conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq; + + /* Set remote port destination id */ + port_id = fcport->rdata->rport->port_id; + conn_info.d_id.addr_hi = (port_id & 0x000000FF); + conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8; + conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16; + + conn_info.def_q_idx = 0; /* Default index for send queue? */ + + /* Set FC-TAPE specific flags if needed */ + if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, + "Enable CONF, REC for portid=%06x.\n", + fcport->rdata->ids.port_id); + conn_info.flags |= 1 << + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT; + conn_info.flags |= + ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT; + } + + rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info); + if (rval) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection " + "for portid=%06x.\n", fcport->rdata->ids.port_id); + goto out_free_conn; + } else + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload " + "succeeded portid=%06x total_sqe=%d.\n", + fcport->rdata->ids.port_id, total_sqe); + + spin_lock_init(&fcport->rport_lock); + atomic_set(&fcport->free_sqes, total_sqe); + return 0; +out_free_conn: + qed_ops->release_conn(qedf->cdev, fcport->handle); +out: + return rval; +} + +#define QEDF_TERM_BUFF_SIZE 10 +static void qedf_upload_connection(struct qedf_ctx *qedf, + struct qedf_rport *fcport) +{ + void *term_params; + dma_addr_t term_params_dma; + + /* Term params needs to be a DMA coherent buffer as qed shared the + * physical DMA address with the firmware. The buffer may be used in + * the receive path so we may eventually have to move this. + */ + term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, + &term_params_dma, GFP_KERNEL); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection " + "port_id=%06x.\n", fcport->rdata->ids.port_id); + + qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma); + qed_ops->release_conn(qedf->cdev, fcport->handle); + + dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params, + term_params_dma); +} + +static void qedf_cleanup_fcport(struct qedf_ctx *qedf, + struct qedf_rport *fcport) +{ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n", + fcport->rdata->ids.port_id); + + /* Flush any remaining i/o's before we upload the connection */ + qedf_flush_active_ios(fcport, -1); + + if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) + qedf_upload_connection(qedf, fcport); + qedf_free_sq(qedf, fcport); + fcport->rdata = NULL; + fcport->qedf = NULL; +} + +/** + * This event_callback is called after successful completion of libfc + * initiated target login. qedf can proceed with initiating the session + * establishment. 
+ */ +static void qedf_rport_event_handler(struct fc_lport *lport, + struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + struct qedf_ctx *qedf = lport_priv(lport); + struct fc_rport *rport = rdata->rport; + struct fc_rport_libfc_priv *rp; + struct qedf_rport *fcport; + u32 port_id; + int rval; + unsigned long flags; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, " + "port_id = 0x%x\n", event, rdata->ids.port_id); + + switch (event) { + case RPORT_EV_READY: + if (!rport) { + QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n"); + break; + } + + rp = rport->dd_data; + fcport = (struct qedf_rport *)&rp[1]; + fcport->qedf = qedf; + + if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) { + QEDF_ERR(&(qedf->dbg_ctx), "Not offloading " + "portid=0x%x as max number of offloaded sessions " + "reached.\n", rdata->ids.port_id); + return; + } + + /* + * Don't try to offload the session again. Can happen when we + * get an ADISC + */ + if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_WARN(&(qedf->dbg_ctx), "Session already " + "offloaded, portid=0x%x.\n", + rdata->ids.port_id); + return; + } + + if (rport->port_id == FC_FID_DIR_SERV) { + /* + * qedf_rport structure doesn't exist for + * directory server. + * We should not come here, as lport will + * take care of fabric login + */ + QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not " + "exist for dir server port_id=%x\n", + rdata->ids.port_id); + break; + } + + if (rdata->spp_type != FC_TYPE_FCP) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Not offlading since since spp type isn't FCP\n"); + break; + } + if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Not FCP target so not offloading\n"); + break; + } + + fcport->rdata = rdata; + fcport->rport = rport; + + rval = qedf_alloc_sq(qedf, fcport); + if (rval) { + qedf_cleanup_fcport(qedf, fcport); + break; + } + + /* Set device type */ + if (rdata->flags & FC_RP_FLAGS_RETRY && + rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && + !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { + fcport->dev_type = QEDF_RPORT_TYPE_TAPE; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "portid=%06x is a TAPE device.\n", + rdata->ids.port_id); + } else { + fcport->dev_type = QEDF_RPORT_TYPE_DISK; + } + + rval = qedf_offload_connection(qedf, fcport); + if (rval) { + qedf_cleanup_fcport(qedf, fcport); + break; + } + + /* Add fcport to list of qedf_ctx list of offloaded ports */ + spin_lock_irqsave(&qedf->hba_lock, flags); + list_add_rcu(&fcport->peers, &qedf->fcports); + spin_unlock_irqrestore(&qedf->hba_lock, flags); + + /* + * Set the session ready bit to let everyone know that this + * connection is ready for I/O + */ + set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags); + atomic_inc(&qedf->num_offloads); + + break; + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + port_id = rdata->ids.port_id; + if (port_id == FC_FID_DIR_SERV) + break; + + if (!rport) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "port_id=%x - rport notcreated Yet!!\n", port_id); + break; + } + rp = rport->dd_data; + /* + * Perform session upload. Note that rdata->peers is already + * removed from disc->rports list before we get this event. 
+ */ + fcport = (struct qedf_rport *)&rp[1]; + + /* Only free this fcport if it is offloaded already */ + if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags); + qedf_cleanup_fcport(qedf, fcport); + + /* + * Remove fcport to list of qedf_ctx list of offloaded + * ports + */ + spin_lock_irqsave(&qedf->hba_lock, flags); + list_del_rcu(&fcport->peers); + spin_unlock_irqrestore(&qedf->hba_lock, flags); + + clear_bit(QEDF_RPORT_UPLOADING_CONNECTION, + &fcport->flags); + atomic_dec(&qedf->num_offloads); + } + + break; + + case RPORT_EV_NONE: + break; + } +} + +static void qedf_abort_io(struct fc_lport *lport) +{ + /* NO-OP but need to fill in the template */ +} + +static void qedf_fcp_cleanup(struct fc_lport *lport) +{ + /* + * NO-OP but need to fill in template to prevent a NULL + * function pointer dereference during link down. I/Os + * will be flushed when port is uploaded. + */ +} + +static struct libfc_function_template qedf_lport_template = { + .frame_send = qedf_xmit, + .fcp_abort_io = qedf_abort_io, + .fcp_cleanup = qedf_fcp_cleanup, + .rport_event_callback = qedf_rport_event_handler, + .elsct_send = qedf_elsct_send, +}; + +static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf) +{ + fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO); + + qedf->ctlr.send = qedf_fip_send; + qedf->ctlr.update_mac = qedf_update_src_mac; + qedf->ctlr.get_src_addr = qedf_get_src_mac; + ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac); +} + +static int qedf_lport_setup(struct qedf_ctx *qedf) +{ + struct fc_lport *lport = qedf->lport; + + lport->link_up = 0; + lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; + lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->boot_time = jiffies; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + + /* Set NPIV support */ + lport->does_npiv = 1; + fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV; + + fc_set_wwnn(lport, qedf->wwnn); + fc_set_wwpn(lport, qedf->wwpn); + + fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0); + + /* Allocate the exchange manager */ + fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1, + qedf->max_els_xid, NULL); + + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish lport config */ + fc_lport_config(lport); + + /* Set max frame size */ + fc_set_mfs(lport, QEDF_MFS); + fc_host_maxframe_size(lport->host) = lport->mfs; + + /* Set default dev_loss_tmo based on module parameter */ + fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo; + + /* Set symbolic node name */ + snprintf(fc_host_symbolic_name(lport->host), 256, + "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION); + + return 0; +} + +/* + * NPIV functions + */ + +static int qedf_vport_libfc_config(struct fc_vport *vport, + struct fc_lport *lport) +{ + lport->link_up = 0; + lport->qfull = 0; + lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; + lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->boot_time = jiffies; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + lport->does_npiv = 1; /* Temporary until we add NPIV support */ + + /* Allocate stats for vport */ + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish lport config */ + fc_lport_config(lport); + + /* offload related configuration */ + lport->crc_offload = 0; + 
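/* The remaining libfc offload capabilities are likewise left disabled for the vport */ +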
lport->seq_offload = 0; + lport->lro_enabled = 0; + lport->lro_xid = 0; + lport->lso_max = 0; + + return 0; +} + +static int qedf_vport_create(struct fc_vport *vport, bool disabled) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port; + struct qedf_ctx *base_qedf = lport_priv(n_port); + struct qedf_ctx *vport_qedf; + + char buf[32]; + int rc = 0; + + rc = fcoe_validate_vport_create(vport); + if (rc) { + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, " + "WWPN (0x%s) already exists.\n", buf); + goto err1; + } + + if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) { + QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " + "because link is not up.\n"); + rc = -EIO; + goto err1; + } + + vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx)); + if (!vn_port) { + QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " + "for vport.\n"); + rc = -ENOMEM; + goto err1; + } + + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n", + buf); + + /* Copy some fields from base_qedf */ + vport_qedf = lport_priv(vn_port); + memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx)); + + /* Set qedf data specific to this vport */ + vport_qedf->lport = vn_port; + /* Use same hba_lock as base_qedf */ + vport_qedf->hba_lock = base_qedf->hba_lock; + vport_qedf->pdev = base_qedf->pdev; + vport_qedf->cmd_mgr = base_qedf->cmd_mgr; + init_completion(&vport_qedf->flogi_compl); + INIT_LIST_HEAD(&vport_qedf->fcports); + + rc = qedf_vport_libfc_config(vport, vn_port); + if (rc) { + QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " + "for lport stats.\n"); + goto err2; + } + + fc_set_wwnn(vn_port, vport->node_name); + fc_set_wwpn(vn_port, vport->port_name); + vport_qedf->wwnn = vn_port->wwnn; + vport_qedf->wwpn = vn_port->wwpn; + + vn_port->host->transportt = qedf_fc_vport_transport_template; + vn_port->host->can_queue = QEDF_MAX_ELS_XID; + vn_port->host->max_lun = qedf_max_lun; + vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD; + vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN; + + rc = scsi_add_host(vn_port->host, &vport->dev); + if (rc) { + QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n"); + goto err2; + } + + /* Set default dev_loss_tmo based on module parameter */ + fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo; + + /* Init libfc stuffs */ + memcpy(&vn_port->tt, &qedf_lport_template, + sizeof(qedf_lport_template)); + fc_exch_init(vn_port); + fc_elsct_init(vn_port); + fc_lport_init(vn_port); + fc_disc_init(vn_port); + fc_disc_config(vn_port, vn_port); + + + /* Allocate the exchange manager */ + shost = vport_to_shost(vport); + n_port = shost_priv(shost); + fc_exch_mgr_list_clone(n_port, vn_port); + + /* Set max frame size */ + fc_set_mfs(vn_port, QEDF_MFS); + + fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN; + + if (disabled) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + } else { + vn_port->boot_time = jiffies; + fc_fabric_login(vn_port); + fc_vport_setlink(vn_port); + } + + QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", + vn_port); + + /* Set up debug context for vport */ + vport_qedf->dbg_ctx.host_no = vn_port->host->host_no; + vport_qedf->dbg_ctx.pdev = base_qedf->pdev; + +err2: + scsi_host_put(vn_port->host); +err1: + return rc; +} + +static int qedf_vport_destroy(struct fc_vport *vport) +{ + struct Scsi_Host *shost = 
vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; + + mutex_lock(&n_port->lp_mutex); + list_del(&vn_port->list); + mutex_unlock(&n_port->lp_mutex); + + fc_fabric_logoff(vn_port); + fc_lport_destroy(vn_port); + + /* Detach from scsi-ml */ + fc_remove_host(vn_port->host); + scsi_remove_host(vn_port->host); + + /* + * Only try to release the exchange manager if the vn_port + * configuration is complete. + */ + if (vn_port->state == LPORT_ST_READY) + fc_exch_mgr_free(vn_port); + + /* Free memory used by statistical counters */ + fc_lport_free_stats(vn_port); + + /* Release Scsi_Host */ + if (vn_port->host) + scsi_host_put(vn_port->host); + + return 0; +} + +static int qedf_vport_disable(struct fc_vport *vport, bool disable) +{ + struct fc_lport *lport = vport->dd_data; + + if (disable) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + fc_fabric_logoff(lport); + } else { + lport->boot_time = jiffies; + fc_fabric_login(lport); + fc_vport_setlink(lport); + } + return 0; +} + +/* + * During removal we need to wait for all the vports associated with a port + * to be destroyed so we avoid a race condition where libfc is still trying + * to reap vports while the driver remove function has already reaped the + * driver contexts associated with the physical port. + */ +static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf) +{ + struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, + "Entered.\n"); + while (fc_host->npiv_vports_inuse > 0) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, + "Waiting for all vports to be reaped.\n"); + msleep(1000); + } +} + +/** + * qedf_fcoe_reset - Resets the fcoe + * + * @shost: shost the reset is from + * + * Returns: always 0 + */ +static int qedf_fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + + fc_fabric_logoff(lport); + fc_fabric_login(lport); + return 0; +} + +static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host + *shost) +{ + struct fc_host_statistics *qedf_stats; + struct fc_lport *lport = shost_priv(shost); + struct qedf_ctx *qedf = lport_priv(lport); + struct qed_fcoe_stats *fw_fcoe_stats; + + qedf_stats = fc_get_host_stats(shost); + + /* We don't collect offload stats for specific NPIV ports */ + if (lport->vport) + goto out; + + fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL); + if (!fw_fcoe_stats) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " + "fw_fcoe_stats.\n"); + goto out; + } + + /* Query firmware for offload stats */ + qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); + + /* + * The expectation is that we add our offload stats to the stats + * being maintained by libfc each time the fc_get_host_status callback + * is invoked. The additions are not carried over for each call to + * the fc_get_host_stats callback. 
+ */ + qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt + + fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt + + fw_fcoe_stats->fcoe_tx_other_pkt_cnt; + qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt + + fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt + + fw_fcoe_stats->fcoe_rx_other_pkt_cnt; + qedf_stats->fcp_input_megabytes += + do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000); + qedf_stats->fcp_output_megabytes += + do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000); + qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4; + qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4; + qedf_stats->invalid_crc_count += + fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt; + qedf_stats->dumped_frames = + fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; + qedf_stats->error_frames += + fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; + qedf_stats->fcp_input_requests += qedf->input_requests; + qedf_stats->fcp_output_requests += qedf->output_requests; + qedf_stats->fcp_control_requests += qedf->control_requests; + qedf_stats->fcp_packet_aborts += qedf->packet_aborts; + qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures; + + kfree(fw_fcoe_stats); +out: + return qedf_stats; +} + +static struct fc_function_template qedf_fc_transport_fn = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + /* + * Tell FC transport to allocate enough space to store the backpointer + * for the associate qedf_rport struct. 
+ */ + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct qedf_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = qedf_fc_get_host_stats, + .issue_fc_host_lip = qedf_fcoe_reset, + .vport_create = qedf_vport_create, + .vport_delete = qedf_vport_destroy, + .vport_disable = qedf_vport_disable, + .bsg_request = fc_lport_bsg_request, +}; + +static struct fc_function_template qedf_fc_vport_transport_fn = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct qedf_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = qedf_fcoe_reset, + .bsg_request = fc_lport_bsg_request, +}; + +static bool qedf_fp_has_work(struct qedf_fastpath *fp) +{ + struct qedf_ctx *qedf = fp->qedf; + struct global_queue *que; + struct qed_sb_info *sb_info = fp->sb_info; + struct status_block *sb = sb_info->sb_virt; + u16 prod_idx; + + /* Get the pointer to the global CQ this completion is on */ + que = qedf->global_queues[fp->sb_id]; + + /* Be sure all responses have been written to PI */ + rmb(); + + /* Get the current firmware producer index */ + prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; + + return (que->cq_prod_idx != prod_idx); +} + +/* + * Interrupt handler code. + */ + +/* Process completion queue and copy CQE contents for deferred processesing + * + * Return true if we should wake the I/O thread, false if not. + */ +static bool qedf_process_completions(struct qedf_fastpath *fp) +{ + struct qedf_ctx *qedf = fp->qedf; + struct qed_sb_info *sb_info = fp->sb_info; + struct status_block *sb = sb_info->sb_virt; + struct global_queue *que; + u16 prod_idx; + struct fcoe_cqe *cqe; + struct qedf_io_work *io_work; + int num_handled = 0; + unsigned int cpu; + struct qedf_ioreq *io_req = NULL; + u16 xid; + u16 new_cqes; + u32 comp_type; + + /* Get the current firmware producer index */ + prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; + + /* Get the pointer to the global CQ this completion is on */ + que = qedf->global_queues[fp->sb_id]; + + /* Calculate the amount of new elements since last processing */ + new_cqes = (prod_idx >= que->cq_prod_idx) ? 
+ (prod_idx - que->cq_prod_idx) : + 0x10000 - que->cq_prod_idx + prod_idx; + + /* Save producer index */ + que->cq_prod_idx = prod_idx; + + while (new_cqes) { + fp->completions++; + num_handled++; + cqe = &que->cq[que->cq_cons_idx]; + + comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & + FCOE_CQE_CQE_TYPE_MASK; + + /* + * Process unsolicited CQEs directly in the interrupt handler + * since we need the fastpath ID + */ + if (comp_type == FCOE_UNSOLIC_CQE_TYPE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, + "Unsolicited CQE.\n"); + qedf_process_unsol_compl(qedf, fp->sb_id, cqe); + /* + * Don't add a work list item. Increment the consumer + * index and move on. + */ + goto inc_idx; + } + + xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; + io_req = &qedf->cmd_mgr->cmds[xid]; + + /* + * Figure out which percpu thread we should queue this I/O + * on. + */ + if (!io_req) + /* If there is no io_req associated with this CQE + * just queue it on CPU 0 + */ + cpu = 0; + else { + cpu = io_req->cpu; + io_req->int_cpu = smp_processor_id(); + } + + io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); + if (!io_work) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "work for I/O completion.\n"); + continue; + } + memset(io_work, 0, sizeof(struct qedf_io_work)); + + INIT_WORK(&io_work->work, qedf_fp_io_handler); + + /* Copy contents of CQE for deferred processing */ + memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); + + io_work->qedf = fp->qedf; + io_work->fp = NULL; /* Only used for unsolicited frames */ + + queue_work_on(cpu, qedf_io_wq, &io_work->work); + +inc_idx: + que->cq_cons_idx++; + if (que->cq_cons_idx == fp->cq_num_entries) + que->cq_cons_idx = 0; + new_cqes--; + } + + return true; +} + + +/* MSI-X fastpath handler code */ +static irqreturn_t qedf_msix_handler(int irq, void *dev_id) +{ + struct qedf_fastpath *fp = dev_id; + + if (!fp) { + QEDF_ERR(NULL, "fp is null.\n"); + return IRQ_HANDLED; + } + if (!fp->sb_info) { + QEDF_ERR(NULL, "fp->sb_info is null."); + return IRQ_HANDLED; + } + + /* + * Disable interrupts for this status block while we process new + * completions + */ + qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); + + while (1) { + qedf_process_completions(fp); + + if (qedf_fp_has_work(fp) == 0) { + /* Update the sb information */ + qed_sb_update_sb_idx(fp->sb_info); + + /* Check for more work */ + rmb(); + + if (qedf_fp_has_work(fp) == 0) { + /* Re-enable interrupts */ + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); + return IRQ_HANDLED; + } + } + } + + /* Do we ever want to break out of above loop? 
*/ + return IRQ_HANDLED; +} + +/* simd handler for MSI/INTa */ +static void qedf_simd_int_handler(void *cookie) +{ + /* Cookie is qedf_ctx struct */ + struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; + + QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf); +} + +#define QEDF_SIMD_HANDLER_NUM 0 +static void qedf_sync_free_irqs(struct qedf_ctx *qedf) +{ + int i; + + if (qedf->int_info.msix_cnt) { + for (i = 0; i < qedf->int_info.used_cnt; i++) { + synchronize_irq(qedf->int_info.msix[i].vector); + irq_set_affinity_hint(qedf->int_info.msix[i].vector, + NULL); + irq_set_affinity_notifier(qedf->int_info.msix[i].vector, + NULL); + free_irq(qedf->int_info.msix[i].vector, + &qedf->fp_array[i]); + } + } else + qed_ops->common->simd_handler_clean(qedf->cdev, + QEDF_SIMD_HANDLER_NUM); + + qedf->int_info.used_cnt = 0; + qed_ops->common->set_fp_int(qedf->cdev, 0); +} + +static int qedf_request_msix_irq(struct qedf_ctx *qedf) +{ + int i, rc, cpu; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < qedf->num_queues; i++) { + rc = request_irq(qedf->int_info.msix[i].vector, + qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]); + + if (rc) { + QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n"); + qedf_sync_free_irqs(qedf); + return rc; + } + + qedf->int_info.used_cnt++; + rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector, + get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } + + return 0; +} + +static int qedf_setup_int(struct qedf_ctx *qedf) +{ + int rc = 0; + + /* + * Learn interrupt configuration + */ + rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus()); + + rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info); + if (rc) + return 0; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = " + "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt, + num_online_cpus()); + + if (qedf->int_info.msix_cnt) + return qedf_request_msix_irq(qedf); + + qed_ops->common->simd_handler_config(qedf->cdev, &qedf, + QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler); + qedf->int_info.used_cnt = 1; + + return 0; +} + +/* Main function for libfc frame reception */ +static void qedf_recv_frame(struct qedf_ctx *qedf, + struct sk_buff *skb) +{ + u32 fr_len; + struct fc_lport *lport; + struct fc_frame_header *fh; + struct fcoe_crc_eof crc_eof; + struct fc_frame *fp; + u8 *mac = NULL; + u8 *dest_mac = NULL; + struct fcoe_hdr *hp; + struct qedf_rport *fcport; + + lport = qedf->lport; + if (lport == NULL || lport->state == LPORT_ST_DISABLED) { + QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n"); + kfree_skb(skb); + return; + } + + if (skb_is_nonlinear(skb)) + skb_linearize(skb); + mac = eth_hdr(skb)->h_source; + dest_mac = eth_hdr(skb)->h_dest; + + /* Pull the header */ + hp = (struct fcoe_hdr *)skb->data; + fh = (struct fc_frame_header *) skb_transport_header(skb); + skb_pull(skb, sizeof(struct fcoe_hdr)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = hp->fcoe_sof; + if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { + kfree_skb(skb); + return; + } + fr_eof(fp) = crc_eof.fcoe_eof; + fr_crc(fp) = crc_eof.fcoe_crc32; + if (pskb_trim(skb, fr_len)) { + kfree_skb(skb); + return; + } + + fh = fc_frame_header_get(fp); + + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fh->fh_type == FC_TYPE_FCP) { + /* Drop FCP data. 
We dont this in L2 path */ + kfree_skb(skb); + return; + } + if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && + fh->fh_type == FC_TYPE_ELS) { + switch (fc_frame_payload_op(fp)) { + case ELS_LOGO: + if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + /* drop non-FIP LOGO */ + kfree_skb(skb); + return; + } + break; + } + } + + if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { + /* Drop incoming ABTS */ + kfree_skb(skb); + return; + } + + /* + * If a connection is uploading, drop incoming FCoE frames as there + * is a small window where we could try to return a frame while libfc + * is trying to clean things up. + */ + + /* Get fcport associated with d_id if it exists */ + fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); + + if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION, + &fcport->flags)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "Connection uploading, dropping fp=%p.\n", fp); + kfree_skb(skb); + return; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: " + "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp, + ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, + fh->fh_type); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, + 1, skb->data, skb->len, false); + fc_exch_recv(lport, fp); +} + +static void qedf_ll2_process_skb(struct work_struct *work) +{ + struct qedf_skb_work *skb_work = + container_of(work, struct qedf_skb_work, work); + struct qedf_ctx *qedf = skb_work->qedf; + struct sk_buff *skb = skb_work->skb; + struct ethhdr *eh; + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL\n"); + goto err_out; + } + + eh = (struct ethhdr *)skb->data; + + /* Undo VLAN encapsulation */ + if (eh->h_proto == htons(ETH_P_8021Q)) { + memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); + eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN); + skb_reset_mac_header(skb); + } + + /* + * Process either a FIP frame or FCoE frame based on the + * protocol value. If it's not either just drop the + * frame. + */ + if (eh->h_proto == htons(ETH_P_FIP)) { + qedf_fip_recv(qedf, skb); + goto out; + } else if (eh->h_proto == htons(ETH_P_FCOE)) { + __skb_pull(skb, ETH_HLEN); + qedf_recv_frame(qedf, skb); + goto out; + } else + goto err_out; + +err_out: + kfree_skb(skb); +out: + kfree(skb_work); + return; +} + +static int qedf_ll2_rx(void *cookie, struct sk_buff *skb, + u32 arg1, u32 arg2) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; + struct qedf_skb_work *skb_work; + + skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC); + if (!skb_work) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so " + "dropping frame.\n"); + kfree_skb(skb); + return 0; + } + + INIT_WORK(&skb_work->work, qedf_ll2_process_skb); + skb_work->skb = skb; + skb_work->qedf = qedf; + queue_work(qedf->ll2_recv_wq, &skb_work->work); + + return 0; +} + +static struct qed_ll2_cb_ops qedf_ll2_cb_ops = { + .rx_cb = qedf_ll2_rx, + .tx_cb = NULL, +}; + +/* Main thread to process I/O completions */ +void qedf_fp_io_handler(struct work_struct *work) +{ + struct qedf_io_work *io_work = + container_of(work, struct qedf_io_work, work); + u32 comp_type; + + /* + * Deferred part of unsolicited CQE sends + * frame to libfc. 
+ */ + comp_type = (io_work->cqe.cqe_data >> + FCOE_CQE_CQE_TYPE_SHIFT) & + FCOE_CQE_CQE_TYPE_MASK; + if (comp_type == FCOE_UNSOLIC_CQE_TYPE && + io_work->fp) + fc_exch_recv(io_work->qedf->lport, io_work->fp); + else + qedf_process_cqe(io_work->qedf, &io_work->cqe); + + kfree(io_work); +} + +static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, + struct qed_sb_info *sb_info, u16 sb_id) +{ + struct status_block *sb_virt; + dma_addr_t sb_phys; + int ret; + + sb_virt = dma_alloc_coherent(&qedf->pdev->dev, + sizeof(struct status_block), &sb_phys, GFP_KERNEL); + + if (!sb_virt) { + QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed " + "for id = %d.\n", sb_id); + return -ENOMEM; + } + + ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys, + sb_id, QED_SB_TYPE_STORAGE); + + if (ret) { + QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization " + "failed for id = %d.\n", sb_id); + return ret; + } + + return 0; +} + +static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info) +{ + if (sb_info->sb_virt) + dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt), + (void *)sb_info->sb_virt, sb_info->sb_phys); +} + +static void qedf_destroy_sb(struct qedf_ctx *qedf) +{ + int id; + struct qedf_fastpath *fp = NULL; + + for (id = 0; id < qedf->num_queues; id++) { + fp = &(qedf->fp_array[id]); + if (fp->sb_id == QEDF_SB_ID_NULL) + break; + qedf_free_sb(qedf, fp->sb_info); + kfree(fp->sb_info); + } + kfree(qedf->fp_array); +} + +static int qedf_prepare_sb(struct qedf_ctx *qedf) +{ + int id; + struct qedf_fastpath *fp; + int ret; + + qedf->fp_array = + kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath), + GFP_KERNEL); + + if (!qedf->fp_array) { + QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation " + "failed.\n"); + return -ENOMEM; + } + + for (id = 0; id < qedf->num_queues; id++) { + fp = &(qedf->fp_array[id]); + fp->sb_id = QEDF_SB_ID_NULL; + fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); + if (!fp->sb_info) { + QEDF_ERR(&(qedf->dbg_ctx), "SB info struct " + "allocation failed.\n"); + goto err; + } + ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id); + if (ret) { + QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and " + "initialization failed.\n"); + goto err; + } + fp->sb_id = id; + fp->qedf = qedf; + fp->cq_num_entries = + qedf->global_queues[id]->cq_mem_size / + sizeof(struct fcoe_cqe); + } +err: + return 0; +} + +void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) +{ + u16 xid; + struct qedf_ioreq *io_req; + struct qedf_rport *fcport; + u32 comp_type; + + comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & + FCOE_CQE_CQE_TYPE_MASK; + + xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; + io_req = &qedf->cmd_mgr->cmds[xid]; + + /* Completion not for a valid I/O anymore so just return */ + if (!io_req) + return; + + fcport = io_req->fcport; + + if (fcport == NULL) { + QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n"); + return; + } + + /* + * Check that fcport is offloaded. If it isn't then the spinlock + * isn't valid and shouldn't be taken. We should just return. 
+ */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); + return; + } + + + switch (comp_type) { + case FCOE_GOOD_COMPLETION_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + switch (io_req->cmd_type) { + case QEDF_SCSI_CMD: + qedf_scsi_completion(qedf, cqe, io_req); + break; + case QEDF_ELS: + qedf_process_els_compl(qedf, cqe, io_req); + break; + case QEDF_TASK_MGMT_CMD: + qedf_process_tmf_compl(qedf, cqe, io_req); + break; + case QEDF_SEQ_CLEANUP: + qedf_process_seq_cleanup_compl(qedf, cqe, io_req); + break; + } + break; + case FCOE_ERROR_DETECTION_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Error detect CQE.\n"); + qedf_process_error_detect(qedf, cqe, io_req); + break; + case FCOE_EXCH_CLEANUP_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Cleanup CQE.\n"); + qedf_process_cleanup_compl(qedf, cqe, io_req); + break; + case FCOE_ABTS_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Abort CQE.\n"); + qedf_process_abts_compl(qedf, cqe, io_req); + break; + case FCOE_DUMMY_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Dummy CQE.\n"); + break; + case FCOE_LOCAL_COMP_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Local completion CQE.\n"); + break; + case FCOE_WARNING_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Warning CQE.\n"); + qedf_process_warning_compl(qedf, cqe, io_req); + break; + case MAX_FCOE_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Max FCoE CQE.\n"); + break; + default: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Default CQE.\n"); + break; + } +} + +static void qedf_free_bdq(struct qedf_ctx *qedf) +{ + int i; + + if (qedf->bdq_pbl_list) + dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma); + + if (qedf->bdq_pbl) + dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size, + qedf->bdq_pbl, qedf->bdq_pbl_dma); + + for (i = 0; i < QEDF_BDQ_SIZE; i++) { + if (qedf->bdq[i].buf_addr) { + dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE, + qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma); + } + } +} + +static void qedf_free_global_queues(struct qedf_ctx *qedf) +{ + int i; + struct global_queue **gl = qedf->global_queues; + + for (i = 0; i < qedf->num_queues; i++) { + if (!gl[i]) + continue; + + if (gl[i]->cq) + dma_free_coherent(&qedf->pdev->dev, + gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma); + if (gl[i]->cq_pbl) + dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size, + gl[i]->cq_pbl, gl[i]->cq_pbl_dma); + + kfree(gl[i]); + } + + qedf_free_bdq(qedf); +} + +static int qedf_alloc_bdq(struct qedf_ctx *qedf) +{ + int i; + struct scsi_bd *pbl; + u64 *list; + dma_addr_t page; + + /* Alloc dma memory for BDQ buffers */ + for (i = 0; i < QEDF_BDQ_SIZE; i++) { + qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL); + if (!qedf->bdq[i].buf_addr) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ " + "buffer %d.\n", i); + return -ENOMEM; + } + } + + /* Alloc dma memory for BDQ page buffer list */ + qedf->bdq_pbl_mem_size = + QEDF_BDQ_SIZE * sizeof(struct scsi_bd); + qedf->bdq_pbl_mem_size = + ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE); + + qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev, + 
qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL); + if (!qedf->bdq_pbl) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n"); + return -ENOMEM; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl, + qedf->bdq_pbl_dma); + + /* + * Populate BDQ PBL with physical and virtual address of individual + * BDQ buffers + */ + pbl = (struct scsi_bd *)qedf->bdq_pbl; + for (i = 0; i < QEDF_BDQ_SIZE; i++) { + pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma)); + pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma)); + pbl->opaque.hi = 0; + /* Opaque lo data is an index into the BDQ array */ + pbl->opaque.lo = cpu_to_le32(i); + pbl++; + } + + /* Allocate list of PBL pages */ + qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); + if (!qedf->bdq_pbl_list) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL " + "pages.\n"); + return -ENOMEM; + } + memset(qedf->bdq_pbl_list, 0, QEDF_PAGE_SIZE); + + /* + * Now populate PBL list with pages that contain pointers to the + * individual buffers. + */ + qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size / + QEDF_PAGE_SIZE; + list = (u64 *)qedf->bdq_pbl_list; + page = qedf->bdq_pbl_list_dma; + for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) { + *list = qedf->bdq_pbl_dma; + list++; + page += QEDF_PAGE_SIZE; + } + + return 0; +} + +static int qedf_alloc_global_queues(struct qedf_ctx *qedf) +{ + u32 *list; + int i; + int status = 0, rc; + u32 *pbl; + dma_addr_t page; + int num_pages; + + /* Allocate and map CQs, RQs */ + /* + * Number of global queues (CQ / RQ). This should + * be <= number of available MSIX vectors for the PF + */ + if (!qedf->num_queues) { + QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n"); + return 1; + } + + /* + * Make sure we allocated the PBL that will contain the physical + * addresses of our queues + */ + if (!qedf->p_cpuq) { + status = 1; + goto mem_alloc_failure; + } + + qedf->global_queues = kzalloc((sizeof(struct global_queue *) + * qedf->num_queues), GFP_KERNEL); + if (!qedf->global_queues) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global " + "queues array ptr memory\n"); + return -ENOMEM; + } + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "qedf->global_queues=%p.\n", qedf->global_queues); + + /* Allocate DMA coherent buffers for BDQ */ + rc = qedf_alloc_bdq(qedf); + if (rc) + goto mem_alloc_failure; + + /* Allocate a CQ and an associated PBL for each MSI-X vector */ + for (i = 0; i < qedf->num_queues; i++) { + qedf->global_queues[i] = kzalloc(sizeof(struct global_queue), + GFP_KERNEL); + if (!qedf->global_queues[i]) { + QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocation " + "global queue %d.\n", i); + goto mem_alloc_failure; + } + + qedf->global_queues[i]->cq_mem_size = + FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); + qedf->global_queues[i]->cq_mem_size = + ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE); + + qedf->global_queues[i]->cq_pbl_size = + (qedf->global_queues[i]->cq_mem_size / + PAGE_SIZE) * sizeof(void *); + qedf->global_queues[i]->cq_pbl_size = + ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); + + qedf->global_queues[i]->cq = + dma_alloc_coherent(&qedf->pdev->dev, + qedf->global_queues[i]->cq_mem_size, + &qedf->global_queues[i]->cq_dma, GFP_KERNEL); + + if (!qedf->global_queues[i]->cq) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "cq.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + 
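/* Clear the newly allocated CQ before building its page base list */ +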
memset(qedf->global_queues[i]->cq, 0, + qedf->global_queues[i]->cq_mem_size); + + qedf->global_queues[i]->cq_pbl = + dma_alloc_coherent(&qedf->pdev->dev, + qedf->global_queues[i]->cq_pbl_size, + &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); + + if (!qedf->global_queues[i]->cq_pbl) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "cq PBL.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + memset(qedf->global_queues[i]->cq_pbl, 0, + qedf->global_queues[i]->cq_pbl_size); + + /* Create PBL */ + num_pages = qedf->global_queues[i]->cq_mem_size / + QEDF_PAGE_SIZE; + page = qedf->global_queues[i]->cq_dma; + pbl = (u32 *)qedf->global_queues[i]->cq_pbl; + + while (num_pages--) { + *pbl = U64_LO(page); + pbl++; + *pbl = U64_HI(page); + pbl++; + page += QEDF_PAGE_SIZE; + } + /* Set the initial consumer index for cq */ + qedf->global_queues[i]->cq_cons_idx = 0; + } + + list = (u32 *)qedf->p_cpuq; + + /* + * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer, + * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points + * to the physical address which contains an array of pointers to + * the physical addresses of the specific queue pages. + */ + for (i = 0; i < qedf->num_queues; i++) { + *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma); + list++; + *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma); + list++; + *list = U64_LO(0); + list++; + *list = U64_HI(0); + list++; + } + + return 0; + +mem_alloc_failure: + qedf_free_global_queues(qedf); + return status; +} + +static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf) +{ + u8 sq_num_pbl_pages; + u32 sq_mem_size; + u32 cq_mem_size; + u32 cq_num_entries; + int rval; + + /* + * The number of completion queues/fastpath interrupts/status blocks + * we allocation is the minimum off: + * + * Number of CPUs + * Number of MSI-X vectors + * Max number allocated in hardware (QEDF_MAX_NUM_CQS) + */ + qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS, + num_online_cpus()); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", + qedf->num_queues); + + qedf->p_cpuq = pci_alloc_consistent(qedf->pdev, + qedf->num_queues * sizeof(struct qedf_glbl_q_params), + &qedf->hw_p_cpuq); + + if (!qedf->p_cpuq) { + QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n"); + return 1; + } + + rval = qedf_alloc_global_queues(qedf); + if (rval) { + QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation " + "failed.\n"); + return 1; + } + + /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */ + sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); + sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE); + sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE); + + /* Calculate CQ num entries */ + cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); + cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE); + cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe); + + memset(&(qedf->pf_params), 0, + sizeof(qedf->pf_params)); + + /* Setup the value for fcoe PF */ + qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS; + qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS; + qedf->pf_params.fcoe_pf_params.glbl_q_params_addr = + (u64)qedf->hw_p_cpuq; + qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages; + + qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0; + + qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries; + qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues; + + /* log_page_size: 12 for 4KB pages */ + 
qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE); + + qedf->pf_params.fcoe_pf_params.mtu = 9000; + qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI; + qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI; + + /* BDQ address and size */ + qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] = + qedf->bdq_pbl_list_dma; + qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] = + qedf->bdq_pbl_list_num_entries; + qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n", + qedf->bdq_pbl_list, + qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0], + qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "cq_num_entries=%d.\n", + qedf->pf_params.fcoe_pf_params.cq_num_entries); + + return 0; +} + +/* Free DMA coherent memory for array of queue pointers we pass to qed */ +static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf) +{ + size_t size = 0; + + if (qedf->p_cpuq) { + size = qedf->num_queues * sizeof(struct qedf_glbl_q_params); + pci_free_consistent(qedf->pdev, size, qedf->p_cpuq, + qedf->hw_p_cpuq); + } + + qedf_free_global_queues(qedf); + + if (qedf->global_queues) + kfree(qedf->global_queues); +} + +/* + * PCI driver functions + */ + +static const struct pci_device_id qedf_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) }, + {0} +}; +MODULE_DEVICE_TABLE(pci, qedf_pci_tbl); + +static struct pci_driver qedf_pci_driver = { + .name = QEDF_MODULE_NAME, + .id_table = qedf_pci_tbl, + .probe = qedf_probe, + .remove = qedf_remove, +}; + +static int __qedf_probe(struct pci_dev *pdev, int mode) +{ + int rc = -EINVAL; + struct fc_lport *lport; + struct qedf_ctx *qedf; + struct Scsi_Host *host; + bool is_vf = false; + struct qed_ll2_params params; + char host_buf[20]; + struct qed_link_params link_params; + int status; + void *task_start, *task_end; + struct qed_slowpath_params slowpath_params; + struct qed_probe_params qed_params; + u16 tmp; + + /* + * When doing error recovery we didn't reap the lport so don't try + * to reallocate it. 
+ */ + if (mode != QEDF_MODE_RECOVERY) { + lport = libfc_host_alloc(&qedf_host_template, + sizeof(struct qedf_ctx)); + + if (!lport) { + QEDF_ERR(NULL, "Could not allocate lport.\n"); + rc = -ENOMEM; + goto err0; + } + + /* Initialize qedf_ctx */ + qedf = lport_priv(lport); + qedf->lport = lport; + qedf->ctlr.lp = lport; + qedf->pdev = pdev; + qedf->dbg_ctx.pdev = pdev; + qedf->dbg_ctx.host_no = lport->host->host_no; + spin_lock_init(&qedf->hba_lock); + INIT_LIST_HEAD(&qedf->fcports); + qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1; + atomic_set(&qedf->num_offloads, 0); + qedf->stop_io_on_error = false; + pci_set_drvdata(pdev, qedf); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, + "QLogic FastLinQ FCoE Module qedf %s, " + "FW %d.%d.%d.%d\n", QEDF_VERSION, + FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, + FW_ENGINEERING_VERSION); + } else { + /* Init pointers during recovery */ + qedf = pci_get_drvdata(pdev); + lport = qedf->lport; + } + + host = lport->host; + + /* Allocate mempool for qedf_io_work structs */ + qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN, + qedf_io_work_cache); + if (qedf->io_mempool == NULL) { + QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n"); + goto err1; + } + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n", + qedf->io_mempool); + + sprintf(host_buf, "qedf_%u_link", + qedf->lport->host->host_no); + qedf->link_update_wq = create_singlethread_workqueue(host_buf); + INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); + INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); + + qedf->fipvlan_retries = qedf_fipvlan_retries; + + /* + * Common probe. Takes care of basic hardware init and pci_* + * functions. + */ + memset(&qed_params, 0, sizeof(qed_params)); + qed_params.protocol = QED_PROTOCOL_FCOE; + qed_params.dp_module = qedf_dp_module; + qed_params.dp_level = qedf_dp_level; + qed_params.is_vf = is_vf; + qedf->cdev = qed_ops->common->probe(pdev, &qed_params); + if (!qedf->cdev) { + rc = -ENODEV; + goto err1; + } + + /* queue allocation code should come here + * order should be + * slowpath_start + * status block allocation + * interrupt registration (to get min number of queues) + * set_fcoe_pf_param + * qed_sp_fcoe_func_start + */ + rc = qedf_set_fcoe_pf_param(qedf); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n"); + goto err2; + } + qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); + + /* Learn information crucial for qedf to progress */ + rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); + goto err1; + } + + /* Record BDQ producer doorbell addresses */ + qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; + qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod, + qedf->bdq_secondary_prod); + + qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf); + + rc = qedf_prepare_sb(qedf); + if (rc) { + + QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); + goto err2; + } + + /* Start the Slowpath-process */ + slowpath_params.int_mode = QED_INT_MODE_MSIX; + slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER; + slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; + slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; + slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; + memcpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); + rc = 
qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); + goto err2; + } + + /* + * update_pf_params needs to be called before and after slowpath + * start + */ + qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); + + /* Setup interrupts */ + rc = qedf_setup_int(qedf); + if (rc) + goto err3; + + rc = qed_ops->start(qedf->cdev, &qedf->tasks); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n"); + goto err4; + } + task_start = qedf_get_task_mem(&qedf->tasks, 0); + task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, " + "end=%p block_size=%u.\n", task_start, task_end, + qedf->tasks.size); + + /* + * We need to write the number of BDs in the BDQ we've preallocated so + * the f/w will do a prefetch and we'll get an unsolicited CQE when a + * packet arrives. + */ + qedf->bdq_prod_idx = QEDF_BDQ_SIZE; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Writing %d to primary and secondary BDQ doorbell registers.\n", + qedf->bdq_prod_idx); + writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); + tmp = readw(qedf->bdq_primary_prod); + writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); + tmp = readw(qedf->bdq_secondary_prod); + + qed_ops->common->set_power_state(qedf->cdev, PCI_D0); + + /* Now that the dev_info struct has been filled in set the MAC + * address + */ + ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n", + qedf->mac); + + /* Set the WWNN and WWPN based on the MAC address */ + qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0); + qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx " + "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); + + sprintf(host_buf, "host_%d", host->host_no); + qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION); + + + /* Set xid max values */ + qedf->max_scsi_xid = QEDF_MAX_SCSI_XID; + qedf->max_els_xid = QEDF_MAX_ELS_XID; + + /* Allocate cmd mgr */ + qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); + if (!qedf->cmd_mgr) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n"); + goto err5; + } + + if (mode != QEDF_MODE_RECOVERY) { + host->transportt = qedf_fc_transport_template; + host->can_queue = QEDF_MAX_ELS_XID; + host->max_lun = qedf_max_lun; + host->max_cmd_len = QEDF_MAX_CDB_LEN; + rc = scsi_add_host(host, &pdev->dev); + if (rc) + goto err6; + } + + memset(¶ms, 0, sizeof(params)); + params.mtu = 9000; + ether_addr_copy(params.ll2_mac_address, qedf->mac); + + /* Start LL2 processing thread */ + snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); + qedf->ll2_recv_wq = + create_singlethread_workqueue(host_buf); + if (!qedf->ll2_recv_wq) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n"); + goto err7; + } + +#ifdef CONFIG_DEBUG_FS + qedf_dbg_host_init(&(qedf->dbg_ctx), &qedf_debugfs_ops, + &qedf_dbg_fops); +#endif + + /* Start LL2 */ + qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf); + rc = qed_ops->ll2->start(qedf->cdev, ¶ms); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n"); + goto err7; + } + set_bit(QEDF_LL2_STARTED, &qedf->flags); + + /* hw will be insterting vlan tag*/ + qedf->vlan_hw_insert = 1; + qedf->vlan_id = 0; + + /* + * No need to setup fcoe_ctlr or fc_lport objects during recovery since + * they were not reaped during the unload process. 
+ */ + if (mode != QEDF_MODE_RECOVERY) { + /* Setup imbedded fcoe controller */ + qedf_fcoe_ctlr_setup(qedf); + + /* Setup lport */ + rc = qedf_lport_setup(qedf); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "qedf_lport_setup failed.\n"); + goto err7; + } + } + + sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); + qedf->timer_work_queue = + create_singlethread_workqueue(host_buf); + if (!qedf->timer_work_queue) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " + "workqueue.\n"); + goto err7; + } + + /* DPC workqueue is not reaped during recovery unload */ + if (mode != QEDF_MODE_RECOVERY) { + sprintf(host_buf, "qedf_%u_dpc", + qedf->lport->host->host_no); + qedf->dpc_wq = create_singlethread_workqueue(host_buf); + } + + /* + * GRC dump and sysfs parameters are not reaped during the recovery + * unload process. + */ + if (mode != QEDF_MODE_RECOVERY) { + qedf->grcdump_size = qed_ops->common->dbg_grc_size(qedf->cdev); + if (qedf->grcdump_size) { + rc = qedf_alloc_grc_dump_buf(&qedf->grcdump, + qedf->grcdump_size); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "GRC Dump buffer alloc failed.\n"); + qedf->grcdump = NULL; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "grcdump: addr=%p, size=%u.\n", + qedf->grcdump, qedf->grcdump_size); + } + qedf_create_sysfs_ctx_attr(qedf); + + /* Initialize I/O tracing for this adapter */ + spin_lock_init(&qedf->io_trace_lock); + qedf->io_trace_idx = 0; + } + + init_completion(&qedf->flogi_compl); + + memset(&link_params, 0, sizeof(struct qed_link_params)); + link_params.link_up = true; + status = qed_ops->common->set_link(qedf->cdev, &link_params); + if (status) + QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n"); + + /* Start/restart discovery */ + if (mode == QEDF_MODE_RECOVERY) + fcoe_ctlr_link_up(&qedf->ctlr); + else + fc_fabric_login(lport); + + /* All good */ + return 0; + +err7: + if (qedf->ll2_recv_wq) + destroy_workqueue(qedf->ll2_recv_wq); + fc_remove_host(qedf->lport->host); + scsi_remove_host(qedf->lport->host); +#ifdef CONFIG_DEBUG_FS + qedf_dbg_host_exit(&(qedf->dbg_ctx)); +#endif +err6: + qedf_cmd_mgr_free(qedf->cmd_mgr); +err5: + qed_ops->stop(qedf->cdev); +err4: + qedf_free_fcoe_pf_param(qedf); + qedf_sync_free_irqs(qedf); +err3: + qed_ops->common->slowpath_stop(qedf->cdev); +err2: + qed_ops->common->remove(qedf->cdev); +err1: + scsi_host_put(lport->host); +err0: + return rc; +} + +static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return __qedf_probe(pdev, QEDF_MODE_NORMAL); +} + +static void __qedf_remove(struct pci_dev *pdev, int mode) +{ + struct qedf_ctx *qedf; + + if (!pdev) { + QEDF_ERR(NULL, "pdev is NULL.\n"); + return; + } + + qedf = pci_get_drvdata(pdev); + + /* + * Prevent race where we're in board disable work and then try to + * rmmod the module. 
+ */ + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n"); + return; + } + + if (mode != QEDF_MODE_RECOVERY) + set_bit(QEDF_UNLOADING, &qedf->flags); + + /* Logoff the fabric to upload all connections */ + if (mode == QEDF_MODE_RECOVERY) + fcoe_ctlr_link_down(&qedf->ctlr); + else + fc_fabric_logoff(qedf->lport); + qedf_wait_for_upload(qedf); + +#ifdef CONFIG_DEBUG_FS + qedf_dbg_host_exit(&(qedf->dbg_ctx)); +#endif + + /* Stop any link update handling */ + cancel_delayed_work_sync(&qedf->link_update); + destroy_workqueue(qedf->link_update_wq); + qedf->link_update_wq = NULL; + + if (qedf->timer_work_queue) + destroy_workqueue(qedf->timer_work_queue); + + /* Stop Light L2 */ + clear_bit(QEDF_LL2_STARTED, &qedf->flags); + qed_ops->ll2->stop(qedf->cdev); + if (qedf->ll2_recv_wq) + destroy_workqueue(qedf->ll2_recv_wq); + + /* Stop fastpath */ + qedf_sync_free_irqs(qedf); + qedf_destroy_sb(qedf); + + /* + * During recovery don't destroy OS constructs that represent the + * physical port. + */ + if (mode != QEDF_MODE_RECOVERY) { + qedf_free_grc_dump_buf(&qedf->grcdump); + qedf_remove_sysfs_ctx_attr(qedf); + + /* Remove all SCSI/libfc/libfcoe structures */ + fcoe_ctlr_destroy(&qedf->ctlr); + fc_lport_destroy(qedf->lport); + fc_remove_host(qedf->lport->host); + scsi_remove_host(qedf->lport->host); + } + + qedf_cmd_mgr_free(qedf->cmd_mgr); + + if (mode != QEDF_MODE_RECOVERY) { + fc_exch_mgr_free(qedf->lport); + fc_lport_free_stats(qedf->lport); + + /* Wait for all vports to be reaped */ + qedf_wait_for_vport_destroy(qedf); + } + + /* + * Now that all connections have been uploaded we can stop the + * rest of the qed operations + */ + qed_ops->stop(qedf->cdev); + + if (mode != QEDF_MODE_RECOVERY) { + if (qedf->dpc_wq) { + /* Stop general DPC handling */ + destroy_workqueue(qedf->dpc_wq); + qedf->dpc_wq = NULL; + } + } + + /* Final shutdown for the board */ + qedf_free_fcoe_pf_param(qedf); + if (mode != QEDF_MODE_RECOVERY) { + qed_ops->common->set_power_state(qedf->cdev, PCI_D0); + pci_set_drvdata(pdev, NULL); + } + qed_ops->common->slowpath_stop(qedf->cdev); + qed_ops->common->remove(qedf->cdev); + + mempool_destroy(qedf->io_mempool); + + /* Only reap the Scsi_host on a real removal */ + if (mode != QEDF_MODE_RECOVERY) + scsi_host_put(qedf->lport->host); +} + +static void qedf_remove(struct pci_dev *pdev) +{ + /* Check to make sure this function wasn't already disabled */ + if (!atomic_read(&pdev->enable_cnt)) + return; + + __qedf_remove(pdev, QEDF_MODE_NORMAL); +} + +/* + * Module Init/Remove + */ + +static int __init qedf_init(void) +{ + int ret; + + /* If debug=1 passed, set the default log mask */ + if (qedf_debug == QEDF_LOG_DEFAULT) + qedf_debug = QEDF_DEFAULT_LOG_MASK; + + /* Print driver banner */ + QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR, + QEDF_VERSION); + + /* Create kmem_cache for qedf_io_work structs */ + qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache", + sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL); + if (qedf_io_work_cache == NULL) { + QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n"); + goto err1; + } + QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n", + qedf_io_work_cache); + + qed_ops = qed_get_fcoe_ops(); + if (!qed_ops) { + QEDF_ERR(NULL, "Failed to get qed fcoe operations\n"); + goto err1; + } + +#ifdef CONFIG_DEBUG_FS + qedf_dbg_init("qedf"); +#endif + + qedf_fc_transport_template = + fc_attach_transport(&qedf_fc_transport_fn); + if (!qedf_fc_transport_template) { + 
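/* Without the FC transport template the SCSI hosts cannot be registered, so fail module init */ +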
QEDF_ERR(NULL, "Could not register with FC transport\n"); + goto err2; + } + + qedf_fc_vport_transport_template = + fc_attach_transport(&qedf_fc_vport_transport_fn); + if (!qedf_fc_vport_transport_template) { + QEDF_ERR(NULL, "Could not register vport template with FC " + "transport\n"); + goto err3; + } + + qedf_io_wq = create_workqueue("qedf_io_wq"); + if (!qedf_io_wq) { + QEDF_ERR(NULL, "Could not create qedf_io_wq.\n"); + goto err4; + } + + qedf_cb_ops.get_login_failures = qedf_get_login_failures; + + ret = pci_register_driver(&qedf_pci_driver); + if (ret) { + QEDF_ERR(NULL, "Failed to register driver\n"); + goto err5; + } + + return 0; + +err5: + destroy_workqueue(qedf_io_wq); +err4: + fc_release_transport(qedf_fc_vport_transport_template); +err3: + fc_release_transport(qedf_fc_transport_template); +err2: +#ifdef CONFIG_DEBUG_FS + qedf_dbg_exit(); +#endif + qed_put_fcoe_ops(); +err1: + return -EINVAL; +} + +static void __exit qedf_cleanup(void) +{ + pci_unregister_driver(&qedf_pci_driver); + + destroy_workqueue(qedf_io_wq); + + fc_release_transport(qedf_fc_vport_transport_template); + fc_release_transport(qedf_fc_transport_template); +#ifdef CONFIG_DEBUG_FS + qedf_dbg_exit(); +#endif + qed_put_fcoe_ops(); + + kmem_cache_destroy(qedf_io_work_cache); +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver"); +MODULE_AUTHOR("QLogic Corporation"); +MODULE_VERSION(QEDF_VERSION); +module_init(qedf_init); +module_exit(qedf_cleanup); diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h new file mode 100644 index 000000000000..4ae5f537a440 --- /dev/null +++ b/drivers/scsi/qedf/qedf_version.h @@ -0,0 +1,15 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#define QEDF_VERSION "8.10.7.0" +#define QEDF_DRIVER_MAJOR_VER 8 +#define QEDF_DRIVER_MINOR_VER 10 +#define QEDF_DRIVER_REV_VER 7 +#define QEDF_DRIVER_ENG_VER 0 + diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index b1d3904ae8fd..c9f0ef4e11b3 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -165,10 +165,9 @@ static void qedi_tmf_resp_work(struct work_struct *work) iscsi_block_session(session->cls_session); rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true); if (rval) { - clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); qedi_clear_task_idx(qedi, qedi_cmd->task_id); iscsi_unblock_session(session->cls_session); - return; + goto exit_tmf_resp; } iscsi_unblock_session(session->cls_session); @@ -177,6 +176,8 @@ static void qedi_tmf_resp_work(struct work_struct *work) spin_lock(&session->back_lock); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); spin_unlock(&session->back_lock); + +exit_tmf_resp: kfree(resp_hdr_ptr); clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index f201f4099620..f610103994af 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2163,6 +2163,9 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) clear_bit(vha->vp_idx, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); + dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, + vha->gnl.ldma); + if (vha->qpair->vp_idx == vha->vp_idx) { if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x7087, diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 40ca75bbcb9d..84c9098cc089 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -13,28 +13,25 @@ /* BSG support for ELS/CT pass through */ void -qla2x00_bsg_job_done(void *data, void *ptr, int res) +qla2x00_bsg_job_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; + srb_t *sp = ptr; struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; bsg_reply->result = res; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); - sp->free(vha, sp); + sp->free(sp); } void -qla2x00_bsg_sp_free(void *data, void *ptr) +qla2x00_bsg_sp_free(void *ptr) { - srb_t *sp = (srb_t *)ptr; - struct scsi_qla_host *vha = sp->fcport->vha; + srb_t *sp = ptr; + struct qla_hw_data *ha = sp->vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_request *bsg_request = bsg_job->request; - - struct qla_hw_data *ha = vha->hw; struct qla_mt_iocb_rqst_fx00 *piocb_rqst; if (sp->type == SRB_FXIOCB_BCMD) { @@ -62,7 +59,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr) sp->type == SRB_FXIOCB_BCMD || sp->type == SRB_ELS_CMD_HST) kfree(sp->fcport); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); } int @@ -394,7 +391,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, "qla2x00_start_sp failed = %d\n", rval); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); rval = -EIO; goto done_unmap_sg; } @@ -542,7 +539,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7017, "qla2x00_start_sp failed=%d.\n", rval); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); rval = -EIO; goto done_free_fcport; } @@ -2578,6 +2575,6 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) done: 
spin_unlock_irqrestore(&ha->hardware_lock, flags); - sp->free(vha, sp); + sp->free(sp); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 2f14adfab018..625d438e3cce 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -55,6 +55,8 @@ #include "qla_settings.h" +#define MODE_DUAL (MODE_TARGET | MODE_INITIATOR) + /* * Data bit definitions */ @@ -251,6 +253,14 @@ #define MAX_CMDSZ 16 /* SCSI maximum CDB size. */ #include "qla_fw.h" + +struct name_list_extended { + struct get_name_list_extended *l; + dma_addr_t ldma; + struct list_head fcports; /* protect by sess_list */ + u32 size; + u8 sent; +}; /* * Timeout timer counts in seconds */ @@ -309,6 +319,17 @@ struct els_logo_payload { uint8_t wwpn[WWN_SIZE]; }; +struct ct_arg { + void *iocb; + u16 nport_handle; + dma_addr_t req_dma; + dma_addr_t rsp_dma; + u32 req_size; + u32 rsp_size; + void *req; + void *rsp; +}; + /* * SRB extensions. */ @@ -320,6 +341,7 @@ struct srb_iocb { #define SRB_LOGIN_COND_PLOGI BIT_1 #define SRB_LOGIN_SKIP_PRLI BIT_2 uint16_t data[2]; + u32 iop[2]; } logio; struct { #define ELS_DCMD_TIMEOUT 20 @@ -372,6 +394,16 @@ struct srb_iocb { __le16 comp_status; struct completion comp; } abt; + struct ct_arg ctarg; + struct { + __le16 in_mb[28]; /* fr fw */ + __le16 out_mb[28]; /* to fw */ + void *out, *in; + dma_addr_t out_dma, in_dma; + } mbx; + struct { + struct imm_ntfy_from_isp *ntfy; + } nack; } u; struct timer_list timer; @@ -392,23 +424,31 @@ struct srb_iocb { #define SRB_FXIOCB_BCMD 11 #define SRB_ABT_CMD 12 #define SRB_ELS_DCMD 13 +#define SRB_MB_IOCB 14 +#define SRB_CT_PTHRU_CMD 15 +#define SRB_NACK_PLOGI 16 +#define SRB_NACK_PRLI 17 +#define SRB_NACK_LOGO 18 typedef struct srb { atomic_t ref_count; struct fc_port *fcport; + struct scsi_qla_host *vha; uint32_t handle; uint16_t flags; uint16_t type; char *name; int iocbs; struct qla_qpair *qpair; + u32 gen1; /* scratch */ + u32 gen2; /* scratch */ union { struct srb_iocb iocb_cmd; struct bsg_job *bsg_job; struct srb_cmd scmd; } u; - void (*done)(void *, void *, int); - void (*free)(void *, void *); + void (*done)(void *, int); + void (*free)(void *); } srb_t; #define GET_CMD_SP(sp) (sp->u.scmd.cmd) @@ -1794,6 +1834,7 @@ typedef struct { #define SS_RESIDUAL_OVER BIT_10 #define SS_SENSE_LEN_VALID BIT_9 #define SS_RESPONSE_INFO_LEN_VALID BIT_8 +#define SS_SCSI_STATUS_BYTE 0xff #define SS_RESERVE_CONFLICT (BIT_4 | BIT_3) #define SS_BUSY_CONDITION BIT_3 @@ -1975,6 +2016,84 @@ struct mbx_entry { uint8_t port_name[WWN_SIZE]; }; +#ifndef IMMED_NOTIFY_TYPE +#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */ +/* + * ISP queue - immediate notify entry structure definition. + * This is sent by the ISP to the Target driver. + * This IOCB would have report of events sent by the + * initiator, that needs to be handled by the target + * driver immediately. + */ +struct imm_ntfy_from_isp { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + union { + struct { + uint32_t sys_define_2; /* System defined. 
*/ + target_id_t target; + uint16_t lun; + uint8_t target_id; + uint8_t reserved_1; + uint16_t status_modifier; + uint16_t status; + uint16_t task_flags; + uint16_t seq_id; + uint16_t srr_rx_id; + uint32_t srr_rel_offs; + uint16_t srr_ui; +#define SRR_IU_DATA_IN 0x1 +#define SRR_IU_DATA_OUT 0x5 +#define SRR_IU_STATUS 0x7 + uint16_t srr_ox_id; + uint8_t reserved_2[28]; + } isp2x; + struct { + uint32_t reserved; + uint16_t nport_handle; + uint16_t reserved_2; + uint16_t flags; +#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 +#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 + uint16_t srr_rx_id; + uint16_t status; + uint8_t status_subcode; + uint8_t fw_handle; + uint32_t exchange_address; + uint32_t srr_rel_offs; + uint16_t srr_ui; + uint16_t srr_ox_id; + union { + struct { + uint8_t node_name[8]; + } plogi; /* PLOGI/ADISC/PDISC */ + struct { + /* PRLI word 3 bit 0-15 */ + uint16_t wd3_lo; + uint8_t resv0[6]; + } prli; + struct { + uint8_t port_id[3]; + uint8_t resv1; + uint16_t nport_handle; + uint16_t resv2; + } req_els; + } u; + uint8_t port_name[8]; + uint8_t resv3[3]; + uint8_t vp_index; + uint32_t reserved_5; + uint8_t port_id[3]; + uint8_t reserved_6; + } isp24; + } u; + uint16_t reserved_7; + uint16_t ox_id; +} __packed; +#endif + /* * ISP request and response queue entry sizes */ @@ -2022,10 +2141,22 @@ typedef struct { #define FC4_TYPE_OTHER 0x0 #define FC4_TYPE_UNKNOWN 0xff +/* mailbox command 4G & above */ +struct mbx_24xx_entry { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_define1; + uint8_t entry_status; + uint32_t handle; + uint16_t mb[28]; +}; + +#define IOCB_SIZE 64 + /* * Fibre channel port type. */ - typedef enum { +typedef enum { FCT_UNKNOWN, FCT_RSCN, FCT_SWITCH, @@ -2034,6 +2165,74 @@ typedef struct { FCT_TARGET } fc_port_type_t; +enum qla_sess_deletion { + QLA_SESS_DELETION_NONE = 0, + QLA_SESS_DELETION_IN_PROGRESS, + QLA_SESS_DELETED, +}; + +enum qlt_plogi_link_t { + QLT_PLOGI_LINK_SAME_WWN, + QLT_PLOGI_LINK_CONFLICT, + QLT_PLOGI_LINK_MAX +}; + +struct qlt_plogi_ack_t { + struct list_head list; + struct imm_ntfy_from_isp iocb; + port_id_t id; + int ref_count; + void *fcport; +}; + +struct ct_sns_desc { + struct ct_sns_pkt *ct_sns; + dma_addr_t ct_sns_dma; +}; + +enum discovery_state { + DSC_DELETED, + DSC_GID_PN, + DSC_GNL, + DSC_LOGIN_PEND, + DSC_LOGIN_FAILED, + DSC_GPDB, + DSC_GPSC, + DSC_UPD_FCPORT, + DSC_LOGIN_COMPLETE, + DSC_DELETE_PEND, +}; + +enum login_state { /* FW control Target side */ + DSC_LS_LLIOCB_SENT = 2, + DSC_LS_PLOGI_PEND, + DSC_LS_PLOGI_COMP, + DSC_LS_PRLI_PEND, + DSC_LS_PRLI_COMP, + DSC_LS_PORT_UNAVAIL, + DSC_LS_PRLO_PEND = 9, + DSC_LS_LOGO_PEND, +}; + +enum fcport_mgt_event { + FCME_RELOGIN = 1, + FCME_RSCN, + FCME_GIDPN_DONE, + FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */ + FCME_GNL_DONE, + FCME_GPSC_DONE, + FCME_GPDB_DONE, + FCME_GPNID_DONE, + FCME_DELETE_DONE, +}; + +enum rscn_addr_format { + RSCN_PORT_ADDR, + RSCN_AREA_ADDR, + RSCN_DOM_ADDR, + RSCN_FAB_ADDR, +}; + /* * Fibre channel port structure. 
*/ @@ -2047,6 +2246,29 @@ typedef struct fc_port { uint16_t loop_id; uint16_t old_loop_id; + unsigned int conf_compl_supported:1; + unsigned int deleted:2; + unsigned int local:1; + unsigned int logout_on_delete:1; + unsigned int logo_ack_needed:1; + unsigned int keep_nport_handle:1; + unsigned int send_els_logo:1; + unsigned int login_pause:1; + unsigned int login_succ:1; + + struct fc_port *conflict; + unsigned char logout_completed; + int generation; + + struct se_session *se_sess; + struct kref sess_kref; + struct qla_tgt *tgt; + unsigned long expires; + struct list_head del_list_entry; + struct work_struct free_work; + + struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; + uint16_t tgt_id; uint16_t old_tgt_id; @@ -2075,8 +2297,30 @@ typedef struct fc_port { unsigned long retry_delay_timestamp; struct qla_tgt_sess *tgt_session; + struct ct_sns_desc ct_desc; + enum discovery_state disc_state; + enum login_state fw_login_state; + u32 login_gen, last_login_gen; + u32 rscn_gen, last_rscn_gen; + u32 chip_reset; + struct list_head gnl_entry; + struct work_struct del_work; + u8 iocb[IOCB_SIZE]; } fc_port_t; +#define QLA_FCPORT_SCAN 1 +#define QLA_FCPORT_FOUND 2 + +struct event_arg { + enum fcport_mgt_event event; + fc_port_t *fcport; + srb_t *sp; + port_id_t id; + u16 data[2], rc; + u8 port_name[WWN_SIZE]; + u32 iop[2]; +}; + #include "qla_mr.h" /* @@ -2154,6 +2398,10 @@ static const char * const port_state_str[] = { #define GFT_ID_REQ_SIZE (16 + 4) #define GFT_ID_RSP_SIZE (16 + 32) +#define GID_PN_CMD 0x121 +#define GID_PN_REQ_SIZE (16 + 8) +#define GID_PN_RSP_SIZE (16 + 4) + #define RFT_ID_CMD 0x217 #define RFT_ID_REQ_SIZE (16 + 4 + 32) #define RFT_ID_RSP_SIZE 16 @@ -2479,6 +2727,10 @@ struct ct_sns_req { uint8_t reserved; uint8_t port_name[3]; } gff_id; + + struct { + uint8_t port_name[8]; + } gid_pn; } req; }; @@ -2558,6 +2810,10 @@ struct ct_sns_rsp { struct { uint8_t fc4_features[128]; } gff_id; + struct { + uint8_t reserved; + uint8_t port_id[3]; + } gid_pn; } rsp; }; @@ -2699,11 +2955,11 @@ struct isp_operations { uint16_t (*calc_req_entries) (uint16_t); void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t); - void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); - void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t, + void *(*prep_ms_iocb) (struct scsi_qla_host *, struct ct_arg *); + void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); - uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *, + uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); @@ -2765,13 +3021,21 @@ enum qla_work_type { QLA_EVT_AEN, QLA_EVT_IDC_ACK, QLA_EVT_ASYNC_LOGIN, - QLA_EVT_ASYNC_LOGIN_DONE, QLA_EVT_ASYNC_LOGOUT, QLA_EVT_ASYNC_LOGOUT_DONE, QLA_EVT_ASYNC_ADISC, QLA_EVT_ASYNC_ADISC_DONE, QLA_EVT_UEVENT, QLA_EVT_AENFX, + QLA_EVT_GIDPN, + QLA_EVT_GPNID, + QLA_EVT_GPNID_DONE, + QLA_EVT_NEW_SESS, + QLA_EVT_GPDB, + QLA_EVT_GPSC, + QLA_EVT_UPD_FCPORT, + QLA_EVT_GNL, + QLA_EVT_NACK, }; @@ -2807,6 +3071,23 @@ struct qla_work_evt { struct { srb_t *sp; } iosb; + struct { + port_id_t id; + } gpnid; + struct { + port_id_t id; + u8 port_name[8]; + void *pla; + } new_sess; + struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */ + fc_port_t *fcport; + u8 opt; + } fcport; + struct { + fc_port_t *fcport; + u8 iocb[IOCB_SIZE]; + int type; + } nack; } u; }; @@ -2943,6 +3224,7 @@ struct qla_qpair { struct qla_hw_data *hw; struct work_struct q_work; struct list_head 
qp_list_elem; /* vha->qp_list */ + struct scsi_qla_host *vha; }; /* Place holder for FW buffer parameters */ @@ -2963,7 +3245,6 @@ struct qlt_hw_data { /* Protected by hw lock */ uint32_t enable_class_2:1; uint32_t enable_explicit_conf:1; - uint32_t ini_mode_force_reverse:1; uint32_t node_name_set:1; dma_addr_t atio_dma; /* Physical address. */ @@ -3115,6 +3396,7 @@ struct qla_hw_data { #define FLOGI_SP_SUPPORT BIT_13 uint8_t port_no; /* Physical port of adapter */ + uint8_t exch_starvation; /* Timeout timers. */ uint8_t loop_down_abort_time; /* port down timer */ @@ -3682,7 +3964,7 @@ typedef struct scsi_qla_host { #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ -#define SCR_PENDING 21 /* SCR in target mode */ +#define FREE_BIT 21 #define PORT_UPDATE_NEEDED 22 #define FX00_RESET_RECOVERY 23 #define FX00_TARGET_SCAN 24 @@ -3736,7 +4018,9 @@ typedef struct scsi_qla_host { /* list of commands waiting on workqueue */ struct list_head qla_cmd_list; struct list_head qla_sess_op_cmd_list; + struct list_head unknown_atio_list; spinlock_t cmd_list_lock; + struct delayed_work unknown_atio_work; /* Counter to detect races between ELS and RSCN events */ atomic_t generation_tick; @@ -3788,6 +4072,10 @@ typedef struct scsi_qla_host { struct qla8044_reset_template reset_tmplt; struct qla_tgt_counters tgt_counters; uint16_t bbcr; + struct name_list_extended gnl; + /* Count of active session/fcport */ + int fcport_count; + wait_queue_head_t fcport_waitQ; } scsi_qla_host_t; struct qla27xx_image_status { diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 34272fde8a5b..b48cce696bac 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -18,7 +18,7 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) scsi_qla_host_t *vha = s->private; struct qla_hw_data *ha = vha->hw; unsigned long flags; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; struct qla_tgt *tgt= vha->vha_tgt.qla_tgt; seq_printf(s, "%s\n",vha->host_str); @@ -26,12 +26,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) seq_printf(s, "Port ID Port Name Handle\n"); spin_lock_irqsave(&ha->tgt.sess_lock, flags); - list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) { + list_for_each_entry(sess, &vha->vp_fcports, list) seq_printf(s, "%02x:%02x:%02x %8phC %d\n", - sess->s_id.b.domain,sess->s_id.b.area, - sess->s_id.b.al_pa, sess->port_name, - sess->loop_id); - } + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa, sess->port_name, + sess->loop_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 8a2368b32dec..1f808928763b 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -72,6 +72,37 @@ struct port_database_24xx { uint8_t reserved_3[24]; }; +/* + * MB 75h returns a list of DB entries similar to port_database_24xx(64B). + * However, in this case it returns 1st 40 bytes. + */ +struct get_name_list_extended { + __le16 flags; + u8 current_login_state; + u8 last_login_state; + u8 hard_address[3]; + u8 reserved_1; + u8 port_id[3]; + u8 sequence_id; + __le16 port_timer; + __le16 nport_handle; /* N_PORT handle. 
*/ + __le16 receive_data_size; + __le16 reserved_2; + + /* PRLI SVC Param are Big endian */ + u8 prli_svc_param_word_0[2]; /* Bits 15-0 of word 0 */ + u8 prli_svc_param_word_3[2]; /* Bits 15-0 of word 3 */ + u8 port_name[WWN_SIZE]; + u8 node_name[WWN_SIZE]; +}; + +/* MB 75h: This is the short version of the database */ +struct get_name_list { + u8 port_node_name[WWN_SIZE]; /* B7 most sig, B0 least sig */ + __le16 nport_handle; + u8 reserved; +}; + struct vp_database_24xx { uint16_t vp_status; uint8_t options; @@ -1270,27 +1301,76 @@ struct vp_config_entry_24xx { }; #define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */ +enum VP_STATUS { + VP_STAT_COMPL, + VP_STAT_FAIL, + VP_STAT_ID_CHG, + VP_STAT_SNS_TO, /* timeout */ + VP_STAT_SNS_RJT, + VP_STAT_SCR_TO, /* timeout */ + VP_STAT_SCR_RJT, +}; + +enum VP_FLAGS { + VP_FLAGS_CON_FLOOP = 1, + VP_FLAGS_CON_P2P = 2, + VP_FLAGS_CON_FABRIC = 3, + VP_FLAGS_NAME_VALID = BIT_5, +}; + struct vp_rpt_id_entry_24xx { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. */ uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. */ - - uint32_t handle; /* System handle. */ - - uint16_t vp_count; /* Format 0 -- | VP setup | VP acq |. */ - /* Format 1 -- | VP count |. */ - uint16_t vp_idx; /* Format 0 -- Reserved. */ - /* Format 1 -- VP status and index. */ + uint32_t resv1; + uint8_t vp_acquired; + uint8_t vp_setup; + uint8_t vp_idx; /* Format 0=reserved */ + uint8_t vp_status; /* Format 0=reserved */ uint8_t port_id[3]; uint8_t format; - - uint8_t vp_idx_map[16]; - - uint8_t reserved_4[24]; - uint16_t bbcr; - uint8_t reserved_5[6]; + union { + struct { + /* format 0 loop */ + uint8_t vp_idx_map[16]; + uint8_t reserved_4[32]; + } f0; + struct { + /* format 1 fabric */ + uint8_t vpstat1_subcode; /* vp_status=1 subcode */ + uint8_t flags; + uint16_t fip_flags; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint16_t bbcr; + uint8_t reserved_5[6]; + } f1; + struct { /* format 2: N2N direct connect */ + uint8_t vpstat1_subcode; + uint8_t flags; + uint16_t rsv6; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint32_t remote_nport_id; + uint32_t reserved_5; + } f2; + } u; }; #define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index afa0116a163b..b3d6441d1d90 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -73,6 +73,10 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); +struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, + enum qla_work_type); +extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); +int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e); extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *); extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); @@ -94,6 +98,13 @@ extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *); extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *, int, int); extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *); +void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *); +int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, + struct imm_ntfy_from_isp *, int); +int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, + void *); +int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); /* * Global Data in qla_os.c source file. @@ -127,6 +138,7 @@ extern int ql2xmdenable; extern int ql2xexlogins; extern int ql2xexchoffld; extern int ql2xfwholdabts; +extern int ql2xmvasynctoatio; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -135,8 +147,6 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *); extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *, - fc_port_t *, uint16_t *); extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, @@ -176,9 +186,13 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern void qla2x00_disable_board_on_pci_error(struct work_struct *); -extern void qla2x00_sp_compl(void *, void *, int); -extern void qla2xxx_qpair_sp_free_dma(void *, void *); -extern void qla2xxx_qpair_sp_compl(void *, void *, int); +extern void qla2x00_sp_compl(void *, int); +extern void qla2xxx_qpair_sp_free_dma(void *); +extern void qla2xxx_qpair_sp_compl(void *, int); +extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); +void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); /* * Global Functions in qla_mid.c source file. 
@@ -201,7 +215,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *); extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); -extern void qla2x00_sp_free_dma(void *, void *); +extern void qla2x00_sp_free_dma(void *); extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); @@ -302,9 +316,6 @@ extern int qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); extern int -qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *); - -extern int qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); extern int @@ -483,6 +494,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, uint32_t); extern irqreturn_t qla2xxx_msix_rsp_q(int irq, void *dev_id); +fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t); +fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8); +fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8); /* * Global Function Prototypes in qla_sup.c source file. @@ -574,8 +588,8 @@ extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); /* * Global Function Prototypes in qla_gs.c source file. */ -extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t); -extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t); +extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *); +extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *); extern int qla2x00_ga_nxt(scsi_qla_host_t *, fc_port_t *); extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *); extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *); @@ -591,6 +605,23 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *); extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *); extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *); extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t); +extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *, + struct ct_sns_rsp *, const char *); +extern void qla2x00_async_iocb_timeout(void *data); +extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *); +int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *); +void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *); + +extern void qla2x00_free_fcport(fc_port_t *); + +extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *); +extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *); +void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*); +void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *); + +int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *); +int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); +int qla2x00_mgmt_svr_login(scsi_qla_host_t *); /* * Global Function Prototypes in qla_attr.c source file. 
@@ -702,10 +733,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *); /* IOCB related functions */ extern int qla82xx_start_scsi(srb_t *); -extern void qla2x00_sp_free(void *, void *); +extern void qla2x00_sp_free(void *); extern void qla2x00_sp_timeout(unsigned long); -extern void qla2x00_bsg_job_done(void *, void *, int); -extern void qla2x00_bsg_sp_free(void *, void *); +extern void qla2x00_bsg_job_done(void *, int); +extern void qla2x00_bsg_sp_free(void *); extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *); /* Interrupt related */ @@ -803,4 +834,17 @@ extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *); extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t); extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *); +int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, + struct imm_ntfy_from_isp *, int); +void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *); +void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *, + struct fc_port *, enum qlt_plogi_link_t); +void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *); +extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool); +extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *); +extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, + uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); +void qla24xx_delete_sess_fn(struct work_struct *); +void qlt_unknown_atio_work_fn(struct work_struct *); + #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index ee3df8794806..ab0f873fd6a1 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -24,12 +24,12 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *); * Returns a pointer to the @ha's ms_iocb. */ void * -qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) +qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) { struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; - ms_pkt = ha->ms_iocb; + ms_pkt = (ms_iocb_entry_t *)arg->iocb; memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); ms_pkt->entry_type = MS_IOCB_TYPE; @@ -39,15 +39,15 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ms_pkt->cmd_dsd_count = cpu_to_le16(1); ms_pkt->total_dsd_count = cpu_to_le16(2); - ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); - ms_pkt->req_bytecount = cpu_to_le32(req_size); + ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size); + ms_pkt->req_bytecount = cpu_to_le32(arg->req_size); - ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma)); + ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma)); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; - ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma)); + ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma)); ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; vha->qla_stats.control_requests++; @@ -64,29 +64,29 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) * Returns a pointer to the @ha's ms_iocb. 
*/ void * -qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) +qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) { struct qla_hw_data *ha = vha->hw; struct ct_entry_24xx *ct_pkt; - ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; + ct_pkt = (struct ct_entry_24xx *)arg->iocb; memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); ct_pkt->entry_type = CT_IOCB_TYPE; ct_pkt->entry_count = 1; - ct_pkt->nport_handle = cpu_to_le16(NPH_SNS); + ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle); ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ct_pkt->cmd_dsd_count = cpu_to_le16(1); ct_pkt->rsp_dsd_count = cpu_to_le16(1); - ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); - ct_pkt->cmd_byte_count = cpu_to_le32(req_size); + ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size); + ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size); - ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma)); + ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma)); ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; - ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma)); + ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma)); ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = vha->vp_idx; @@ -117,7 +117,7 @@ qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) return &p->p.req; } -static int +int qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, struct ct_sns_rsp *ct_rsp, const char *routine) { @@ -183,14 +183,21 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_ga_nxt(vha, fcport); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GA_NXT_REQ_SIZE; + arg.rsp_size = GA_NXT_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue GA_NXT */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE, - GA_NXT_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, @@ -269,16 +276,24 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_gid_pt_data *gid_data; struct qla_hw_data *ha = vha->hw; uint16_t gid_pt_rsp_size; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gid_pt(vha, list); gid_data = NULL; gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GID_PT_REQ_SIZE; + arg.rsp_size = gid_pt_rsp_size; + arg.nport_handle = NPH_SNS; + /* Issue GID_PT */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE, - gid_pt_rsp_size); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size); @@ -344,15 +359,22 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gpn_id(vha, list); + arg.iocb = ha->ms_iocb; + 
arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GPN_ID_REQ_SIZE; + arg.rsp_size = GPN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GPN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE, - GPN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD, @@ -406,15 +428,22 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gnn_id(vha, list); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GNN_ID_REQ_SIZE; + arg.rsp_size = GNN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GNN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE, - GNN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD, @@ -473,14 +502,21 @@ qla2x00_rft_id(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_rft_id(vha); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = RFT_ID_REQ_SIZE; + arg.rsp_size = RFT_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RFT_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE, - RFT_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD, @@ -526,6 +562,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2046, @@ -533,10 +570,16 @@ qla2x00_rff_id(scsi_qla_host_t *vha) return (QLA_SUCCESS); } + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = RFF_ID_REQ_SIZE; + arg.rsp_size = RFF_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RFF_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE, - RFF_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD, @@ -584,14 +627,21 @@ qla2x00_rnn_id(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_rnn_id(vha); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = RNN_ID_REQ_SIZE; + arg.rsp_size = RNN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RNN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE, - RNN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); @@ -651,6 +701,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 
0x2050, @@ -658,10 +709,17 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) return (QLA_SUCCESS); } + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = 0; + arg.rsp_size = RSNN_NN_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RSNN_NN */ /* Prepare common MS IOCB */ /* Request size adjusted after CT preparation */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD, @@ -1103,7 +1161,7 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha) * * Returns 0 on success. */ -static int +int qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) { int ret, rval; @@ -2425,15 +2483,22 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (!IS_IIDMA_CAPABLE(ha)) return QLA_FUNCTION_FAILED; + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GFPN_ID_REQ_SIZE; + arg.rsp_size = GFPN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GFPN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE, - GFPN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD, @@ -2471,36 +2536,6 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) return (rval); } -static inline void * -qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size, - uint32_t rsp_size) -{ - struct ct_entry_24xx *ct_pkt; - struct qla_hw_data *ha = vha->hw; - ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; - memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); - - ct_pkt->entry_type = CT_IOCB_TYPE; - ct_pkt->entry_count = 1; - ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); - ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); - ct_pkt->cmd_dsd_count = cpu_to_le16(1); - ct_pkt->rsp_dsd_count = cpu_to_le16(1); - ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); - ct_pkt->cmd_byte_count = cpu_to_le32(req_size); - - ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); - ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; - - ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); - ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; - ct_pkt->vp_index = vha->vp_idx; - - return ct_pkt; -} - static inline struct ct_sns_req * qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, @@ -2530,9 +2565,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) int rval; uint16_t i; struct qla_hw_data *ha = vha->hw; - ms_iocb_entry_t *ms_pkt; + ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (!IS_IIDMA_CAPABLE(ha)) return QLA_FUNCTION_FAILED; @@ -2543,11 +2579,17 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) if (rval) return rval; + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GPSC_REQ_SIZE; + arg.rsp_size = GPSC_RSP_SIZE; + arg.nport_handle = vha->mgmt_svr_loop_id; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GFPN_ID */ /* Prepare common MS IOCB */ - ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE, - GPSC_RSP_SIZE); + ms_pkt = qla24xx_prep_ms_iocb(vha, &arg); /* Prepare CT request 
*/ ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD, @@ -2641,6 +2683,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; uint8_t fcp_scsi_features = 0; + struct ct_arg arg; for (i = 0; i < ha->max_fibre_devices; i++) { /* Set default FC4 Type as UNKNOWN so the default is to @@ -2651,9 +2694,15 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) if (!IS_FWI2_CAPABLE(ha)) continue; + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GFF_ID_REQ_SIZE; + arg.rsp_size = GFF_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFF_ID_REQ_SIZE, - GFF_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD, @@ -2692,3 +2741,538 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) break; } } + +/* GID_PN completion processing. */ +void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC login state %d \n", + __func__, fcport->port_name, fcport->fw_login_state); + + if (ea->sp->gen2 != fcport->login_gen) { + /* PLOGI/PRLI/LOGO came in while cmd was out.*/ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC generation changed rscn %d|%d login %d|%d \n", + __func__, fcport->port_name, fcport->last_rscn_gen, + fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen); + return; + } + + if (!ea->rc) { + if (ea->sp->gen1 == fcport->rscn_gen) { + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->flags |= FCF_FABRIC_DEVICE; + + if (fcport->d_id.b24 == ea->id.b24) { + /* cable plugged into the same place */ + switch (vha->host->active_mode) { + case MODE_TARGET: + /* NOOP. 
let the other guy login to us.*/ + break; + case MODE_INITIATOR: + case MODE_DUAL: + default: + if (atomic_read(&fcport->state) == + FCS_ONLINE) + break; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gnl\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_gnl_work(vha, fcport); + break; + } + } else { /* fcport->d_id.b24 != ea->id.b24 */ + fcport->d_id.b24 = ea->id.b24; + if (fcport->deleted == QLA_SESS_DELETED) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + } + } + } else { /* ea->sp->gen1 != fcport->rscn_gen */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); + /* rscn came in while cmd was out */ + qla24xx_post_gidpn_work(vha, fcport); + } + } else { /* ea->rc */ + /* cable pulled */ + if (ea->sp->gen1 == fcport->rscn_gen) { + if (ea->sp->gen2 == fcport->login_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", __func__, + __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC login\n", __func__, __LINE__, + fcport->port_name); + qla24xx_fcport_handle_login(vha, fcport); + } + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gidpn\n", __func__, __LINE__, + fcport->port_name); + qla24xx_post_gidpn_work(vha, fcport); + } + } +} /* gidpn_event */ + +static void qla2x00_async_gidpn_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + fc_port_t *fcport = sp->fcport; + u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id; + struct event_arg ea; + + fcport->flags &= ~FCF_ASYNC_SENT; + + memset(&ea, 0, sizeof(ea)); + ea.fcport = fcport; + ea.id.b.domain = id[0]; + ea.id.b.area = id[1]; + ea.id.b.al_pa = id[2]; + ea.sp = sp; + ea.rc = res; + ea.event = FCME_GIDPN_DONE; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x, WWPN %8phC ID %3phC \n", + sp->name, res, fcport->port_name, id); + + qla2x00_fcport_event_handler(vha, &ea); + + sp->free(sp); +} + +int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + + if (!vha->flags.online) + goto done; + + fcport->flags |= FCF_ASYNC_SENT; + fcport->disc_state = DSC_GID_PN; + fcport->scan_state = QLA_FCPORT_SCAN; + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gidpn"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD, + GID_PN_RSP_SIZE); + + /* GIDPN req */ + memcpy(ct_req->req.gid_pn.port_name, fcport->port_name, + WWN_SIZE); + + /* req & rsp use the same buffer */ + sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla2x00_async_gidpn_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0x206f, + "Async-%s 
- %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n", + sp->name, fcport->port_name, + sp->handle, fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + int ls; + + ls = atomic_read(&vha->loop_state); + if (((ls != LOOP_READY) && (ls != LOOP_UP)) || + test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPSC); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +static void qla24xx_async_gpsc_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = sp->fcport; + struct ct_sns_rsp *ct_rsp; + struct event_arg ea; + + ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x, WWPN %8phC \n", + sp->name, res, fcport->port_name); + + fcport->flags &= ~FCF_ASYNC_SENT; + + if (res == (DID_ERROR << 16)) { + /* entry status error */ + goto done; + } else if (res) { + if ((ct_rsp->header.reason_code == + CT_REASON_INVALID_COMMAND_CODE) || + (ct_rsp->header.reason_code == + CT_REASON_COMMAND_UNSUPPORTED)) { + ql_dbg(ql_dbg_disc, vha, 0x205a, + "GPSC command unsupported, disabling " + "query.\n"); + ha->flags.gpsc_supported = 0; + res = QLA_SUCCESS; + } + } else { + switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) { + case BIT_15: + fcport->fp_speed = PORT_SPEED_1GB; + break; + case BIT_14: + fcport->fp_speed = PORT_SPEED_2GB; + break; + case BIT_13: + fcport->fp_speed = PORT_SPEED_4GB; + break; + case BIT_12: + fcport->fp_speed = PORT_SPEED_10GB; + break; + case BIT_11: + fcport->fp_speed = PORT_SPEED_8GB; + break; + case BIT_10: + fcport->fp_speed = PORT_SPEED_16GB; + break; + case BIT_8: + fcport->fp_speed = PORT_SPEED_32GB; + break; + } + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", + sp->name, + fcport->fabric_port_name, + be16_to_cpu(ct_rsp->rsp.gpsc.speeds), + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); + } +done: + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_GPSC_DONE; + ea.rc = res; + ea.fcport = fcport; + qla2x00_fcport_event_handler(vha, &ea); + + sp->free(sp); +} + +int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + + if (!vha->flags.online) + goto done; + + fcport->flags |= FCF_ASYNC_SENT; + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gpsc"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + /* CT_IU preamble */ + ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, + GPSC_RSP_SIZE); + + /* GPSC req */ + memcpy(ct_req->req.gpsc.port_name, fcport->port_name, + WWN_SIZE); + + sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; + 
sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla24xx_async_gpsc_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", + sp->name, fcport->port_name, sp->handle, + fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id) +{ + struct qla_work_evt *e; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPNID); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.gpnid.id = *id; + return qla2x00_post_work(vha, e); +} + +void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp) +{ + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + sp->free(sp); +} + +void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport; + unsigned long flags; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + if (fcport) { + /* cable moved. just plugged in */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + + fcport->rscn_gen++; + fcport->d_id = ea->id; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->flags |= FCF_FABRIC_DEVICE; + + qlt_schedule_sess_for_deletion_lock(fcport); + } else { + /* create new fcport */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post new sess\n", + __func__, __LINE__, ea->port_name); + + qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL); + } +} + +static void qla2x00_async_gpnid_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + struct ct_sns_req *ct_req = + (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; + struct ct_sns_rsp *ct_rsp = + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; + struct event_arg ea; + struct qla_work_evt *e; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x ID %3phC. %8phC\n", + sp->name, res, ct_req->req.port_id.port_id, + ct_rsp->rsp.gpn_id.port_name); + + memset(&ea, 0, sizeof(ea)); + memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); + ea.sp = sp; + ea.id.b.domain = ct_req->req.port_id.port_id[0]; + ea.id.b.area = ct_req->req.port_id.port_id[1]; + ea.id.b.al_pa = ct_req->req.port_id.port_id[2]; + ea.rc = res; + ea.event = FCME_GPNID_DONE; + + qla2x00_fcport_event_handler(vha, &ea); + + e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE); + if (!e) { + /* please ignore kernel warning. otherwise, we have mem leak. 
*/ + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + sp->free(sp); + return; + } + + e->u.iosb.sp = sp; + qla2x00_post_work(vha, e); +} + +/* Get WWPN with Nport ID. */ +int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + struct ct_sns_pkt *ct_sns; + + if (!vha->flags.online) + goto done; + + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gpnid"; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + goto done_free_sp; + } + + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + goto done_free_sp; + } + + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; + memset(ct_sns, 0, sizeof(*ct_sns)); + + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); + + /* GPN_ID req */ + ct_req->req.port_id.port_id[0] = id->b.domain; + ct_req->req.port_id.port_id[1] = id->b.area; + ct_req->req.port_id.port_id[2] = id->b.al_pa; + + sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla2x00_async_gpnid_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s hdl=%x ID %3phC.\n", sp->name, + sp->handle, ct_req->req.port_id.port_id); + return rval; + +done_free_sp: + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + sp->free(sp); +done: + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 265e1395bdb8..32fb9007f137 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -30,15 +30,15 @@ static int qla2x00_configure_hba(scsi_qla_host_t *); static int qla2x00_configure_loop(scsi_qla_host_t *); static int qla2x00_configure_local_loop(scsi_qla_host_t *); static int qla2x00_configure_fabric(scsi_qla_host_t *); -static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *); -static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *, - uint16_t *); - +static int 
qla2x00_find_all_fabric_devs(scsi_qla_host_t *); static int qla2x00_restart_isp(scsi_qla_host_t *); static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); +static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); +static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *, + struct event_arg *); /* SRB Extensions ---------------------------------------------------------- */ @@ -47,29 +47,27 @@ qla2x00_sp_timeout(unsigned long __data) { srb_t *sp = (srb_t *)__data; struct srb_iocb *iocb; - fc_port_t *fcport = sp->fcport; - struct qla_hw_data *ha = fcport->vha->hw; + scsi_qla_host_t *vha = sp->vha; struct req_que *req; unsigned long flags; - spin_lock_irqsave(&ha->hardware_lock, flags); - req = ha->req_q_map[0]; + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + req = vha->hw->req_q_map[0]; req->outstanding_cmds[sp->handle] = NULL; iocb = &sp->u.iocb_cmd; iocb->timeout(sp); - sp->free(fcport->vha, sp); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + sp->free(sp); + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); } void -qla2x00_sp_free(void *data, void *ptr) +qla2x00_sp_free(void *ptr) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *iocb = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; del_timer(&iocb->timer); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); } /* Asynchronous Login/Logout Routines -------------------------------------- */ @@ -94,43 +92,72 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha) return tmo; } -static void +void qla2x00_async_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; fc_port_t *fcport = sp->fcport; + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct event_arg ea; ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, - "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n", - sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa); + "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", + sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); fcport->flags &= ~FCF_ASYNC_SENT; - if (sp->type == SRB_LOGIN_CMD) { - struct srb_iocb *lio = &sp->u.iocb_cmd; - qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); + + switch (sp->type) { + case SRB_LOGIN_CMD: /* Retry as needed. */ lio->u.logio.data[0] = MBS_COMMAND_ERROR; lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
QLA_LOGIO_LOGIN_RETRIED : 0; - qla2x00_post_async_login_done_work(fcport->vha, fcport, - lio->u.logio.data); - } else if (sp->type == SRB_LOGOUT_CMD) { + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_PLOGI_DONE; + ea.fcport = sp->fcport; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.sp = sp; + qla24xx_handle_plogi_done_event(fcport->vha, &ea); + break; + case SRB_LOGOUT_CMD: qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); + break; + case SRB_CT_PTHRU_CMD: + case SRB_MB_IOCB: + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + sp->done(sp, QLA_FUNCTION_TIMEOUT); + break; } } static void -qla2x00_async_login_sp_done(void *data, void *ptr, int res) +qla2x00_async_login_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; + struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; + struct event_arg ea; - if (!test_bit(UNLOADING, &vha->dpc_flags)) - qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport, - lio->u.logio.data); - sp->free(sp->fcport->vha, sp); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC res %d \n", __func__, sp->fcport->port_name, res); + + sp->fcport->flags &= ~FCF_ASYNC_SENT; + if (!test_bit(UNLOADING, &vha->dpc_flags)) { + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_PLOGI_DONE; + ea.fcport = sp->fcport; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.iop[0] = lio->u.logio.iop[0]; + ea.iop[1] = lio->u.logio.iop[1]; + ea.sp = sp; + qla2x00_fcport_event_handler(vha, &ea); + } + + sp->free(sp); } int @@ -139,13 +166,23 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, { srb_t *sp; struct srb_iocb *lio; - int rval; + int rval = QLA_FUNCTION_FAILED; + + if (!vha->flags.online) + goto done; + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + goto done; - rval = QLA_FUNCTION_FAILED; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; + fcport->flags |= FCF_ASYNC_SENT; + fcport->logout_completed = 0; + sp->type = SRB_LOGIN_CMD; sp->name = "login"; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); @@ -165,29 +202,30 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, } ql_dbg(ql_dbg_disc, vha, 0x2072, - "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x " - "retries=%d.\n", sp->handle, fcport->loop_id, + "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x " + "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->login_retry); return rval; done_free_sp: - sp->free(fcport->vha, sp); + sp->free(sp); done: + fcport->flags &= ~FCF_ASYNC_SENT; return rval; } static void -qla2x00_async_logout_sp_done(void *data, void *ptr, int res) +qla2x00_async_logout_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; - if (!test_bit(UNLOADING, &vha->dpc_flags)) - qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport, + sp->fcport->flags &= ~FCF_ASYNC_SENT; + if (!test_bit(UNLOADING, &sp->vha->dpc_flags)) + qla2x00_post_async_logout_done_work(sp->vha, sp->fcport, lio->u.logio.data); - sp->free(sp->fcport->vha, sp); + sp->free(sp); } int @@ -198,6 +236,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) 
int rval; rval = QLA_FUNCTION_FAILED; + fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -214,28 +253,30 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) goto done_free_sp; ql_dbg(ql_dbg_disc, vha, 0x2070, - "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", + "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa); + fcport->d_id.b.area, fcport->d_id.b.al_pa, + fcport->port_name); return rval; done_free_sp: - sp->free(fcport->vha, sp); + sp->free(sp); done: + fcport->flags &= ~FCF_ASYNC_SENT; return rval; } static void -qla2x00_async_adisc_sp_done(void *data, void *ptr, int res) +qla2x00_async_adisc_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; + struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; if (!test_bit(UNLOADING, &vha->dpc_flags)) - qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport, + qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport, lio->u.logio.data); - sp->free(sp->fcport->vha, sp); + sp->free(sp); } int @@ -247,6 +288,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, int rval; rval = QLA_FUNCTION_FAILED; + fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -271,15 +313,858 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, return rval; done_free_sp: - sp->free(fcport->vha, sp); + sp->free(sp); done: + fcport->flags &= ~FCF_ASYNC_SENT; return rval; } +static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport, *conflict_fcport; + struct get_name_list_extended *e; + u16 i, n, found = 0, loop_id; + port_id_t id; + u64 wwn; + u8 opt = 0; + + fcport = ea->fcport; + + if (ea->rc) { /* rval */ + if (fcport->login_retry == 0) { + fcport->login_retry = vha->hw->login_retry_count; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "GNL failed Port login retry %8phN, retry cnt=%d.\n", + fcport->port_name, fcport->login_retry); + } + return; + } + + if (fcport->last_rscn_gen != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC rscn gen changed rscn %d|%d \n", + __func__, fcport->port_name, + fcport->last_rscn_gen, fcport->rscn_gen); + qla24xx_post_gidpn_work(vha, fcport); + return; + } else if (fcport->last_login_gen != fcport->login_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC login gen changed login %d|%d \n", + __func__, fcport->port_name, + fcport->last_login_gen, fcport->login_gen); + return; + } + + n = ea->data[0] / sizeof(struct get_name_list_extended); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC n %d %02x%02x%02x lid %d \n", + __func__, __LINE__, fcport->port_name, n, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, fcport->loop_id); + + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + wwn = wwn_to_u64(e->port_name); + + if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) + continue; + + found = 1; + id.b.domain = e->port_id[2]; + id.b.area = e->port_id[1]; + id.b.al_pa = e->port_id[0]; + id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(e->nport_handle); + loop_id = (loop_id & 0x7fff); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", + __func__, fcport->port_name, + e->current_login_state, fcport->fw_login_state, + id.b.domain, 
id.b.area, id.b.al_pa, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, loop_id, fcport->loop_id); + + if ((id.b24 != fcport->d_id.b24) || + ((fcport->loop_id != FC_NO_LOOP_ID) && + (fcport->loop_id != loop_id))) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion(fcport, 1); + return; + } + + fcport->loop_id = loop_id; + + wwn = wwn_to_u64(fcport->port_name); + qlt_find_sess_invalidate_other(vha, wwn, + id, loop_id, &conflict_fcport); + + if (conflict_fcport) { + /* + * Another share fcport share the same loop_id & + * nport id. Conflict fcport needs to finish + * cleanup before this fcport can proceed to login. + */ + conflict_fcport->conflict = fcport; + fcport->login_pause = 1; + } + + switch (e->current_login_state) { + case DSC_LS_PRLI_COMP: + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); + opt = PDO_FORCE_ADISC; + qla24xx_post_gpdb_work(vha, fcport, opt); + break; + + case DSC_LS_PORT_UNAVAIL: + default: + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, fcport); + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + } + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC \n", + __func__, __LINE__, fcport->port_name); + qla24xx_fcport_handle_login(vha, fcport); + break; + } + } + + if (!found) { + /* fw has no record of this port */ + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, fcport); + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + } else { + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + id.b.domain = e->port_id[0]; + id.b.area = e->port_id[1]; + id.b.al_pa = e->port_id[2]; + id.b.rsvd_1 = 0; + loop_id = le16_to_cpu(e->nport_handle); + + if (fcport->d_id.b24 == id.b24) { + conflict_fcport = + qla2x00_find_fcport_by_wwpn(vha, + e->port_name, 0); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + conflict_fcport->port_name); + qlt_schedule_sess_for_deletion + (conflict_fcport, 1); + } + + if (fcport->loop_id == loop_id) { + /* FW already picked this loop id for another fcport */ + qla2x00_find_new_loop_id(vha, fcport); + } + } + } + qla24xx_fcport_handle_login(vha, fcport); + } +} /* gnl_event */ + +static void +qla24xx_async_gnl_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + unsigned long flags; + struct fc_port *fcport = NULL, *tf; + u16 i, n = 0, loop_id; + struct event_arg ea; + struct get_name_list_extended *e; + u64 wwn; + struct list_head h; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x mb[1]=%x mb[2]=%x \n", + sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], + sp->u.iocb_cmd.u.mbx.in_mb[2]); + + memset(&ea, 0, sizeof(ea)); + ea.sp = sp; + ea.rc = res; + ea.event = FCME_GNL_DONE; + + if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= + sizeof(struct get_name_list_extended)) { + n = sp->u.iocb_cmd.u.mbx.in_mb[1] / + sizeof(struct get_name_list_extended); + ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ + } + + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + loop_id = le16_to_cpu(e->nport_handle); + /* mask out reserve bit */ + loop_id = (loop_id & 0x7fff); + set_bit(loop_id, vha->hw->loop_id_map); + wwn = wwn_to_u64(e->port_name); + + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n", + __func__, (void *)&wwn, e->port_id[2], e->port_id[1], + e->port_id[0], e->current_login_state, e->last_login_state, + (loop_id 
& 0x7fff)); + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + vha->gnl.sent = 0; + + INIT_LIST_HEAD(&h); + fcport = tf = NULL; + if (!list_empty(&vha->gnl.fcports)) + list_splice_init(&vha->gnl.fcports, &h); + + list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { + list_del_init(&fcport->gnl_entry); + fcport->flags &= ~FCF_ASYNC_SENT; + ea.fcport = fcport; + + qla2x00_fcport_event_handler(vha, &ea); + } + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + sp->free(sp); +} + +int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + srb_t *sp; + struct srb_iocb *mbx; + int rval = QLA_FUNCTION_FAILED; + unsigned long flags; + u16 *mb; + + if (!vha->flags.online) + goto done; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-gnlist WWPN %8phC \n", fcport->port_name); + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport->flags |= FCF_ASYNC_SENT; + fcport->disc_state = DSC_GNL; + fcport->last_rscn_gen = fcport->rscn_gen; + fcport->last_login_gen = fcport->login_gen; + + list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); + if (vha->gnl.sent) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + rval = QLA_SUCCESS; + goto done; + } + vha->gnl.sent = 1; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + sp->type = SRB_MB_IOCB; + sp->name = "gnlist"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); + + mb = sp->u.iocb_cmd.u.mbx.out_mb; + mb[0] = MBC_PORT_NODE_NAME_LIST; + mb[1] = BIT_2 | BIT_3; + mb[2] = MSW(vha->gnl.ldma); + mb[3] = LSW(vha->gnl.ldma); + mb[6] = MSW(MSD(vha->gnl.ldma)); + mb[7] = LSW(MSD(vha->gnl.ldma)); + mb[8] = vha->gnl.size; + mb[9] = vha->vp_idx; + + mbx = &sp->u.iocb_cmd; + mbx->timeout = qla2x00_async_iocb_timeout; + + sp->done = qla24xx_async_gnl_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - OUT WWPN %8phC hndl %x\n", + sp->name, fcport->port_name, sp->handle); + + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GNL); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +static +void qla24xx_async_gpdb_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + uint64_t zero = 0; + struct port_database_24xx *pd; + fc_port_t *fcport = sp->fcport; + u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; + int rval = QLA_SUCCESS; + struct event_arg ea; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n", + sp->name, res, fcport->port_name, mb[1], mb[2]); + + fcport->flags &= ~FCF_ASYNC_SENT; + + if (res) { + rval = res; + goto gpd_error_out; + } + + pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; + + /* Check for logged in state. 
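A self-contained sketch of how qla24xx_async_gnl() above splits the 64-bit DMA address of the name-list buffer across four 16-bit mailbox registers. The ILL_* macros restate the driver's LSW/MSW/LSD/MSD helpers for illustration only and are not the driver's definitions.

    #include <linux/types.h>

    #define ILL_LSW(x)  ((u16)((x) & 0xffff))
    #define ILL_MSW(x)  ((u16)(((u32)(x)) >> 16))
    #define ILL_LSD(x)  ((u32)((u64)(x)))
    #define ILL_MSD(x)  ((u32)(((u64)(x)) >> 32))

    static void pack_dma_into_mb(u16 *mb, dma_addr_t addr)
    {
            mb[2] = ILL_MSW(ILL_LSD(addr));   /* bits 31..16 */
            mb[3] = ILL_LSW(ILL_LSD(addr));   /* bits 15..0  */
            mb[6] = ILL_MSW(ILL_MSD(addr));   /* bits 63..48 */
            mb[7] = ILL_LSW(ILL_MSD(addr));   /* bits 47..32 */
    }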
*/ + if (pd->current_login_state != PDS_PRLI_COMPLETE && + pd->last_login_state != PDS_PRLI_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "Unable to verify login-state (%x/%x) for " + "loop_id %x.\n", pd->current_login_state, + pd->last_login_state, fcport->loop_id); + rval = QLA_FUNCTION_FAILED; + goto gpd_error_out; + } + + if (fcport->loop_id == FC_NO_LOOP_ID || + (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && + memcmp(fcport->port_name, pd->port_name, 8))) { + /* We lost the device mid way. */ + rval = QLA_NOT_LOGGED_IN; + goto gpd_error_out; + } + + /* Names are little-endian. */ + memcpy(fcport->node_name, pd->node_name, WWN_SIZE); + + /* Get port_id of device. */ + fcport->d_id.b.domain = pd->port_id[0]; + fcport->d_id.b.area = pd->port_id[1]; + fcport->d_id.b.al_pa = pd->port_id[2]; + fcport->d_id.b.rsvd_1 = 0; + + /* If not target must be initiator or unknown type. */ + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + + /* Passback COS information. */ + fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? + FC_COS_CLASS2 : FC_COS_CLASS3; + + if (pd->prli_svc_param_word_3[0] & BIT_7) { + fcport->flags |= FCF_CONF_COMP_SUPPORTED; + fcport->conf_compl_supported = 1; + } + +gpd_error_out: + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_GPDB_DONE; + ea.rc = rval; + ea.fcport = fcport; + ea.sp = sp; + + qla2x00_fcport_event_handler(vha, &ea); + + dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, + sp->u.iocb_cmd.u.mbx.in_dma); + + sp->free(sp); +} + +static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, + u8 opt) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPDB); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + e->u.fcport.opt = opt; + return qla2x00_post_work(vha, e); +} + +int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) +{ + srb_t *sp; + struct srb_iocb *mbx; + int rval = QLA_FUNCTION_FAILED; + u16 *mb; + dma_addr_t pd_dma; + struct port_database_24xx *pd; + struct qla_hw_data *ha = vha->hw; + + if (!vha->flags.online) + goto done; + + fcport->flags |= FCF_ASYNC_SENT; + fcport->disc_state = DSC_GPDB; + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate port database structure.\n"); + goto done_free_sp; + } + memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); + + sp->type = SRB_MB_IOCB; + sp->name = "gpdb"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + mb = sp->u.iocb_cmd.u.mbx.out_mb; + mb[0] = MBC_GET_PORT_DATABASE; + mb[1] = fcport->loop_id; + mb[2] = MSW(pd_dma); + mb[3] = LSW(pd_dma); + mb[6] = MSW(MSD(pd_dma)); + mb[7] = LSW(MSD(pd_dma)); + mb[9] = vha->vp_idx; + mb[10] = opt; + + mbx = &sp->u.iocb_cmd; + mbx->timeout = qla2x00_async_iocb_timeout; + mbx->u.mbx.in = (void *)pd; + mbx->u.mbx.in_dma = pd_dma; + + sp->done = qla24xx_async_gpdb_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s %8phC hndl %x opt %x\n", + sp->name, fcport->port_name, sp->handle, opt); + + return rval; + +done_free_sp: + if (pd) + dma_pool_free(ha->s_dma_pool, pd, pd_dma); + + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + qla24xx_post_gpdb_work(vha, 
fcport, opt); + return rval; +} + +static +void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + int rval = ea->rc; + fc_port_t *fcport = ea->fcport; + unsigned long flags; + + fcport->flags &= ~FCF_ASYNC_SENT; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name, + fcport->disc_state, fcport->fw_login_state, rval); + + if (ea->sp->gen2 != fcport->login_gen) { + /* target side must have changed it. */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC generation changed rscn %d|%d login %d|%d \n", + __func__, fcport->port_name, fcport->last_rscn_gen, + fcport->rscn_gen, fcport->last_login_gen, + fcport->login_gen); + return; + } else if (ea->sp->gen1 != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_gidpn_work(vha, fcport); + return; + } + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + return; + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + ea->fcport->login_gen++; + ea->fcport->deleted = 0; + ea->fcport->logout_on_delete = 1; + + if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { + vha->fcport_count++; + ea->fcport->login_succ = 1; + + if (!IS_IIDMA_CAPABLE(vha->hw) || + !vha->hw->flags.gpsc_supported) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, + vha->fcport_count); + + qla24xx_post_upd_fcport_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, + vha->fcport_count); + + qla24xx_post_gpsc_work(vha, fcport); + } + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); +} /* gpdb event */ + +int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + if (fcport->login_retry == 0) + return 0; + + if (fcport->scan_state != QLA_FCPORT_FOUND) + return 0; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->login_pause, fcport->flags, + fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, + fcport->last_login_gen, fcport->login_gen, fcport->login_retry, + fcport->loop_id); + + fcport->login_retry--; + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + return 0; + + /* for pure Target Mode. 
Login will not be initiated */ + if (vha->host->active_mode == MODE_TARGET) + return 0; + + if (fcport->flags & FCF_ASYNC_SENT) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return 0; + } + + switch (fcport->disc_state) { + case DSC_DELETED: + if (fcport->loop_id == FC_NO_LOOP_ID) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gnl\n", + __func__, __LINE__, fcport->port_name); + qla24xx_async_gnl(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post login\n", + __func__, __LINE__, fcport->port_name); + fcport->disc_state = DSC_LOGIN_PEND; + qla2x00_post_async_login_work(vha, fcport, NULL); + } + break; + + case DSC_GNL: + if (fcport->login_pause) { + fcport->last_rscn_gen = fcport->rscn_gen; + fcport->last_login_gen = fcport->login_gen; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + } + + if (fcport->flags & FCF_FCP2_DEVICE) { + u8 opt = PDO_FORCE_ADISC; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); + + fcport->disc_state = DSC_GPDB; + qla24xx_post_gpdb_work(vha, fcport, opt); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post login \n", + __func__, __LINE__, fcport->port_name); + fcport->disc_state = DSC_LOGIN_PEND; + qla2x00_post_async_login_work(vha, fcport, NULL); + } + + break; + + case DSC_LOGIN_FAILED: + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gidpn \n", + __func__, __LINE__, fcport->port_name); + + qla24xx_post_gidpn_work(vha, fcport); + break; + + case DSC_LOGIN_COMPLETE: + /* recheck login state */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb \n", + __func__, __LINE__, fcport->port_name); + + qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC); + break; + + default: + break; + } + + return 0; +} + +static +void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea) +{ + fcport->rscn_gen++; + + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phC DS %d LS %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state); + + if (fcport->flags & FCF_ASYNC_SENT) + return; + + switch (fcport->disc_state) { + case DSC_DELETED: + case DSC_LOGIN_COMPLETE: + qla24xx_post_gidpn_work(fcport->vha, fcport); + break; + + default: + break; + } +} + +int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, + u8 *port_name, void *pla) +{ + struct qla_work_evt *e; + e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.new_sess.id = *id; + e->u.new_sess.pla = pla; + memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE); + + return qla2x00_post_work(vha, e); +} + +static +int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + switch (vha->host->active_mode) { + case MODE_INITIATOR: + case MODE_DUAL: + if (fcport->scan_state == QLA_FCPORT_FOUND) + qla24xx_fcport_handle_login(vha, fcport); + break; + + case MODE_TARGET: + default: + /* no-op */ + break; + } + + return 0; +} + +static +void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + if (fcport->scan_state != QLA_FCPORT_FOUND) { + fcport->login_retry++; + return; + } + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->login_pause, + fcport->deleted, fcport->conflict, + 
fcport->last_rscn_gen, fcport->rscn_gen, + fcport->last_login_gen, fcport->login_gen, + fcport->flags); + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + return; + + if (fcport->flags & FCF_ASYNC_SENT) { + fcport->login_retry++; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return; + } + + if (fcport->disc_state == DSC_DELETE_PEND) { + fcport->login_retry++; + return; + } + + if (fcport->last_rscn_gen != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); + + qla24xx_async_gidpn(vha, fcport); + return; + } + + qla24xx_fcport_handle_login(vha, fcport); +} + +void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport, *f, *tf; + uint32_t id = 0, mask, rid; + int rc; + + switch (ea->event) { + case FCME_RELOGIN: + if (test_bit(UNLOADING, &vha->dpc_flags)) + return; + + qla24xx_handle_relogin_event(vha, ea); + break; + case FCME_RSCN: + if (test_bit(UNLOADING, &vha->dpc_flags)) + return; + switch (ea->id.b.rsvd_1) { + case RSCN_PORT_ADDR: + fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); + if (!fcport) { + /* cable moved */ + rc = qla24xx_post_gpnid_work(vha, &ea->id); + if (rc) { + ql_log(ql_log_warn, vha, 0xffff, + "RSCN GPNID work failed %02x%02x%02x\n", + ea->id.b.domain, ea->id.b.area, + ea->id.b.al_pa); + } + } else { + ea->fcport = fcport; + qla24xx_handle_rscn_event(fcport, ea); + } + break; + case RSCN_AREA_ADDR: + case RSCN_DOM_ADDR: + if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) { + mask = 0xffff00; + ql_log(ql_dbg_async, vha, 0xffff, + "RSCN: Area 0x%06x was affected\n", + ea->id.b24); + } else { + mask = 0xff0000; + ql_log(ql_dbg_async, vha, 0xffff, + "RSCN: Domain 0x%06x was affected\n", + ea->id.b24); + } + + rid = ea->id.b24 & mask; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, + list) { + id = f->d_id.b24 & mask; + if (rid == id) { + ea->fcport = f; + qla24xx_handle_rscn_event(f, ea); + } + } + break; + case RSCN_FAB_ADDR: + default: + ql_log(ql_log_warn, vha, 0xffff, + "RSCN: Fabric was affected. 
Addr format %d\n", + ea->id.b.rsvd_1); + qla2x00_mark_all_devices_lost(vha, 1); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + } + break; + case FCME_GIDPN_DONE: + qla24xx_handle_gidpn_event(vha, ea); + break; + case FCME_GNL_DONE: + qla24xx_handle_gnl_done_event(vha, ea); + break; + case FCME_GPSC_DONE: + qla24xx_post_upd_fcport_work(vha, ea->fcport); + break; + case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */ + qla24xx_handle_plogi_done_event(vha, ea); + break; + case FCME_GPDB_DONE: + qla24xx_handle_gpdb_event(vha, ea); + break; + case FCME_GPNID_DONE: + qla24xx_handle_gpnid_event(vha, ea); + break; + case FCME_DELETE_DONE: + qla24xx_handle_delete_done_event(vha, ea); + break; + default: + BUG_ON(1); + break; + } +} + static void qla2x00_tmf_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *tmf = &sp->u.iocb_cmd; tmf->u.tmf.comp_status = CS_TIMEOUT; @@ -287,10 +1172,11 @@ qla2x00_tmf_iocb_timeout(void *data) } static void -qla2x00_tmf_sp_done(void *data, void *ptr, int res) +qla2x00_tmf_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *tmf = &sp->u.iocb_cmd; + complete(&tmf->u.tmf.comp); } @@ -348,7 +1234,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, } done_free_sp: - sp->free(vha, sp); + sp->free(sp); done: return rval; } @@ -356,7 +1242,7 @@ done: static void qla24xx_abort_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = CS_TIMEOUT; @@ -364,9 +1250,9 @@ qla24xx_abort_iocb_timeout(void *data) } static void -qla24xx_abort_sp_done(void *data, void *ptr, int res) +qla24xx_abort_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; complete(&abt->u.abt.comp); @@ -375,7 +1261,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res) static int qla24xx_async_abort_cmd(srb_t *cmd_sp) { - scsi_qla_host_t *vha = cmd_sp->fcport->vha; + scsi_qla_host_t *vha = cmd_sp->vha; fc_port_t *fcport = cmd_sp->fcport; struct srb_iocb *abt_iocb; srb_t *sp; @@ -408,7 +1294,7 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp) QLA_SUCCESS : QLA_FUNCTION_FAILED; done_free_sp: - sp->free(vha, sp); + sp->free(sp); done: return rval; } @@ -441,59 +1327,65 @@ qla24xx_async_abort_command(srb_t *sp) return qla24xx_async_abort_cmd(sp); } -void -qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, - uint16_t *data) +static void +qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { - int rval; + port_id_t cid; /* conflict Nport id */ - switch (data[0]) { + switch (ea->data[0]) { case MBS_COMMAND_COMPLETE: /* * Driver must validate login state - If PRLI not complete, * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. 
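A made-up worked example of the area/domain masking in the RSCN branch of qla2x00_fcport_event_handler() above: an area-format RSCN matches every fcport sharing the same domain and area bytes, a domain-format RSCN every fcport in the domain. The port IDs below are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t rscn  = 0x010200;             /* area-format RSCN        */
            uint32_t mask  = 0xffff00;             /* 0xff0000 for domain fmt */
            uint32_t ports[] = { 0x010203, 0x010204, 0x010305 };

            for (unsigned i = 0; i < 3; i++)
                    if ((ports[i] & mask) == (rscn & mask))
                            printf("port %06x affected\n", ports[i]);
            /* prints 010203 and 010204; 010305 sits in a different area */
            return 0;
    }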
*/ - rval = qla2x00_get_port_database(vha, fcport, 0); - if (rval == QLA_NOT_LOGGED_IN) { - fcport->flags &= ~FCF_ASYNC_SENT; - fcport->flags |= FCF_LOGIN_NEEDED; - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - break; - } - - if (rval != QLA_SUCCESS) { - qla2x00_post_async_logout_work(vha, fcport, NULL); - qla2x00_post_async_login_work(vha, fcport, NULL); - break; - } - if (fcport->flags & FCF_FCP2_DEVICE) { - qla2x00_post_async_adisc_work(vha, fcport, data); - break; - } - qla2x00_update_fcport(vha, fcport); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, ea->fcport->port_name); + ea->fcport->chip_reset = vha->hw->chip_reset; + ea->fcport->logout_on_delete = 1; + qla24xx_post_gpdb_work(vha, ea->fcport, 0); break; case MBS_COMMAND_ERROR: - fcport->flags &= ~FCF_ASYNC_SENT; - if (data[1] & QLA_LOGIO_LOGIN_RETRIED) + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC cmd error %x\n", + __func__, __LINE__, ea->fcport->port_name, ea->data[1]); + + ea->fcport->flags &= ~FCF_ASYNC_SENT; + ea->fcport->disc_state = DSC_LOGIN_FAILED; + if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); else - qla2x00_mark_device_lost(vha, fcport, 1, 0); - break; - case MBS_PORT_ID_USED: - fcport->loop_id = data[1]; - qla2x00_post_async_logout_work(vha, fcport, NULL); - qla2x00_post_async_login_work(vha, fcport, NULL); + qla2x00_mark_device_lost(vha, ea->fcport, 1, 0); break; case MBS_LOOP_ID_USED: - fcport->loop_id++; - rval = qla2x00_find_new_loop_id(vha, fcport); - if (rval != QLA_SUCCESS) { - fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_mark_device_lost(vha, fcport, 1, 0); - break; + /* data[1] = IO PARAM 1 = nport ID */ + cid.b.domain = (ea->iop[1] >> 16) & 0xff; + cid.b.area = (ea->iop[1] >> 8) & 0xff; + cid.b.al_pa = ea->iop[1] & 0xff; + cid.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC LoopID 0x%x in use post gnl\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->loop_id); + + if (IS_SW_RESV_ADDR(cid)) { + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); + ea->fcport->loop_id = FC_NO_LOOP_ID; + } else { + qla2x00_clear_loop_id(ea->fcport); } - qla2x00_post_async_login_work(vha, fcport, NULL); + qla24xx_post_gnl_work(vha, ea->fcport); + break; + case MBS_PORT_ID_USED: + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, + ea->fcport->d_id.b.al_pa); + + qla2x00_clear_loop_id(ea->fcport); + qla24xx_post_gidpn_work(vha, ea->fcport); break; } return; @@ -503,10 +1395,9 @@ void qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { - /* Don't re-login in target mode */ - if (!fcport->tgt_session) - qla2x00_mark_device_lost(vha, fcport, 1, 0); + qla2x00_mark_device_lost(vha, fcport, 1, 0); qlt_logo_completion_handler(fcport, data[0]); + fcport->login_gen++; return; } @@ -709,7 +1600,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) } } - if (qla_ini_mode_enabled(vha)) + if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) rval = qla2x00_init_rings(vha); ha->flags.chip_reset_done = 1; @@ -2088,6 +2979,21 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) __func__, ha->fw_options[2]); } + /* Move PUREX, ABTS RX & RIDA to ATIOQ */ + if (ql2xmvasynctoatio) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_11; + else + ha->fw_options[2] &= ~BIT_11; + } + + ql_dbg(ql_dbg_init, vha, 0xffff, + "%s, 
add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); + qla2x00_set_fw_options(vha, ha->fw_options); + /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) return; @@ -2968,8 +3874,14 @@ qla2x00_rport_del(void *data) rport = fcport->drport ? fcport->drport: fcport->rport; fcport->drport = NULL; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); - if (rport) + if (rport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phN. rport %p roles %x \n", + __func__, fcport->port_name, rport, + rport->roles); + fc_remote_port_delete(rport); + } } /** @@ -2995,9 +3907,42 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); fcport->supported_classes = FC_COS_UNSPECIFIED; + fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, + flags); + fcport->disc_state = DSC_DELETED; + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + fcport->deleted = QLA_SESS_DELETED; + fcport->login_retry = vha->hw->login_retry_count; + fcport->login_retry = 5; + fcport->logout_on_delete = 1; + + if (!fcport->ct_desc.ct_sns) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + kfree(fcport); + fcport = NULL; + } + INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); + INIT_LIST_HEAD(&fcport->gnl_entry); + INIT_LIST_HEAD(&fcport->list); + return fcport; } +void +qla2x00_free_fcport(fc_port_t *fcport) +{ + if (fcport->ct_desc.ct_sns) { + dma_free_coherent(&fcport->vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, + fcport->ct_desc.ct_sns_dma); + + fcport->ct_desc.ct_sns = NULL; + } + kfree(fcport); +} + /* * qla2x00_configure_loop * Updates Fibre Channel Device Database with what is actually on loop. @@ -3055,10 +4000,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) } else if (ha->current_topology == ISP_CFG_N) { clear_bit(RSCN_UPDATE, &flags); - + } else if (ha->current_topology == ISP_CFG_NL) { + clear_bit(RSCN_UPDATE, &flags); + set_bit(LOCAL_LOOP_UPDATE, &flags); } else if (!vha->flags.online || (test_bit(ABORT_ISP_ACTIVE, &flags))) { - set_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } @@ -3095,7 +4041,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) * Process any ATIO queue entries that came in * while we weren't online. */ - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) { if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) { spin_lock_irqsave(&ha->tgt.atio_lock, flags); @@ -3159,6 +4106,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) uint16_t loop_id; uint8_t domain, area, al_pa; struct qla_hw_data *ha = vha->hw; + unsigned long flags; found_devs = 0; new_fcport = NULL; @@ -3199,7 +4147,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) "Marking port lost loop_id=0x%04x.\n", fcport->loop_id); - qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); + qla2x00_mark_device_lost(vha, fcport, 0, 0); } } @@ -3230,13 +4178,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if (loop_id > LAST_LOCAL_LOOP_ID) continue; - memset(new_fcport, 0, sizeof(fc_port_t)); + memset(new_fcport->port_name, 0, WWN_SIZE); /* Fill in member data. 
*/ new_fcport->d_id.b.domain = domain; new_fcport->d_id.b.area = area; new_fcport->d_id.b.al_pa = al_pa; new_fcport->loop_id = loop_id; + rval2 = qla2x00_get_port_database(vha, new_fcport, 0); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x201a, @@ -3249,6 +4198,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) continue; } + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* Check for matching device in port list. */ found = 0; fcport = NULL; @@ -3264,6 +4214,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) memcpy(fcport->node_name, new_fcport->node_name, WWN_SIZE); + if (!fcport->login_succ) { + vha->fcport_count++; + fcport->login_succ = 1; + fcport->disc_state = DSC_LOGIN_COMPLETE; + } + found++; break; } @@ -3274,16 +4230,28 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) /* Allocate a new replacement fcport. */ fcport = new_fcport; + if (!fcport->login_succ) { + vha->fcport_count++; + fcport->login_succ = 1; + fcport->disc_state = DSC_LOGIN_COMPLETE; + } + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (new_fcport == NULL) { ql_log(ql_log_warn, vha, 0x201c, "Failed to allocate memory for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; goto cleanup_allocation; } + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); new_fcport->flags &= ~FCF_FABRIC_DEVICE; } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + /* Base iIDMA settings on HBA port speed. */ fcport->fp_speed = ha->link_data_rate; @@ -3334,6 +4302,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) } } +/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/ static void qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) { @@ -3352,12 +4321,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) "Unable to allocate fc remote port.\n"); return; } - /* - * Create target mode FC NEXUS in qla_target.c if target mode is - * enabled.. - */ - - qlt_fc_port_added(vha, fcport); spin_lock_irqsave(fcport->vha->host->host_lock, flags); *((fc_port_t **)rport->dd_data) = fcport; @@ -3370,6 +4333,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (fcport->port_type == FCT_TARGET) rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phN. rport %p is %s mode \n", + __func__, fcport->port_name, rport, + (fcport->port_type == FCT_TARGET) ? 
"tgt" : "ini"); + fc_remote_port_rolechg(rport, rport_ids.roles); } @@ -3393,25 +4362,44 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { fcport->vha = vha; + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; + + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC \n", + __func__, fcport->port_name); + if (IS_QLAFX00(vha->hw)) { qla2x00_set_fcport_state(fcport, FCS_ONLINE); goto reg_port; } fcport->login_retry = 0; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + fcport->disc_state = DSC_LOGIN_COMPLETE; + fcport->deleted = 0; + fcport->logout_on_delete = 1; qla2x00_set_fcport_state(fcport, FCS_ONLINE); qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); reg_port: - if (qla_ini_mode_enabled(vha)) + switch (vha->host->active_mode) { + case MODE_INITIATOR: qla2x00_reg_remote_port(vha, fcport); - else { - /* - * Create target mode FC NEXUS in qla_target.c - */ - qlt_fc_port_added(vha, fcport); + break; + case MODE_TARGET: + if (!vha->vha_tgt.qla_tgt->tgt_stop && + !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_fc_port_added(vha, fcport); + break; + case MODE_DUAL: + qla2x00_reg_remote_port(vha, fcport); + if (!vha->vha_tgt.qla_tgt->tgt_stop && + !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_fc_port_added(vha, fcport); + break; + default: + break; } } @@ -3430,13 +4418,11 @@ static int qla2x00_configure_fabric(scsi_qla_host_t *vha) { int rval; - fc_port_t *fcport, *fcptemp; - uint16_t next_loopid; + fc_port_t *fcport; uint16_t mb[MAILBOX_REGISTER_COUNT]; uint16_t loop_id; LIST_HEAD(new_fcports); struct qla_hw_data *ha = vha->hw; - struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); int discovery_gen; /* If FL port exists, then SNS is present */ @@ -3454,7 +4440,19 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } vha->device_flags |= SWITCH_FOUND; + + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { + rval = qla2x00_send_change_request(vha, 0x3, 0); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x121, + "Failed to enable receiving of RSCN requests: 0x%x.\n", + rval); + } + + do { + qla2x00_mgmt_svr_login(vha); + /* FDMI support. */ if (ql2xfdmienable && test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) @@ -3501,9 +4499,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } } -#define QLA_FCPORT_SCAN 1 -#define QLA_FCPORT_FOUND 2 - list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->scan_state = QLA_FCPORT_SCAN; } @@ -3516,174 +4511,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) * will be newer than discovery_gen. */ qlt_do_generation_tick(vha, &discovery_gen); - rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); + rval = qla2x00_find_all_fabric_devs(vha); if (rval != QLA_SUCCESS) break; - - /* - * Logout all previous fabric devices marked lost, except - * FCP2 devices. 
- */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) - continue; - - if (fcport->scan_state == QLA_FCPORT_SCAN) { - if (qla_ini_mode_enabled(base_vha) && - atomic_read(&fcport->state) == FCS_ONLINE) { - qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0 && - fcport->port_type != FCT_INITIATOR && - fcport->port_type != FCT_BROADCAST) { - ha->isp_ops->fabric_logout(vha, - fcport->loop_id, - fcport->d_id.b.domain, - fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(fcport); - } - } else if (!qla_ini_mode_enabled(base_vha)) { - /* - * In target mode, explicitly kill - * sessions and log out of devices - * that are gone, so that we don't - * end up with an initiator using the - * wrong ACL (if the fabric recycles - * an FC address and we have a stale - * session around) and so that we don't - * report initiators that are no longer - * on the fabric. - */ - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077, - "port gone, logging out/killing session: " - "%8phC state 0x%x flags 0x%x fc4_type 0x%x " - "scan_state %d\n", - fcport->port_name, - atomic_read(&fcport->state), - fcport->flags, fcport->fc4_type, - fcport->scan_state); - qlt_fc_port_deleted(vha, fcport, - discovery_gen); - } - } - } - - /* Starting free loop ID. */ - next_loopid = ha->min_external_loopid; - - /* - * Scan through our port list and login entries that need to be - * logged in. - */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (atomic_read(&vha->loop_down_timer) || - test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || - (fcport->flags & FCF_LOGIN_NEEDED) == 0) - continue; - - /* - * If we're not an initiator, skip looking for devices - * and logging in. There's no reason for us to do it, - * and it seems to actively cause problems in target - * mode if we race with the initiator logging into us - * (we might get the "port ID used" status back from - * our login command and log out the initiator, which - * seems to cause havoc). - */ - if (!qla_ini_mode_enabled(base_vha)) { - if (fcport->scan_state == QLA_FCPORT_FOUND) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078, - "port %8phC state 0x%x flags 0x%x fc4_type 0x%x " - "scan_state %d (initiator mode disabled; skipping " - "login)\n", fcport->port_name, - atomic_read(&fcport->state), - fcport->flags, fcport->fc4_type, - fcport->scan_state); - } - continue; - } - - if (fcport->loop_id == FC_NO_LOOP_ID) { - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id( - base_vha, fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } - } - /* Login and update database */ - qla2x00_fabric_dev_login(vha, fcport, &next_loopid); - } - - /* Exit if out of loop IDs. */ - if (rval != QLA_SUCCESS) { - break; - } - - /* - * Login and add the new devices to our port list. - */ - list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { - if (atomic_read(&vha->loop_down_timer) || - test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - /* - * If we're not an initiator, skip looking for devices - * and logging in. 
There's no reason for us to do it, - * and it seems to actively cause problems in target - * mode if we race with the initiator logging into us - * (we might get the "port ID used" status back from - * our login command and log out the initiator, which - * seems to cause havoc). - */ - if (qla_ini_mode_enabled(base_vha)) { - /* Find a new loop ID to use. */ - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id(base_vha, - fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } - - /* Login and update database */ - qla2x00_fabric_dev_login(vha, fcport, - &next_loopid); - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079, - "new port %8phC state 0x%x flags 0x%x fc4_type " - "0x%x scan_state %d (initiator mode disabled; " - "skipping login)\n", - fcport->port_name, - atomic_read(&fcport->state), - fcport->flags, fcport->fc4_type, - fcport->scan_state); - } - - list_move_tail(&fcport->list, &vha->vp_fcports); - } } while (0); - /* Free all new device structures not processed. */ - list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { - list_del(&fcport->list); - kfree(fcport); - } - - if (rval) { + if (rval) ql_dbg(ql_dbg_disc, vha, 0x2068, "Configure fabric error exit rval=%d.\n", rval); - } return (rval); } @@ -3702,12 +4537,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) * Kernel context. */ static int -qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, - struct list_head *new_fcports) +qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) { int rval; uint16_t loop_id; - fc_port_t *fcport, *new_fcport, *fcptemp; + fc_port_t *fcport, *new_fcport; int found; sw_info_t *swl; @@ -3716,6 +4550,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, port_id_t wrap = {}, nxt_d_id; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + unsigned long flags; rval = QLA_SUCCESS; @@ -3736,9 +4571,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, swl = NULL; } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { swl = NULL; - } else if (ql2xiidmaenable && - qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { - qla2x00_gpsc(vha, swl); + } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) { + swl = NULL; } /* If other queries succeeded probe for FC-4 type */ @@ -3800,11 +4634,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, ql_log(ql_log_warn, vha, 0x2064, "SNS scan failed -- assuming " "zero-entry result.\n"); - list_for_each_entry_safe(fcport, fcptemp, - new_fcports, list) { - list_del(&fcport->list); - kfree(fcport); - } rval = QLA_SUCCESS; break; } @@ -3847,6 +4676,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, new_fcport->fc4_type != FC4_TYPE_UNKNOWN)) continue; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + /* Locate matching device in database. */ found = 0; list_for_each_entry(fcport, &vha->vp_fcports, list) { @@ -3869,7 +4700,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, */ if (fcport->d_id.b24 == new_fcport->d_id.b24 && (atomic_read(&fcport->state) == FCS_ONLINE || - !qla_ini_mode_enabled(base_vha))) { + (vha->host->active_mode == MODE_TARGET))) { break; } @@ -3889,7 +4720,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, * Log it out if still logged in and mark it for * relogin later. 
*/ - if (!qla_ini_mode_enabled(base_vha)) { + if (qla_tgt_mode_enabled(base_vha)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, "port changed FC ID, %8phC" " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", @@ -3907,25 +4738,19 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags |= FCF_LOGIN_NEEDED; - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0 && - (fcport->flags & FCF_ASYNC_SENT) == 0 && - fcport->port_type != FCT_INITIATOR && - fcport->port_type != FCT_BROADCAST) { - ha->isp_ops->fabric_logout(vha, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(fcport); - } - break; } - if (found) + if (found) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); continue; + } /* If device was not in our fcports list, then add it. */ new_fcport->scan_state = QLA_FCPORT_FOUND; - list_add_tail(&new_fcport->list, new_fcports); + list_add_tail(&new_fcport->list, &vha->vp_fcports); + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + /* Allocate a new replacement fcport. */ nxt_d_id.b24 = new_fcport->d_id.b24; @@ -3939,8 +4764,44 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, new_fcport->d_id.b24 = nxt_d_id.b24; } - kfree(new_fcport); + qla2x00_free_fcport(new_fcport); + /* + * Logout all previous fabric dev marked lost, except FCP2 devices. + */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || + (fcport->flags & FCF_LOGIN_NEEDED) == 0) + continue; + + if (fcport->scan_state == QLA_FCPORT_SCAN) { + if ((qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) && + atomic_read(&fcport->state) == FCS_ONLINE) { + qla2x00_mark_device_lost(vha, fcport, + ql2xplogiabsentdevice, 0); + if (fcport->loop_id != FC_NO_LOOP_ID && + (fcport->flags & FCF_FCP2_DEVICE) == 0 && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_BROADCAST) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + fcport->port_name); + + qlt_schedule_sess_for_deletion_lock + (fcport); + continue; + } + } + } + + if (fcport->scan_state == QLA_FCPORT_FOUND) + qla24xx_fcport_handle_login(vha, fcport); + } return (rval); } @@ -3992,64 +4853,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) return (rval); } -/* - * qla2x00_fabric_dev_login - * Login fabric target device and update FC port database. - * - * Input: - * ha: adapter state pointer. - * fcport: port structure list pointer. - * next_loopid: contains value of a new loop ID that can be used - * by the next login attempt. - * - * Returns: - * qla2x00 local function return status code. - * - * Context: - * Kernel context. 
- */ -static int -qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, - uint16_t *next_loopid) -{ - int rval; - uint8_t opts; - struct qla_hw_data *ha = vha->hw; - - rval = QLA_SUCCESS; - - if (IS_ALOGIO_CAPABLE(ha)) { - if (fcport->flags & FCF_ASYNC_SENT) - return rval; - fcport->flags |= FCF_ASYNC_SENT; - rval = qla2x00_post_async_login_work(vha, fcport, NULL); - if (!rval) - return rval; - } - - fcport->flags &= ~FCF_ASYNC_SENT; - rval = qla2x00_fabric_login(vha, fcport, next_loopid); - if (rval == QLA_SUCCESS) { - /* Send an ADISC to FCP2 devices.*/ - opts = 0; - if (fcport->flags & FCF_FCP2_DEVICE) - opts |= BIT_1; - rval = qla2x00_get_port_database(vha, fcport, opts); - if (rval != QLA_SUCCESS) { - ha->isp_ops->fabric_logout(vha, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_mark_device_lost(vha, fcport, 1, 0); - } else { - qla2x00_update_fcport(vha, fcport); - } - } else { - /* Retry Login. */ - qla2x00_mark_device_lost(vha, fcport, 1, 0); - } - - return (rval); -} /* * qla2x00_fabric_login @@ -4341,13 +5144,6 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_rport_del(fcport); - /* - * Release the target mode FC NEXUS in - * qla_target.c, if target mod is enabled. - */ - qlt_fc_port_deleted(vha, fcport, - base_vha->total_fcport_update_gen); - spin_lock_irqsave(&ha->vport_slock, flags); } } @@ -4730,6 +5526,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); + ha->chip_reset++; + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -4784,8 +5582,6 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) /* Requeue all commands in outstanding command list. */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); } - - ha->chip_reset++; /* memory barrier */ wmb(); } @@ -4981,7 +5777,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) if (!status) { /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } @@ -5209,7 +6004,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) rval = 1; } - if (!qla_ini_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha)) { /* Don't enable full login after initial LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Don't enable LIP full login for initiator */ @@ -5400,6 +6195,7 @@ uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha) for (chksum = 0; cnt--; wptr++) chksum += le32_to_cpu(*wptr); + if (chksum) { ql_dbg(ql_dbg_init, vha, 0x018c, "Checksum validation failed for primary image (0x%x)\n", @@ -6412,6 +7208,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) vha->flags.process_response_queue = 1; } + /* enable RIDA Format2 */ + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) + icb->firmware_options_3 |= BIT_0; + if (rval) { ql_log(ql_log_warn, vha, 0x0076, "NVRAM configuration failed.\n"); @@ -6536,13 +7336,26 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) __func__, ha->fw_options[2]); } - if (!ql2xetsenable) - goto out; + /* Move PUREX, ABTS RX & RIDA to ATIOQ */ + if (ql2xmvasynctoatio) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_11; + else + ha->fw_options[2] &= ~BIT_11; + } + + if (ql2xetsenable) { + /* Enable ETS Burst. 
*/ + memset(ha->fw_options, 0, sizeof(ha->fw_options)); + ha->fw_options[2] |= BIT_9; + } + + ql_dbg(ql_dbg_init, vha, 0xffff, + "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); - /* Enable ETS Burst. */ - memset(ha->fw_options, 0, sizeof(ha->fw_options)); - ha->fw_options[2] |= BIT_9; -out: qla2x00_set_fw_options(vha, ha->fw_options); } @@ -6748,6 +7561,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v memset(qpair, 0, sizeof(struct qla_qpair)); qpair->hw = vha->hw; + qpair->vha = vha; /* Assign available que pair id */ mutex_lock(&ha->mq_lock); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 44e404583c86..66df6cec59da 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -166,8 +166,8 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state) /* Don't print state transitions during initial allocation of fcport */ if (old_state && old_state != state) { ql_dbg(ql_dbg_disc, fcport->vha, 0x207d, - "FCPort state transitioned from %s to %s - " - "portid=%02x%02x%02x.\n", + "FCPort %8phC state transitioned from %s to %s - " + "portid=%02x%02x%02x.\n", fcport->port_name, port_state_str[old_state], port_state_str[state], fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); @@ -232,6 +232,7 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) memset(sp, 0, sizeof(*sp)); sp->fcport = fcport; sp->iocbs = 1; + sp->vha = qpair->vha; done: if (!sp) QLA_QPAIR_MARK_NOT_BUSY(qpair); @@ -249,20 +250,20 @@ static inline srb_t * qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) { srb_t *sp = NULL; - struct qla_hw_data *ha = vha->hw; uint8_t bail; QLA_VHA_MARK_BUSY(vha, bail); if (unlikely(bail)) return NULL; - sp = mempool_alloc(ha->srb_mempool, flag); + sp = mempool_alloc(vha->hw->srb_mempool, flag); if (!sp) goto done; memset(sp, 0, sizeof(*sp)); sp->fcport = fcport; sp->iocbs = 1; + sp->vha = vha; done: if (!sp) QLA_VHA_MARK_NOT_BUSY(vha); @@ -270,10 +271,10 @@ done: } static inline void -qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp) +qla2x00_rel_sp(srb_t *sp) { - mempool_free(sp, vha->hw->srb_mempool); - QLA_VHA_MARK_NOT_BUSY(vha); + QLA_VHA_MARK_NOT_BUSY(sp->vha); + mempool_free(sp, sp->vha->hw->srb_mempool); } static inline void @@ -285,8 +286,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo) sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout; add_timer(&sp->u.iocb_cmd.timer); sp->free = qla2x00_sp_free; - if ((IS_QLAFX00(sp->fcport->vha->hw)) && - (sp->type == SRB_FXIOCB_DCMD)) + if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD)) init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); if (sp->type == SRB_ELS_DCMD) init_completion(&sp->u.iocb_cmd.u.els_logo.comp); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 58e49a3e1de8..535079280288 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -23,7 +23,7 @@ qla2x00_get_cmd_direction(srb_t *sp) { uint16_t cflags; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; cflags = 0; @@ -210,7 +210,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, return; } - vha = sp->fcport->vha; + vha = sp->vha; cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Three DSDs are available in the Command Type 
2 IOCB */ @@ -267,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, return; } - vha = sp->fcport->vha; + vha = sp->vha; cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Two DSDs are available in the Command Type 3 IOCB */ @@ -324,7 +324,7 @@ qla2x00_start_scsi(srb_t *sp) struct rsp_que *rsp; /* Setup device pointers. */ - vha = sp->fcport->vha; + vha = sp->vha; ha = vha->hw; reg = &ha->iobase->isp; cmd = GET_CMD_SP(sp); @@ -601,7 +601,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, return 0; } - vha = sp->fcport->vha; + vha = sp->vha; ha = vha->hw; /* Set transfer direction */ @@ -716,7 +716,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, return; } - vha = sp->fcport->vha; + vha = sp->vha; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -1108,7 +1108,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, if (sp) { cmd = GET_CMD_SP(sp); sgl = scsi_prot_sglist(cmd); - vha = sp->fcport->vha; + vha = sp->vha; } else if (tc) { vha = tc->vha; sgl = tc->prot_sg; @@ -1215,7 +1215,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, /* Update entry type to indicate Command Type CRC_2 IOCB */ *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2); - vha = sp->fcport->vha; + vha = sp->vha; ha = vha->hw; /* No data transfer */ @@ -1225,7 +1225,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, return QLA_SUCCESS; } - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -1415,7 +1415,7 @@ qla24xx_start_scsi(srb_t *sp) struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; /* Setup device pointers. 
*/ @@ -1492,7 +1492,7 @@ qla24xx_start_scsi(srb_t *sp) cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); @@ -1564,7 +1564,7 @@ qla24xx_dif_start_scsi(srb_t *sp) struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_crc_2 *cmd_pkt; uint32_t status = 0; @@ -2214,13 +2214,13 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; - logio->vp_index = sp->fcport->vha->vp_idx; + logio->vp_index = sp->vha->vp_idx; } static void qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) { - struct qla_hw_data *ha = sp->fcport->vha->hw; + struct qla_hw_data *ha = sp->vha->hw; struct srb_iocb *lio = &sp->u.iocb_cmd; uint16_t opts; @@ -2238,7 +2238,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); - mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); } static void @@ -2247,20 +2247,20 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); - if (!sp->fcport->tgt_session || - !sp->fcport->tgt_session->keep_nport_handle) + if (!sp->fcport->se_sess || + !sp->fcport->keep_nport_handle) logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; - logio->vp_index = sp->fcport->vha->vp_idx; + logio->vp_index = sp->vha->vp_idx; } static void qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) { - struct qla_hw_data *ha = sp->fcport->vha->hw; + struct qla_hw_data *ha = sp->vha->hw; mbx->entry_type = MBX_IOCB_TYPE; SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); @@ -2271,7 +2271,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); - mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); /* Implicit: mbx->mbx10 = 0. 
*/ } @@ -2281,13 +2281,13 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); - logio->vp_index = sp->fcport->vha->vp_idx; + logio->vp_index = sp->vha->vp_idx; } static void qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) { - struct qla_hw_data *ha = sp->fcport->vha->hw; + struct qla_hw_data *ha = sp->vha->hw; mbx->entry_type = MBX_IOCB_TYPE; SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); @@ -2302,7 +2302,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); - mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); } static void @@ -2338,32 +2338,30 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) } static void -qla2x00_els_dcmd_sp_free(void *ptr, void *data) +qla2x00_els_dcmd_sp_free(void *data) { - struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr; - struct qla_hw_data *ha = vha->hw; - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *elsio = &sp->u.iocb_cmd; kfree(sp->fcport); if (elsio->u.els_logo.els_logo_pyld) - dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE, + dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, elsio->u.els_logo.els_logo_pyld, elsio->u.els_logo.els_logo_pyld_dma); del_timer(&elsio->timer); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); } static void qla2x00_els_dcmd_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; - struct srb_iocb *lio = &sp->u.iocb_cmd; + srb_t *sp = data; fc_port_t *fcport = sp->fcport; - struct scsi_qla_host *vha = fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; + struct srb_iocb *lio = &sp->u.iocb_cmd; unsigned long flags = 0; ql_dbg(ql_dbg_io, vha, 0x3069, @@ -2386,12 +2384,12 @@ qla2x00_els_dcmd_iocb_timeout(void *data) } static void -qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res) +qla2x00_els_dcmd_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; fc_port_t *fcport = sp->fcport; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = fcport->vha; + struct scsi_qla_host *vha = sp->vha; ql_dbg(ql_dbg_io, vha, 0x3072, "%s hdl=%x, portid=%02x%02x%02x done\n", @@ -2449,7 +2447,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, GFP_KERNEL); if (!elsio->u.els_logo.els_logo_pyld) { - sp->free(vha, sp); + sp->free(sp); return QLA_FUNCTION_FAILED; } @@ -2468,7 +2466,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - sp->free(vha, sp); + sp->free(sp); return QLA_FUNCTION_FAILED; } @@ -2479,14 +2477,14 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, wait_for_completion(&elsio->u.els_logo.comp); - sp->free(vha, sp); + sp->free(sp); return rval; } static void qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) { - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct srb_iocb *elsio = &sp->u.iocb_cmd; els_iocb->entry_type = ELS_IOCB_TYPE; @@ -2518,7 +2516,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->rx_address[1] = 0; els_iocb->rx_len = 0; - sp->fcport->vha->qla_stats.control_requests++; + sp->vha->qla_stats.control_requests++; } static void @@ -2534,7 +2532,7 @@ qla24xx_els_iocb(srb_t *sp, 
struct els_entry_24xx *els_iocb) els_iocb->handle = sp->handle; els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); - els_iocb->vp_index = sp->fcport->vha->vp_idx; + els_iocb->vp_index = sp->vha->vp_idx; els_iocb->sof_type = EST_SOFI3; els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); @@ -2565,7 +2563,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->rx_len = cpu_to_le32(sg_dma_len (bsg_job->reply_payload.sg_list)); - sp->fcport->vha->qla_stats.control_requests++; + sp->vha->qla_stats.control_requests++; } static void @@ -2576,7 +2574,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) struct scatterlist *sg; int index; uint16_t tot_dsds; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; @@ -2642,7 +2640,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) } ct_iocb->entry_count = entry_count; - sp->fcport->vha->qla_stats.control_requests++; + sp->vha->qla_stats.control_requests++; } static void @@ -2653,7 +2651,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) struct scatterlist *sg; int index; uint16_t tot_dsds; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; @@ -2665,7 +2663,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) ct_iocb->handle = sp->handle; ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); - ct_iocb->vp_index = sp->fcport->vha->vp_idx; + ct_iocb->vp_index = sp->vha->vp_idx; ct_iocb->comp_status = cpu_to_le16(0); ct_iocb->cmd_dsd_count = @@ -2739,7 +2737,7 @@ qla82xx_start_scsi(srb_t *sp) uint32_t *fcp_dl; uint8_t additional_cdb_len; struct ct6_dsd *ctx; - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; struct rsp_que *rsp = NULL; @@ -2901,7 +2899,7 @@ sufficient_dsds: cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; /* Build IOCB segments */ if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) @@ -2974,7 +2972,7 @@ sufficient_dsds: cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, @@ -3060,7 +3058,7 @@ static void qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) { struct srb_iocb *aio = &sp->u.iocb_cmd; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); @@ -3079,19 +3077,69 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) wmb(); } +static void +qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) +{ + int i, sz; + + mbx->entry_type = MBX_IOCB_TYPE; + mbx->handle = sp->handle; + sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); + + for (i = 0; i < sz; i++) + mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]); +} + +static void 
+qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) +{ + sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; + qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); + ct_pkt->handle = sp->handle; +} + +static void qla2x00_send_notify_ack_iocb(srb_t *sp, + struct nack_to_isp *nack) +{ + struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; + + nack->entry_type = NOTIFY_ACK_TYPE; + nack->entry_count = 1; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.handle = sp->handle; + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.srr_flags = 0; + nack->u.isp24.srr_reject_code = 0; + nack->u.isp24.srr_reject_code_expl = 0; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; +} + int qla2x00_start_sp(srb_t *sp) { int rval; - struct qla_hw_data *ha = sp->fcport->vha->hw; + scsi_qla_host_t *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; void *pkt; unsigned long flags; rval = QLA_FUNCTION_FAILED; spin_lock_irqsave(&ha->hardware_lock, flags); - pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); + pkt = qla2x00_alloc_iocbs(vha, sp); if (!pkt) { - ql_log(ql_log_warn, sp->fcport->vha, 0x700c, + ql_log(ql_log_warn, vha, 0x700c, "qla2x00_alloc_iocbs failed.\n"); goto done; } @@ -3139,12 +3187,23 @@ qla2x00_start_sp(srb_t *sp) case SRB_ELS_DCMD: qla24xx_els_logo_iocb(sp, pkt); break; + case SRB_CT_PTHRU_CMD: + qla2x00_ctpthru_cmd_iocb(sp, pkt); + break; + case SRB_MB_IOCB: + qla2x00_mb_iocb(sp, pkt); + break; + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + qla2x00_send_notify_ack_iocb(sp, pkt); + break; default: break; } wmb(); - qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]); + qla2x00_start_iocbs(vha, ha->req_q_map[0]); done: spin_unlock_irqrestore(&ha->hardware_lock, flags); return rval; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index edc2264db45b..3c66ea29de27 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -561,14 +561,50 @@ qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) return ret; } -static inline fc_port_t * +fc_port_t * qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) { - fc_port_t *fcport; + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) + if (f->loop_id == loop_id) + return f; + return NULL; +} + +fc_port_t * +qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } + return NULL; +} - list_for_each_entry(fcport, &vha->vp_fcports, list) - if (fcport->loop_id == loop_id) - return fcport; +fc_port_t * +qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, + u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (f->d_id.b24 
== id->b24) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } return NULL; } @@ -934,7 +970,11 @@ skip_rio: ql_dbg(ql_dbg_async, vha, 0x508a, "Marking port lost loopid=%04x portid=%06x.\n", fcport->loop_id, fcport->d_id.b24); - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + if (qla_ini_mode_enabled(vha)) { + qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + fcport->logout_on_delete = 0; + qlt_schedule_sess_for_deletion_lock(fcport); + } break; global_port_update: @@ -985,9 +1025,6 @@ global_port_update: qla2x00_mark_all_devices_lost(vha, 1); - if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) - set_bit(SCR_PENDING, &vha->dpc_flags); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(VP_CONFIG_OK, &vha->vp_flags); @@ -1024,27 +1061,19 @@ global_port_update: if (qla2x00_is_a_vp_did(vha, rscn_entry)) break; - /* - * Search for the rport related to this RSCN entry and mark it - * as lost. - */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (atomic_read(&fcport->state) != FCS_ONLINE) - continue; - if (fcport->d_id.b24 == rscn_entry) { - qla2x00_mark_device_lost(vha, fcport, 0, 0); - break; - } - } - atomic_set(&vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0; - - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - set_bit(RSCN_UPDATE, &vha->dpc_flags); - qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); + { + struct event_arg ea; + + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_RSCN; + ea.id.b24 = rscn_entry; + ea.id.b.rsvd_1 = rscn_entry >> 24; + qla2x00_fcport_event_handler(vha, &ea); + qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); + } break; - /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: ql_dbg(ql_dbg_async, vha, 0x5015, @@ -1212,7 +1241,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, req->outstanding_cmds[index] = NULL; /* Save ISP completion status */ - sp->done(ha, sp, DID_OK << 16); + sp->done(sp, DID_OK << 16); } else { ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); @@ -1235,7 +1264,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, index = LSW(pkt->handle); if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x5031, - "Invalid command index (%x).\n", index); + "Invalid command index (%x) type %8ph.\n", + index, iocb); if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else @@ -1343,66 +1373,122 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, le16_to_cpu(mbx->mb7)); logio_done: - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void -qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, - sts_entry_t *pkt, int iocb_type) +qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct mbx_24xx_entry *pkt) { - const char func[] = "CT_IOCB"; - const char *type; + const char func[] = "MBX-IOCB2"; srb_t *sp; - struct bsg_job *bsg_job; - struct fc_bsg_reply *bsg_reply; - uint16_t comp_status; + struct srb_iocb *si; + u16 sz, i; int res; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; - bsg_job = sp->u.bsg_job; - bsg_reply = bsg_job->reply; + si = &sp->u.iocb_cmd; + sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); - type = "ct pass-through"; + for (i = 0; i < sz; i++) + si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]); - comp_status = le16_to_cpu(pkt->comp_status); + res = (si->u.mbx.in_mb[0] & MBS_MASK); - /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT - * fc payload 
to the caller - */ - bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; - bsg_job->reply_len = sizeof(struct fc_bsg_reply); + sp->done(sp, res); +} - if (comp_status != CS_COMPLETE) { - if (comp_status == CS_DATA_UNDERRUN) { - res = DID_OK << 16; - bsg_reply->reply_payload_rcv_len = - le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); +static void +qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct nack_to_isp *pkt) +{ + const char func[] = "nack"; + srb_t *sp; + int res = 0; - ql_log(ql_log_warn, vha, 0x5048, - "CT pass-through-%s error " - "comp_status-status=0x%x total_byte = 0x%x.\n", - type, comp_status, - bsg_reply->reply_payload_rcv_len); - } else { - ql_log(ql_log_warn, vha, 0x5049, - "CT pass-through-%s error " - "comp_status-status=0x%x.\n", type, comp_status); - res = DID_ERROR << 16; - bsg_reply->reply_payload_rcv_len = 0; - } - ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, - (uint8_t *)pkt, sizeof(*pkt)); - } else { - res = DID_OK << 16; - bsg_reply->reply_payload_rcv_len = - bsg_job->reply_payload.payload_len; - bsg_job->reply_len = 0; - } + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) + res = QLA_FUNCTION_FAILED; + + sp->done(sp, res); +} + +static void +qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, + sts_entry_t *pkt, int iocb_type) +{ + const char func[] = "CT_IOCB"; + const char *type; + srb_t *sp; + struct bsg_job *bsg_job; + struct fc_bsg_reply *bsg_reply; + uint16_t comp_status; + int res = 0; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; - sp->done(vha, sp, res); + switch (sp->type) { + case SRB_CT_CMD: + bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; + + type = "ct pass-through"; + + comp_status = le16_to_cpu(pkt->comp_status); + + /* + * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT + * fc payload to the caller + */ + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + + if (comp_status != CS_COMPLETE) { + if (comp_status == CS_DATA_UNDERRUN) { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); + + ql_log(ql_log_warn, vha, 0x5048, + "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", + type, comp_status, + bsg_reply->reply_payload_rcv_len); + } else { + ql_log(ql_log_warn, vha, 0x5049, + "CT pass-through-%s error comp_status=0x%x.\n", + type, comp_status); + res = DID_ERROR << 16; + bsg_reply->reply_payload_rcv_len = 0; + } + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, + (uint8_t *)pkt, sizeof(*pkt)); + } else { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + bsg_job->reply_payload.payload_len; + bsg_job->reply_len = 0; + } + break; + case SRB_CT_PTHRU_CMD: + /* + * borrowing sts_entry_24xx.comp_status. + * same location as ct_entry_24xx.comp_status + */ + res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, + sp->name); + break; + } + + sp->done(sp, res); } static void @@ -1438,7 +1524,16 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, type = "Driver ELS logo"; ql_dbg(ql_dbg_user, vha, 0x5047, "Completing %s: (%p) type=%d.\n", type, sp, sp->type); - sp->done(vha, sp, 0); + sp->done(sp, 0); + return; + case SRB_CT_PTHRU_CMD: + /* borrowing sts_entry_24xx.comp_status. 
+ same location as ct_entry_24xx.comp_status + */ + res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, + sp->name); + sp->done(sp, res); return; default: ql_dbg(ql_dbg_user, vha, 0x503e, @@ -1496,7 +1591,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, bsg_job->reply_len = 0; } - sp->done(vha, sp, res); + sp->done(sp, res); } static void @@ -1543,6 +1638,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, fcport->d_id.b.area, fcport->d_id.b.al_pa, le32_to_cpu(logio->io_parameter[0])); + vha->hw->exch_starvation = 0; data[0] = MBS_COMMAND_COMPLETE; if (sp->type != SRB_LOGIN_CMD) goto logio_done; @@ -1568,6 +1664,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, iop[0] = le32_to_cpu(logio->io_parameter[0]); iop[1] = le32_to_cpu(logio->io_parameter[1]); + lio->u.logio.iop[0] = iop[0]; + lio->u.logio.iop[1] = iop[1]; switch (iop[0]) { case LSC_SCODE_PORTID_USED: data[0] = MBS_PORT_ID_USED; @@ -1576,6 +1674,21 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; break; + case LSC_SCODE_NOXCB: + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xffff, + "Exchange starvation. Resetting RISC\n"); + + vha->hw->exch_starvation = 0; + + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + /* drop through */ default: data[0] = MBS_COMMAND_ERROR; break; @@ -1590,7 +1703,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, le32_to_cpu(logio->io_parameter[1])); logio_done: - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void @@ -1640,7 +1753,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, (uint8_t *)sts, sizeof(*sts)); - sp->done(vha, sp, 0); + sp->done(sp, 0); } /** @@ -1728,7 +1841,7 @@ static inline void qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, uint32_t sense_len, struct rsp_que *rsp, int res) { - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cp = GET_CMD_SP(sp); uint32_t track_sense_len; @@ -1756,7 +1869,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, if (sense_len) { ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", - sp->fcport->vha->host_no, cp->device->id, cp->device->lun, + sp->vha->host_no, cp->device->id, cp->device->lun, cp); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, cp->sense_buffer, sense_len); @@ -1778,7 +1891,7 @@ struct scsi_dif_tuple { static inline int qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) { - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cmd = GET_CMD_SP(sp); uint8_t *ap = &sts24->data[12]; uint8_t *ep = &sts24->data[20]; @@ -2043,7 +2156,7 @@ done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); /* Always return DID_OK, bsg will send the vendor specific response * in this case only */ - sp->done(vha, sp, (DID_OK << 6)); + sp->done(sp, DID_OK << 6); } @@ -2076,6 +2189,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) int res = 0; uint16_t state_flags = 0; uint16_t retry_delay = 0; + uint8_t no_logout = 0; sts = (sts_entry_t *) pkt; sts24 = (struct 
sts_entry_24xx *) pkt; @@ -2336,6 +2450,7 @@ check_scsi_status: break; case CS_PORT_LOGGED_OUT: + no_logout = 1; case CS_PORT_CONFIG_CHG: case CS_PORT_BUSY: case CS_INCOMPLETE: @@ -2358,14 +2473,21 @@ check_scsi_status: break; } - ql_dbg(ql_dbg_io, fcport->vha, 0x3021, - "Port to be marked lost on fcport=%02x%02x%02x, current " - "port state= %s.\n", fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa, - port_state_str[atomic_read(&fcport->state)]); + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, + "Port to be marked lost on fcport=%02x%02x%02x, current " + "port state= %s comp_status %x.\n", fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + port_state_str[atomic_read(&fcport->state)], + comp_status); + + if (no_logout) + fcport->logout_on_delete = 0; - if (atomic_read(&fcport->state) == FCS_ONLINE) qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + qlt_schedule_sess_for_deletion_lock(fcport); + } + break; case CS_ABORTED: @@ -2407,7 +2529,7 @@ out: resid_len, fw_resid_len, sp, cp); if (rsp->status_srb == NULL) - sp->done(ha, sp, res); + sp->done(sp, res); } /** @@ -2464,7 +2586,7 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) /* Place command on done queue. */ if (sense_len == 0) { rsp->status_srb = NULL; - sp->done(ha, sp, cp->result); + sp->done(sp, cp->result); } } @@ -2500,7 +2622,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { - sp->done(ha, sp, res); + sp->done(sp, res); return; } fatal: @@ -2558,7 +2680,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); - sp->done(vha, sp, 0); + sp->done(sp, 0); } /** @@ -2629,10 +2751,16 @@ process_err: } case ABTS_RESP_24XX: case CTIO_TYPE7: - case NOTIFY_ACK_TYPE: case CTIO_CRC2: qlt_response_pkt_all_vps(vha, (response_t *)pkt); break; + case NOTIFY_ACK_TYPE: + if (pkt->handle == QLA_TGT_SKIP_HANDLE) + qlt_response_pkt_all_vps(vha, (response_t *)pkt); + else + qla24xxx_nack_iocb_entry(vha, rsp->req, + (struct nack_to_isp *)pkt); + break; case MARKER_TYPE: /* Do nothing in this case, this check is to prevent it * from falling into default case @@ -2642,6 +2770,10 @@ process_err: qla24xx_abort_iocb_entry(vha, rsp->req, (struct abort_entry_24xx *)pkt); break; + case MBX_IOCB_TYPE: + qla24xx_mbx_iocb_entry(vha, rsp->req, + (struct mbx_24xx_entry *)pkt); + break; default: /* Type Not Supported. 
*/ ql_dbg(ql_dbg_async, vha, 0x5042, @@ -2658,8 +2790,9 @@ process_err: if (IS_P3P_TYPE(ha)) { struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index); - } else + } else { WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); + } } static void @@ -3015,14 +3148,17 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) int i, ret; struct qla_msix_entry *qentry; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + int min_vecs = QLA_BASE_VECTORS; struct irq_affinity desc = { .pre_vectors = QLA_BASE_VECTORS, }; - if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) + if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { desc.pre_vectors++; + min_vecs++; + } - ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS, + ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 67f64db390b0..35079f417417 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1637,94 +1637,6 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) return rval; } -/* - * qla2x00_get_node_name_list - * Issue get node name list mailbox command, kmalloc() - * and return the resulting list. Caller must kfree() it! - * - * Input: - * ha = adapter state pointer. - * out_data = resulting list - * out_len = length of the resulting list - * - * Returns: - * qla2x00 local function return status code. - * - * Context: - * Kernel context. - */ -int -qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) -{ - struct qla_hw_data *ha = vha->hw; - struct qla_port_24xx_data *list = NULL; - void *pmap; - mbx_cmd_t mc; - dma_addr_t pmap_dma; - ulong dma_size; - int rval, left; - - left = 1; - while (left > 0) { - dma_size = left * sizeof(*list); - pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size, - &pmap_dma, GFP_KERNEL); - if (!pmap) { - ql_log(ql_log_warn, vha, 0x113f, - "%s(%ld): DMA Alloc failed of %ld\n", - __func__, vha->host_no, dma_size); - rval = QLA_MEMORY_ALLOC_FAILED; - goto out; - } - - mc.mb[0] = MBC_PORT_NODE_NAME_LIST; - mc.mb[1] = BIT_1 | BIT_3; - mc.mb[2] = MSW(pmap_dma); - mc.mb[3] = LSW(pmap_dma); - mc.mb[6] = MSW(MSD(pmap_dma)); - mc.mb[7] = LSW(MSD(pmap_dma)); - mc.mb[8] = dma_size; - mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8; - mc.in_mb = MBX_0|MBX_1; - mc.tov = 30; - mc.flags = MBX_DMA_IN; - - rval = qla2x00_mailbox_command(vha, &mc); - if (rval != QLA_SUCCESS) { - if ((mc.mb[0] == MBS_COMMAND_ERROR) && - (mc.mb[1] == 0xA)) { - left += le16_to_cpu(mc.mb[2]) / - sizeof(struct qla_port_24xx_data); - goto restart; - } - goto out_free; - } - - left = 0; - - list = kmemdup(pmap, dma_size, GFP_KERNEL); - if (!list) { - ql_log(ql_log_warn, vha, 0x1140, - "%s(%ld): failed to allocate node names list " - "structure.\n", __func__, vha->host_no); - rval = QLA_MEMORY_ALLOC_FAILED; - goto out_free; - } - -restart: - dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); - } - - *out_data = list; - *out_len = dma_size; - -out: - return rval; - -out_free: - dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); - return rval; -} /* * qla2x00_get_port_database @@ -3687,10 +3599,8 @@ void qla24xx_report_id_acquisition(scsi_qla_host_t *vha, struct vp_rpt_id_entry_24xx *rptid_entry) { - uint8_t vp_idx; - uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); struct qla_hw_data *ha = vha->hw; - scsi_qla_host_t *vp; + scsi_qla_host_t *vp = NULL; 
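The qla24xx_enable_msix() hunk above keeps desc.pre_vectors and min_vecs in step: pre_vectors tells the IRQ core how many leading vectors to exclude from automatic CPU spreading, and min_vecs guarantees the allocation cannot succeed with fewer vectors than the driver reserves for the default response queue, the mailbox interrupt and, in target mode, the ATIO queue. A minimal sketch of that pattern, assuming a stand-in value of 2 for QLA_BASE_VECTORS and an illustrative function name:

#include <linux/pci.h>
#include <linux/interrupt.h>

static int example_alloc_msix(struct pci_dev *pdev, unsigned int max_vecs,
			      bool need_atio_vector)
{
	unsigned int min_vecs = 2;			/* stand-in for QLA_BASE_VECTORS */
	struct irq_affinity desc = { .pre_vectors = 2 };

	if (need_atio_vector) {
		desc.pre_vectors++;	/* ATIO vector stays out of the affinity spread */
		min_vecs++;		/* and must be guaranteed by the allocation */
	}

	/* Returns the number of vectors obtained, or a negative errno. */
	return pci_alloc_irq_vectors_affinity(pdev, min_vecs, max_vecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
}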
unsigned long flags; int found; @@ -3701,80 +3611,124 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, return; if (rptid_entry->format == 0) { + /* loop */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, "Format 0 : Number of VPs setup %d, number of " - "VPs acquired %d.\n", - MSB(le16_to_cpu(rptid_entry->vp_count)), - LSB(le16_to_cpu(rptid_entry->vp_count))); + "VPs acquired %d.\n", rptid_entry->vp_setup, + rptid_entry->vp_acquired); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, "Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); + + vha->d_id.b.domain = rptid_entry->port_id[2]; + vha->d_id.b.area = rptid_entry->port_id[1]; + vha->d_id.b.al_pa = rptid_entry->port_id[0]; + + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else if (rptid_entry->format == 1) { - vp_idx = LSB(stat); + /* fabric */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, "Format 1: VP[%d] enabled - status %d - with " - "port id %02x%02x%02x.\n", vp_idx, MSB(stat), + "port id %02x%02x%02x.\n", rptid_entry->vp_idx, + rptid_entry->vp_status, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); /* buffer to buffer credit flag */ - vha->flags.bbcr_enable = (rptid_entry->bbcr & 0xf) != 0; - - /* FA-WWN is only for physical port */ - if (!vp_idx) { - void *wwpn = ha->init_cb->port_name; + vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; + + if (rptid_entry->vp_idx == 0) { + if (rptid_entry->vp_status == VP_STAT_COMPL) { + /* FA-WWN is only for physical port */ + if (qla_ini_mode_enabled(vha) && + ha->flags.fawwpn_enabled && + (rptid_entry->u.f1.flags & + VP_FLAGS_NAME_VALID)) { + memcpy(vha->port_name, + rptid_entry->u.f1.port_name, + WWN_SIZE); + } - if (!MSB(stat)) { - if (rptid_entry->vp_idx_map[1] & BIT_6) - wwpn = rptid_entry->reserved_4 + 8; + vha->d_id.b.domain = rptid_entry->port_id[2]; + vha->d_id.b.area = rptid_entry->port_id[1]; + vha->d_id.b.al_pa = rptid_entry->port_id[0]; + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); } - memcpy(vha->port_name, wwpn, WWN_SIZE); + fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); - ql_dbg(ql_dbg_mbx, vha, 0x1018, - "FA-WWN portname %016llx (%x)\n", - fc_host_port_name(vha->host), MSB(stat)); - } - - vp = vha; - if (vp_idx == 0) - goto reg_needed; - if (MSB(stat) != 0 && MSB(stat) != 2) { - ql_dbg(ql_dbg_mbx, vha, 0x10ba, - "Could not acquire ID for VP[%d].\n", vp_idx); - return; - } + if (qla_ini_mode_enabled(vha)) + ql_dbg(ql_dbg_mbx, vha, 0x1018, + "FA-WWN portname %016llx (%x)\n", + fc_host_port_name(vha->host), + rptid_entry->vp_status); - found = 0; - spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { - if (vp_idx == vp->vp_idx) { - found = 1; - break; + set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); + } else { + if (rptid_entry->vp_status != VP_STAT_COMPL && + rptid_entry->vp_status != VP_STAT_ID_CHG) { + ql_dbg(ql_dbg_mbx, vha, 0x10ba, + "Could not acquire ID for VP[%d].\n", + rptid_entry->vp_idx); + return; } - } - spin_unlock_irqrestore(&ha->vport_slock, flags); - if (!found) - return; + found = 0; + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (rptid_entry->vp_idx == vp->vp_idx) { + found = 1; + break; + } + } + 
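For orientation while reading the long if/else chain above and its continuation just below, the reworked qla24xx_report_id_acquisition() distinguishes three report-ID formats. A condensed sketch of the control flow, using the field names from the diff with all bodies elided into comments:

static void example_rida_dispatch(struct scsi_qla_host *vha,
				  struct vp_rpt_id_entry_24xx *rptid_entry)
{
	switch (rptid_entry->format) {
	case 0:	/* arbitrated loop: adopt the primary port id into vha->d_id,
		 * then refresh the AL_PA map under ha->vport_slock */
		break;
	case 1:	/* fabric: vp_idx 0 updates the physical port (optional FA-WWN,
		 * d_id, FC4/FDMI registration); vp_idx > 0 looks the vport up on
		 * ha->vp_list and defers configuration to DPC context */
		break;
	case 2:	/* N2N direct connect: adopt the local port id and note the
		 * remote WWPN reported by the firmware */
		break;
	}
}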
spin_unlock_irqrestore(&ha->vport_slock, flags); - vp->d_id.b.domain = rptid_entry->port_id[2]; - vp->d_id.b.area = rptid_entry->port_id[1]; - vp->d_id.b.al_pa = rptid_entry->port_id[0]; + if (!found) + return; - /* - * Cannot configure here as we are still sitting on the - * response queue. Handle it in dpc context. - */ - set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); + vp->d_id.b.domain = rptid_entry->port_id[2]; + vp->d_id.b.area = rptid_entry->port_id[1]; + vp->d_id.b.al_pa = rptid_entry->port_id[0]; + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vp, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); -reg_needed: - set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); - set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); + /* + * Cannot configure here as we are still sitting on the + * response queue. Handle it in dpc context. + */ + set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); + set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); + } set_bit(VP_DPC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); + } else if (rptid_entry->format == 2) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", + rptid_entry->port_id[2], rptid_entry->port_id[1], + rptid_entry->port_id[0]); + + ql_dbg(ql_dbg_async, vha, 0xffff, + "N2N: Remote WWPN %8phC.\n", + rptid_entry->u.f2.port_name); + + /* N2N. direct connect */ + vha->d_id.b.domain = rptid_entry->port_id[2]; + vha->d_id.b.area = rptid_entry->port_id[1]; + vha->d_id.b.al_pa = rptid_entry->port_id[0]; + + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); } } diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 96c33e292eba..10b742d27e16 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1789,16 +1789,16 @@ qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo) static void qla2x00_fxdisc_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *lio = &sp->u.iocb_cmd; complete(&lio->u.fxiocb.fxiocb_comp); } static void -qla2x00_fxdisc_sp_done(void *data, void *ptr, int res) +qla2x00_fxdisc_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *lio = &sp->u.iocb_cmd; complete(&lio->u.fxiocb.fxiocb_comp); @@ -1999,7 +1999,7 @@ done_unmap_req: dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle); done_free_sp: - sp->free(vha, sp); + sp->free(sp); done: return rval; } @@ -2127,7 +2127,7 @@ static inline void qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, uint32_t sense_len, struct rsp_que *rsp, int res) { - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cp = GET_CMD_SP(sp); uint32_t track_sense_len; @@ -2162,7 +2162,7 @@ qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, if (sense_len) { ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", - sp->fcport->vha->host_no, cp->device->id, cp->device->lun, + sp->vha->host_no, cp->device->id, cp->device->lun, cp); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, cp->sense_buffer, sense_len); @@ -2181,7 +2181,7 @@ qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID))) cpstatus = 
cpu_to_le16((uint16_t)CS_INCOMPLETE); tmf->u.tmf.comp_status = cpstatus; - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void @@ -2198,7 +2198,7 @@ qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = pkt->tgt_id_sts; - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void @@ -2264,7 +2264,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; } - sp->done(vha, sp, res); + sp->done(sp, res); } /** @@ -2537,7 +2537,7 @@ check_scsi_status: par_sense_len, rsp_info_len); if (rsp->status_srb == NULL) - sp->done(ha, sp, res); + sp->done(sp, res); } /** @@ -2614,7 +2614,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) /* Place command on done queue. */ if (sense_len == 0) { rsp->status_srb = NULL; - sp->done(ha, sp, cp->result); + sp->done(sp, cp->result); } } @@ -2695,7 +2695,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { - sp->done(ha, sp, res); + sp->done(sp, res); return; } @@ -2997,7 +2997,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, cont_a64_entry_t lcont_pkt; cont_a64_entry_t *cont_pkt; - vha = sp->fcport->vha; + vha = sp->vha; req = vha->req; cmd = GET_CMD_SP(sp); @@ -3081,7 +3081,7 @@ qlafx00_start_scsi(srb_t *sp) struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_7_fx00 *cmd_pkt; struct cmd_type_7_fx00 lcmd_pkt; @@ -3205,7 +3205,7 @@ void qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; struct tsk_mgmt_entry_fx00 tm_iocb; struct scsi_lun llun; @@ -3232,7 +3232,7 @@ void qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; struct abort_iocb_entry_fx00 abt_iocb; @@ -3346,8 +3346,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) REQUEST_ENTRY_SIZE); cont_pkt = qlafx00_prep_cont_type1_iocb( - sp->fcport->vha->req, - &lcont_pkt); + sp->vha->req, &lcont_pkt); cur_dsd = (__le32 *) lcont_pkt.dseg_0_address; avail_dsds = 5; @@ -3368,7 +3367,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer( ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3042, + sp->vha, 0x3042, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } @@ -3377,7 +3376,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3043, + sp->vha, 0x3043, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } @@ -3409,8 +3408,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) REQUEST_ENTRY_SIZE); cont_pkt = qlafx00_prep_cont_type1_iocb( - sp->fcport->vha->req, - &lcont_pkt); + sp->vha->req, &lcont_pkt); cur_dsd = (__le32 *) lcont_pkt.dseg_0_address; avail_dsds = 5; @@ -3431,7 +3429,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) REQUEST_ENTRY_SIZE); ql_dump_buffer( 
ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3045, + sp->vha, 0x3045, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } @@ -3440,7 +3438,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3046, + sp->vha, 0x3046, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } @@ -3452,7 +3450,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) } ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3047, + sp->vha, 0x3047, (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d01c90c7dd04..1fed235a1b4a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -237,6 +237,13 @@ MODULE_PARM_DESC(ql2xfwholdabts, "0 (Default) Do not set fw option. " "1 - Set fw option to hold ABTS."); +int ql2xmvasynctoatio = 1; +module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xmvasynctoatio, + "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" + "0 (Default). Do not move IOCBs" + "1 - Move IOCBs."); + /* * SCSI host template entry points */ @@ -607,11 +614,11 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) } void -qla2x00_sp_free_dma(void *vha, void *ptr) +qla2x00_sp_free_dma(void *ptr) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; + struct qla_hw_data *ha = sp->vha->hw; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct qla_hw_data *ha = sp->fcport->vha->hw; void *ctx = GET_CMD_CTX_SP(sp); if (sp->flags & SRB_DMA_VALID) { @@ -650,20 +657,19 @@ qla2x00_sp_free_dma(void *vha, void *ptr) } CMD_SP(cmd) = NULL; - qla2x00_rel_sp(sp->fcport->vha, sp); + qla2x00_rel_sp(sp); } void -qla2x00_sp_compl(void *data, void *ptr, int res) +qla2x00_sp_compl(void *ptr, int res) { - struct qla_hw_data *ha = (struct qla_hw_data *)data; - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); cmd->result = res; if (atomic_read(&sp->ref_count) == 0) { - ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015, + ql_dbg(ql_dbg_io, sp->vha, 0x3015, "SP reference-count to ZERO -- sp=%p cmd=%p.\n", sp, GET_CMD_SP(sp)); if (ql2xextended_error_logging & ql_dbg_io) @@ -673,12 +679,12 @@ qla2x00_sp_compl(void *data, void *ptr, int res) if (!atomic_dec_and_test(&sp->ref_count)) return; - qla2x00_sp_free_dma(ha, sp); + qla2x00_sp_free_dma(sp); cmd->scsi_done(cmd); } void -qla2xxx_qpair_sp_free_dma(void *vha, void *ptr) +qla2xxx_qpair_sp_free_dma(void *ptr) { srb_t *sp = (srb_t *)ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); @@ -724,9 +730,9 @@ qla2xxx_qpair_sp_free_dma(void *vha, void *ptr) } void -qla2xxx_qpair_sp_compl(void *data, void *ptr, int res) +qla2xxx_qpair_sp_compl(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); cmd->result = res; @@ -742,7 +748,7 @@ qla2xxx_qpair_sp_compl(void *data, void *ptr, int res) if (!atomic_dec_and_test(&sp->ref_count)) return; - qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp); + qla2xxx_qpair_sp_free_dma(sp); cmd->scsi_done(cmd); } @@ -863,7 +869,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) return 0; qc24_host_busy_free_sp: - qla2x00_sp_free_dma(ha, sp); + qla2x00_sp_free_dma(sp); qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -952,7 +958,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, return 0; 
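A recurring change across qla_inline.h, qla_iocb.c, qla_mr.c and qla_os.c above is that the SRB now caches its owning scsi_qla_host in sp->vha at allocation time, so the done and free callbacks drop their separate vha argument. A minimal sketch of the resulting callback shape; the function name and debug id here are illustrative only:

static void example_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;	/* set in qla2x00_get_sp()/qla2xxx_get_qpair_sp() */

	ql_dbg(ql_dbg_io, vha, 0x3000, "sp %p completed, res=%d\n", sp, res);

	sp->free(sp);	/* the free callback likewise reaches the host through sp->vha */
}

This is what lets qla2x00_rel_sp(), qla2x00_sp_free_dma() and the qpair variants shed their vha parameters in the hunks above.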
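Several of the new debug messages in this series (the %8phC and %8phN uses above, for example) rely on the kernel's %*ph printk extensions to dump a small byte array as hex instead of open-coding WWPN formatting. A short usage sketch with a made-up WWPN value:

#include <linux/printk.h>
#include <linux/types.h>

static void example_print_wwpn(void)
{
	static const u8 wwpn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x7f, 0x35, 0x4a };

	pr_info("wwpn %8phC\n", wwpn);	/* 21:00:00:24:ff:7f:35:4a (colon separated) */
	pr_info("wwpn %8phN\n", wwpn);	/* 2100000024ff7f354a (no separator) */
}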
qc24_host_busy_free_sp: - qla2xxx_qpair_sp_free_dma(vha, sp); + qla2xxx_qpair_sp_free_dma(sp); qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -1044,6 +1050,34 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) return (return_status); } +static inline int test_fcport_count(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + int res; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ql_dbg(ql_dbg_init, vha, 0xffff, + "tgt %p, fcport_count=%d\n", + vha, vha->fcport_count); + res = (vha->fcport_count == 0); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return res; +} + +/* + * qla2x00_wait_for_sess_deletion can only be called from remove_one. + * it has dependency on UNLOADING flag to stop device discovery + */ +static void +qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) +{ + qla2x00_mark_all_devices_lost(vha, 0); + + wait_event(vha->fcport_waitQ, test_fcport_count(vha)); +} + /* * qla2x00_wait_for_hba_ready * Wait till the HBA is ready before doing driver unload @@ -1204,7 +1238,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } spin_lock_irqsave(&ha->hardware_lock, flags); - sp->done(ha, sp, 0); + sp->done(sp, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Did the command return during mailbox execution? */ @@ -1249,7 +1283,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, continue; if (sp->type != SRB_SCSI_CMD) continue; - if (vha->vp_idx != sp->fcport->vha->vp_idx) + if (vha->vp_idx != sp->vha->vp_idx) continue; match = 0; cmd = GET_CMD_SP(sp); @@ -1629,7 +1663,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) spin_lock_irqsave(&ha->hardware_lock, flags); } req->outstanding_cmds[cnt] = NULL; - sp->done(vha, sp, res); + sp->done(sp, res); } } } @@ -1815,6 +1849,7 @@ skip_pio: /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; + ha->msix_count = QLA_BASE_VECTORS; if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; @@ -1842,9 +1877,8 @@ skip_pio: "BAR 3 not enabled.\n"); mqiobase_exit: - ha->msix_count = ha->max_rsp_queues + 1; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, - "MSIX Count:%d.\n", ha->msix_count); + "MSIX Count: %d.\n", ha->msix_count); return (0); iospace_error_exit: @@ -1892,6 +1926,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha) /* 83XX 26XX always use MQ type access for queues * - mbar 2, a.k.a region 4 */ ha->max_req_queues = ha->max_rsp_queues = 1; + ha->msix_count = QLA_BASE_VECTORS; ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), pci_resource_len(ha->pdev, 4)); @@ -1915,12 +1950,13 @@ qla83xx_iospace_config(struct qla_hw_data *ha) if (ql2xmqsupport) { /* MB interrupt uses 1 vector */ ha->max_req_queues = ha->msix_count - 1; - ha->max_rsp_queues = ha->max_req_queues; /* ATIOQ needs 1 vector. 
That's 1 less QPair */ if (QLA_TGT_MODE_ENABLED()) ha->max_req_queues--; + ha->max_rsp_queues = ha->max_req_queues; + /* Queue pairs is the max value minus * the base queue pair */ ha->max_qpairs = ha->max_req_queues - 1; @@ -1934,14 +1970,8 @@ qla83xx_iospace_config(struct qla_hw_data *ha) "BAR 1 not enabled.\n"); mqiobase_exit: - ha->msix_count = ha->max_rsp_queues + 1; - if (QLA_TGT_MODE_ENABLED()) - ha->msix_count++; - - qlt_83xx_iospace_config(ha); - ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, - "MSIX Count:%d.\n", ha->msix_count); + "MSIX Count: %d.\n", ha->msix_count); return 0; iospace_error_exit: @@ -3124,7 +3154,8 @@ skip_dpc: ql_dbg(ql_dbg_init, base_vha, 0x00f2, "Init done and hba is online.\n"); - if (qla_ini_mode_enabled(base_vha)) + if (qla_ini_mode_enabled(base_vha) || + qla_dual_mode_enabled(base_vha)) scsi_scan_host(host); else ql_dbg(ql_dbg_init, base_vha, 0x0122, @@ -3373,21 +3404,26 @@ qla2x00_remove_one(struct pci_dev *pdev) * resources. */ if (!atomic_read(&pdev->enable_cnt)) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + scsi_host_put(base_vha->host); kfree(ha); pci_set_drvdata(pdev, NULL); return; } - qla2x00_wait_for_hba_ready(base_vha); - /* if UNLOAD flag is already set, then continue unload, + /* + * if UNLOAD flag is already set, then continue unload, * where it was set first. */ if (test_bit(UNLOADING, &base_vha->dpc_flags)) return; set_bit(UNLOADING, &base_vha->dpc_flags); + dma_free_coherent(&ha->pdev->dev, + base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); if (IS_QLAFX00(ha)) qlafx00_driver_shutdown(base_vha, 20); @@ -3536,10 +3572,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, qla2xxx_wake_dpc(base_vha); } else { int now; - if (rport) + if (rport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phN. 
rport %p roles %x \n", + __func__, fcport->port_name, rport, + rport->roles); fc_remote_port_delete(rport); + } qlt_do_generation_tick(vha, &now); - qlt_fc_port_deleted(vha, fcport, now); } } @@ -3582,7 +3622,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, fcport->login_retry = vha->hw->login_retry_count; ql_dbg(ql_dbg_disc, vha, 0x2067, - "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n", + "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", fcport->port_name, fcport->loop_id, fcport->login_retry); } } @@ -3605,7 +3645,13 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) { fc_port_t *fcport; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Mark all dev lost\n"); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->scan_state = 0; + qlt_schedule_sess_for_deletion_lock(fcport); + if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx) continue; @@ -4195,10 +4241,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, struct scsi_qla_host *vha = NULL; host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); - if (host == NULL) { + if (!host) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, "Failed to allocate host from the scsi layer, aborting.\n"); - goto fail; + return NULL; } /* Clear our data area */ @@ -4217,9 +4263,22 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->logo_list); INIT_LIST_HEAD(&vha->plogi_ack_list); INIT_LIST_HEAD(&vha->qp_list); + INIT_LIST_HEAD(&vha->gnl.fcports); spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); + init_waitqueue_head(&vha->fcport_waitQ); + + vha->gnl.size = sizeof(struct get_name_list_extended) * + (ha->max_loop_id + 1); + vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, + vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); + if (!vha->gnl.l) { + ql_log(ql_log_fatal, vha, 0xffff, + "Alloc failed for name list.\n"); + scsi_remove_host(vha->host); + return NULL; + } sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, vha, 0x0041, @@ -4228,12 +4287,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, dev_name(&(ha->pdev->dev))); return vha; - -fail: - return vha; } -static struct qla_work_evt * +struct qla_work_evt * qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; @@ -4255,7 +4311,7 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) return e; } -static int +int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) { unsigned long flags; @@ -4316,7 +4372,6 @@ int qla2x00_post_async_##name##_work( \ } qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); -qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE); qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); @@ -4369,6 +4424,67 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, return qla2x00_post_work(vha, e); } +int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +static +void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + unsigned long flags; + fc_port_t *fcport = NULL; + struct qlt_plogi_ack_t *pla = + (struct 
qlt_plogi_ack_t *)e->u.new_sess.pla; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); + if (fcport) { + fcport->d_id = e->u.new_sess.id; + if (pla) { + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); + /* we took an extra ref_count to prevent PLOGI ACK when + * fcport/sess has not been created. + */ + pla->ref_count--; + } + } else { + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (fcport) { + fcport->d_id = e->u.new_sess.id; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->flags |= FCF_FABRIC_DEVICE; + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + + memcpy(fcport->port_name, e->u.new_sess.port_name, + WWN_SIZE); + list_add_tail(&fcport->list, &vha->vp_fcports); + + if (pla) { + qlt_plogi_ack_link(vha, pla, fcport, + QLT_PLOGI_LINK_SAME_WWN); + pla->ref_count--; + } + } + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + if (fcport) { + if (pla) + qlt_plogi_ack_unref(vha, pla); + else + qla24xx_async_gnl(vha, fcport); + } +} + void qla2x00_do_work(struct scsi_qla_host *vha) { @@ -4395,10 +4511,6 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla2x00_async_login(vha, e->u.logio.fcport, e->u.logio.data); break; - case QLA_EVT_ASYNC_LOGIN_DONE: - qla2x00_async_login_done(vha, e->u.logio.fcport, - e->u.logio.data); - break; case QLA_EVT_ASYNC_LOGOUT: qla2x00_async_logout(vha, e->u.logio.fcport); break; @@ -4420,6 +4532,34 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_AENFX: qlafx00_process_aen(vha, e); break; + case QLA_EVT_GIDPN: + qla24xx_async_gidpn(vha, e->u.fcport.fcport); + break; + case QLA_EVT_GPNID: + qla24xx_async_gpnid(vha, &e->u.gpnid.id); + break; + case QLA_EVT_GPNID_DONE: + qla24xx_async_gpnid_done(vha, e->u.iosb.sp); + break; + case QLA_EVT_NEW_SESS: + qla24xx_create_new_sess(vha, e); + break; + case QLA_EVT_GPDB: + qla24xx_async_gpdb(vha, e->u.fcport.fcport, + e->u.fcport.opt); + break; + case QLA_EVT_GPSC: + qla24xx_async_gpsc(vha, e->u.fcport.fcport); + break; + case QLA_EVT_UPD_FCPORT: + qla2x00_update_fcport(vha, e->u.fcport.fcport); + break; + case QLA_EVT_GNL: + qla24xx_async_gnl(vha, e->u.fcport.fcport); + break; + case QLA_EVT_NACK: + qla24xx_do_nack_work(vha, e); + break; } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); @@ -4436,9 +4576,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) { fc_port_t *fcport; int status; - uint16_t next_loopid = 0; - struct qla_hw_data *ha = vha->hw; - uint16_t data[2]; + struct event_arg ea; list_for_each_entry(fcport, &vha->vp_fcports, list) { /* @@ -4449,77 +4587,38 @@ void qla2x00_relogin(struct scsi_qla_host *vha) fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { fcport->login_retry--; if (fcport->flags & FCF_FABRIC_DEVICE) { - if (fcport->flags & FCF_FCP2_DEVICE) - ha->isp_ops->fabric_logout(vha, - fcport->loop_id, - fcport->d_id.b.domain, - fcport->d_id.b.area, - fcport->d_id.b.al_pa); - - if (fcport->loop_id == FC_NO_LOOP_ID) { - fcport->loop_id = next_loopid = - ha->min_external_loopid; - status = qla2x00_find_new_loop_id( - vha, fcport); - if (status != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } - } - - if (IS_ALOGIO_CAPABLE(ha)) { - fcport->flags |= FCF_ASYNC_SENT; - data[0] = 0; - data[1] = QLA_LOGIO_LOGIN_RETRIED; - status = qla2x00_post_async_login_work( - vha, fcport, data); - if (status == QLA_SUCCESS) - continue; - /* Attempt a retry. 
*/ - status = 1; - } else { - status = qla2x00_fabric_login(vha, - fcport, &next_loopid); - if (status == QLA_SUCCESS) { - int status2; - uint8_t opts; - - opts = 0; - if (fcport->flags & - FCF_FCP2_DEVICE) - opts |= BIT_1; - status2 = - qla2x00_get_port_database( - vha, fcport, opts); - if (status2 != QLA_SUCCESS) - status = 1; - } - } - } else + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phC DS %d LS %d\n", __func__, + fcport->port_name, fcport->disc_state, + fcport->fw_login_state); + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_RELOGIN; + ea.fcport = fcport; + qla2x00_fcport_event_handler(vha, &ea); + } else { status = qla2x00_local_device_login(vha, fcport); + if (status == QLA_SUCCESS) { + fcport->old_loop_id = fcport->loop_id; + ql_dbg(ql_dbg_disc, vha, 0x2003, + "Port login OK: logged in ID 0x%x.\n", + fcport->loop_id); + qla2x00_update_fcport(vha, fcport); + } else if (status == 1) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + /* retry the login again */ + ql_dbg(ql_dbg_disc, vha, 0x2007, + "Retrying %d login again loop_id 0x%x.\n", + fcport->login_retry, + fcport->loop_id); + } else { + fcport->login_retry = 0; + } - if (status == QLA_SUCCESS) { - fcport->old_loop_id = fcport->loop_id; - - ql_dbg(ql_dbg_disc, vha, 0x2003, - "Port login OK: logged in ID 0x%x.\n", - fcport->loop_id); - - qla2x00_update_fcport(vha, fcport); - - } else if (status == 1) { - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - /* retry the login again */ - ql_dbg(ql_dbg_disc, vha, 0x2007, - "Retrying %d login again loop_id 0x%x.\n", - fcport->login_retry, fcport->loop_id); - } else { - fcport->login_retry = 0; + if (fcport->login_retry == 0 && + status != QLA_SUCCESS) + qla2x00_clear_loop_id(fcport); } - - if (fcport->login_retry == 0 && status != QLA_SUCCESS) - qla2x00_clear_loop_id(fcport); } if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; @@ -5183,7 +5282,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - /* if UNLOAD flag is already set, then continue unload, + /* + * if UNLOAD flag is already set, then continue unload, * where it was set first. */ if (test_bit(UNLOADING, &base_vha->dpc_flags)) @@ -5192,6 +5292,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); + qla2x00_wait_for_sess_deletion(base_vha); + set_bit(UNLOADING, &base_vha->dpc_flags); qla2x00_delete_all_vps(ha, base_vha); @@ -5410,16 +5512,6 @@ qla2x00_do_dpc(void *data) qla2x00_update_fcports(base_vha); } - if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) { - int ret; - ret = qla2x00_send_change_request(base_vha, 0x3, 0); - if (ret != QLA_SUCCESS) - ql_log(ql_log_warn, base_vha, 0x121, - "Failed to enable receiving of RSCN " - "requests: 0x%x.\n", ret); - clear_bit(SCR_PENDING, &base_vha->dpc_flags); - } - if (IS_QLAFX00(ha)) goto loop_resync_check; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index e4fda84b959e..45f5077684f0 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -55,8 +55,17 @@ MODULE_PARM_DESC(qlini_mode, "disabled on enabling target mode and then on disabling target mode " "enabled back; " "\"disabled\" - initiator mode will never be enabled; " + "\"dual\" - Initiator Modes will be enabled. 
Target Mode can be activated " + "when ready " "\"enabled\" (default) - initiator mode will always stay enabled."); +static int ql_dm_tgt_ex_pct = 50; +module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql_dm_tgt_ex_pct, + "For Dual Mode (qlini_mode=dual), this parameter determines " + "the percentage of exchanges/cmds FW will allocate resources " + "for Target mode."); + int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; static int temp_sam_status = SAM_STAT_BUSY; @@ -102,12 +111,10 @@ enum fcp_resp_rsp_codes { static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, struct atio_from_isp *pkt, uint8_t); static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); -static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, +static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags); static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); -static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, - struct qla_tgt_srr_imm *imm, int ha_lock); static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd); static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, @@ -120,6 +127,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *imm, int ha_locked); +static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, + fc_port_t *fcport, bool local); +void qlt_unreg_sess(struct fc_port *sess); /* * Global Variables */ @@ -140,21 +150,6 @@ void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) wmb(); } -/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ -static struct qla_tgt_sess *qlt_find_sess_by_port_name( - struct qla_tgt *tgt, - const uint8_t *port_name) -{ - struct qla_tgt_sess *sess; - - list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) { - if (!memcmp(sess->port_name, port_name, WWN_SIZE)) - return sess; - } - - return NULL; -} - /* Might release hw lock, then reaquire!! 
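ql_dm_tgt_ex_pct above is introduced with the module_param()/MODULE_PARM_DESC() pair, which makes it both a load-time option and, because the permission mask is S_IRUGO|S_IWUSR, a root-writable file under /sys/module/<module>/parameters/. A stripped-down illustration of the same mechanism, with a hypothetical module and parameter name, might look like:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Readable by everyone, writable by root through sysfs. */
static int demo_ex_pct = 50;
module_param(demo_ex_pct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(demo_ex_pct,
	"Percentage of exchange resources reserved for target mode (0-100).");

static int __init demo_init(void)
{
	pr_info("demo: demo_ex_pct=%d\n", demo_ex_pct);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

Loading with something like "modprobe demo demo_ex_pct=75" would override the default. A value changed later through sysfs only takes effect once the driver re-reads it, which is presumably why the description above ties the percentage to the point where target mode is activated.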
*/ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) { @@ -229,6 +224,105 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } + +static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, + struct atio_from_isp *atio, uint8_t ha_locked) +{ + struct qla_tgt_sess_op *u; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + + if (tgt->tgt_stop) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "qla_target(%d): dropping unknown ATIO_TYPE7, " + "because tgt is being stopped", vha->vp_idx); + goto out_term; + } + + u = kzalloc(sizeof(*u), GFP_ATOMIC); + if (u == NULL) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Alloc of struct unknown_atio (size %zd) failed", sizeof(*u)); + /* It should be harmless and on the next retry should work well */ + goto out_term; + } + + u->vha = vha; + memcpy(&u->atio, atio, sizeof(*atio)); + INIT_LIST_HEAD(&u->cmd_list); + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_add_tail(&u->cmd_list, &vha->unknown_atio_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + schedule_delayed_work(&vha->unknown_atio_work, 1); + +out: + return; + +out_term: + qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0); + goto out; +} + +static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, + uint8_t ha_locked) +{ + struct qla_tgt_sess_op *u, *t; + scsi_qla_host_t *host; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + uint8_t queued = 0; + + list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) { + if (u->aborted) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Freeing unknown %s %p, because of Abort", + "ATIO_TYPE7", u); + qlt_send_term_exchange(vha, NULL, &u->atio, + ha_locked, 0); + goto abort; + } + + host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); + if (host != NULL) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Requeuing unknown ATIO_TYPE7 %p", u); + qlt_24xx_atio_pkt(host, &u->atio, ha_locked); + } else if (tgt->tgt_stop) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Freeing unknown %s %p, because tgt is being stopped", + "ATIO_TYPE7", u); + qlt_send_term_exchange(vha, NULL, &u->atio, + ha_locked, 0); + } else { + ql_dbg(ql_dbg_async, vha, 0xffff, + "u %p, vha %p, host %p, sched again..", u, + vha, host); + if (!queued) { + queued = 1; + schedule_delayed_work(&vha->unknown_atio_work, + 1); + } + continue; + } + +abort: + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&u->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + kfree(u); + } +} + +void qlt_unknown_atio_work_fn(struct work_struct *work) +{ + struct scsi_qla_host *vha = container_of(to_delayed_work(work), + struct scsi_qla_host, unknown_atio_work); + + qlt_try_to_dequeue_unknown_atios(vha, 0); +} + static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint8_t ha_locked) { @@ -249,8 +343,14 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, atio->u.isp24.fcp_hdr.d_id[0], atio->u.isp24.fcp_hdr.d_id[1], atio->u.isp24.fcp_hdr.d_id[2]); + + + qlt_queue_unknown_atio(vha, atio, ha_locked); break; } + if (unlikely(!list_empty(&vha->unknown_atio_list))) + qlt_try_to_dequeue_unknown_atios(vha, ha_locked); + qlt_24xx_atio_pkt(host, atio, ha_locked); break; } @@ -278,6 +378,31 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, break; } + case VP_RPT_ID_IOCB_TYPE: + qla24xx_report_id_acquisition(vha, + (struct vp_rpt_id_entry_24xx *)atio); + break; + + case 
ABTS_RECV_24XX: + { + struct abts_recv_from_24xx *entry = + (struct abts_recv_from_24xx *)atio; + struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + entry->vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xffff, + "qla_target(%d): Response pkt (ABTS_RECV_24XX) " + "received, with unknown vp_index %d\n", + vha->vp_idx, entry->vp_index); + break; + } + qlt_response_pkt(host, (response_t *)atio); + break; + + } + + /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ + default: ql_dbg(ql_dbg_tgt, vha, 0xe040, "qla_target(%d): Received unknown ATIO atio " @@ -395,22 +520,263 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) /* * All qlt_plogi_ack_t operations are protected by hardware_lock */ +static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, + struct imm_ntfy_from_isp *ntfy, int type) +{ + struct qla_work_evt *e; + e = qla2x00_alloc_work(vha, QLA_EVT_NACK); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.nack.fcport = fcport; + e->u.nack.type = type; + memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp)); + return qla2x00_post_work(vha, e); +} + +static +void qla2x00_async_nack_sp_done(void *s, int res) +{ + struct srb *sp = (struct srb *)s; + struct scsi_qla_host *vha = sp->vha; + unsigned long flags; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x %8phC type %d\n", + sp->name, res, sp->fcport->port_name, sp->type); + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + sp->fcport->flags &= ~FCF_ASYNC_SENT; + sp->fcport->chip_reset = vha->hw->chip_reset; + + switch (sp->type) { + case SRB_NACK_PLOGI: + sp->fcport->login_gen++; + sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; + sp->fcport->logout_on_delete = 1; + break; + + case SRB_NACK_PRLI: + sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; + sp->fcport->deleted = 0; + + if (!sp->fcport->login_succ && + !IS_SW_RESV_ADDR(sp->fcport->d_id)) { + sp->fcport->login_succ = 1; + + vha->fcport_count++; + + if (!IS_IIDMA_CAPABLE(vha->hw) || + !vha->hw->flags.gpsc_supported) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, + sp->fcport->port_name, + vha->fcport_count); + + qla24xx_post_upd_fcport_work(vha, sp->fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, + sp->fcport->port_name, + vha->fcport_count); + + qla24xx_post_gpsc_work(vha, sp->fcport); + } + } + break; + + case SRB_NACK_LOGO: + sp->fcport->login_gen++; + sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE); + break; + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + sp->free(sp); +} + +int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, + struct imm_ntfy_from_isp *ntfy, int type) +{ + int rval = QLA_FUNCTION_FAILED; + srb_t *sp; + char *c = NULL; + + fcport->flags |= FCF_ASYNC_SENT; + switch (type) { + case SRB_NACK_PLOGI: + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + c = "PLOGI"; + break; + case SRB_NACK_PRLI: + fcport->fw_login_state = DSC_LS_PRLI_PEND; + c = "PRLI"; + break; + case SRB_NACK_LOGO: + fcport->fw_login_state = DSC_LS_LOGO_PEND; + c = "LOGO"; + break; + } + + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + goto done; + + sp->type = type; + sp->name = "nack"; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); + + sp->u.iocb_cmd.u.nack.ntfy = ntfy; + + sp->done = qla2x00_async_nack_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != 
QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s %8phC hndl %x %s\n", + sp->name, fcport->port_name, sp->handle, c); + + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + fc_port_t *t; + unsigned long flags; + + switch (e->u.nack.type) { + case SRB_NACK_PRLI: + mutex_lock(&vha->vha_tgt.tgt_mutex); + t = qlt_create_sess(vha, e->u.nack.fcport, 0); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + if (t) { + ql_log(ql_log_info, vha, 0xffff, + "%s create sess success %p", __func__, t); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + /* create sess has an extra kref */ + vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + } + break; + } + qla24xx_async_notify_ack(vha, e->u.nack.fcport, + (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type); +} + +void qla24xx_delete_sess_fn(struct work_struct *work) +{ + fc_port_t *fcport = container_of(work, struct fc_port, del_work); + struct qla_hw_data *ha = fcport->vha->hw; + unsigned long flags; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + + if (fcport->se_sess) { + ha->tgt.tgt_ops->shutdown_sess(fcport); + ha->tgt.tgt_ops->put_sess(fcport); + } else { + qlt_unreg_sess(fcport); + } + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} + +/* + * Called from qla2x00_reg_remote_port() + */ +void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct fc_port *sess = fcport; + unsigned long flags; + + if (!vha->hw->tgt.tgt_ops) + return; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (tgt->tgt_stop) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (fcport->disc_state == DSC_DELETE_PEND) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (!sess->se_sess) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + mutex_lock(&vha->vha_tgt.tgt_mutex); + sess = qlt_create_sess(vha, fcport, false); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + } else { + if (fcport->fw_login_state == DSC_LS_PRLI_COMP) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: kref_get fail sess %8phC \n", + __func__, sess->port_name); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, + "qla_target(%u): %ssession for port %8phC " + "(loop ID %d) reappeared\n", vha->vp_idx, + sess->local ? "local " : "", sess->port_name, sess->loop_id); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, + "Reappeared sess %p\n", sess); + + ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, + fcport->loop_id, + (fcport->flags & FCF_CONF_COMP_SUPPORTED)); + } + + if (sess && sess->local) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, + "qla_target(%u): local session for " + "port %8phC (loop ID %d) became global\n", vha->vp_idx, + fcport->port_name, sess->loop_id); + sess->local = 0; + } + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} /* * This is a zero-base ref-counting solution, since hardware_lock * guarantees that ref_count is not modified concurrently. 
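qlt_queue_unknown_atio() and qlt_try_to_dequeue_unknown_atios(), added a little earlier in this patch, park ATIOs whose destination port cannot be resolved yet on vha->unknown_atio_list and revisit them from a delayed work item, either re-routing them, dropping them on abort or target stop, or re-arming the worker. The general shape (copy the packet, queue it, and let a bounded delayed-work retry loop deal with it) can be sketched as follows; all names here are illustrative, not the driver's:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_pkt { u8 data[64]; };

struct demo_pending {
	struct list_head node;
	struct demo_pkt pkt;
	int retries;
};

static LIST_HEAD(demo_pending_list);
static DEFINE_SPINLOCK(demo_pending_lock);

static void demo_retry_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_retry_work, demo_retry_work_fn);

/* Stand-in for the real routing lookup; returns true once deliverable. */
static bool demo_try_deliver(const struct demo_pkt *pkt)
{
	return false;
}

/* Called from the fast path when the packet cannot be routed yet. */
static void demo_queue_pending(const struct demo_pkt *pkt)
{
	struct demo_pending *p = kzalloc(sizeof(*p), GFP_ATOMIC);
	unsigned long flags;

	if (!p)
		return;		/* real code would terminate the exchange */
	memcpy(&p->pkt, pkt, sizeof(p->pkt));

	spin_lock_irqsave(&demo_pending_lock, flags);
	list_add_tail(&p->node, &demo_pending_list);
	spin_unlock_irqrestore(&demo_pending_lock, flags);

	schedule_delayed_work(&demo_retry_work, 1);	/* poll again soon */
}

static void demo_retry_work_fn(struct work_struct *work)
{
	struct demo_pending *p, *t;
	unsigned long flags;
	LIST_HEAD(requeue);

	spin_lock_irqsave(&demo_pending_lock, flags);
	list_splice_init(&demo_pending_list, &requeue);
	spin_unlock_irqrestore(&demo_pending_lock, flags);

	list_for_each_entry_safe(p, t, &requeue, node) {
		list_del(&p->node);
		if (demo_try_deliver(&p->pkt) || ++p->retries > 10) {
			kfree(p);	/* delivered, or given up on */
			continue;
		}
		/* Still unroutable: park it again and re-arm the worker. */
		spin_lock_irqsave(&demo_pending_lock, flags);
		list_add_tail(&p->node, &demo_pending_list);
		spin_unlock_irqrestore(&demo_pending_lock, flags);
		schedule_delayed_work(&demo_retry_work, 1);
	}
}

Bounding the retries (or, as the driver does, dropping entries once the target is stopping or the exchange is aborted) keeps the pending list from growing without limit.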
* Upon successful return content of iocb is undefined */ -static qlt_plogi_ack_t * +static struct qlt_plogi_ack_t * qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, struct imm_ntfy_from_isp *iocb) { - qlt_plogi_ack_t *pla; + struct qlt_plogi_ack_t *pla; list_for_each_entry(pla, &vha->plogi_ack_list, list) { if (pla->id.b24 == id->b24) { qlt_send_term_imm_notif(vha, &pla->iocb, 1); - pla->iocb = *iocb; + memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); return pla; } } @@ -423,50 +789,78 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, return NULL; } - pla->iocb = *iocb; + memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); pla->id = *id; list_add_tail(&pla->list, &vha->plogi_ack_list); return pla; } -static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla) +void qlt_plogi_ack_unref(struct scsi_qla_host *vha, + struct qlt_plogi_ack_t *pla) { + struct imm_ntfy_from_isp *iocb = &pla->iocb; + port_id_t port_id; + uint16_t loop_id; + fc_port_t *fcport = pla->fcport; + BUG_ON(!pla->ref_count); pla->ref_count--; if (pla->ref_count) return; - ql_dbg(ql_dbg_async, vha, 0x5089, + ql_dbg(ql_dbg_disc, vha, 0x5089, "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x" - " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name, - pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1], - pla->iocb.u.isp24.port_id[0], - le16_to_cpu(pla->iocb.u.isp24.nport_handle), - pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id); - qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0); + " exch %#x ox_id %#x\n", iocb->u.isp24.port_name, + iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], + iocb->u.isp24.port_id[0], + le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.exchange_address, iocb->ox_id); + + port_id.b.domain = iocb->u.isp24.port_id[2]; + port_id.b.area = iocb->u.isp24.port_id[1]; + port_id.b.al_pa = iocb->u.isp24.port_id[0]; + port_id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); + + fcport->loop_id = loop_id; + fcport->d_id = port_id; + qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla) + fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; + if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla) + fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; + } list_del(&pla->list); kmem_cache_free(qla_tgt_plogi_cachep, pla); } -static void -qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla, - struct qla_tgt_sess *sess, qlt_plogi_link_t link) +void +qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla, + struct fc_port *sess, enum qlt_plogi_link_t link) { + struct imm_ntfy_from_isp *iocb = &pla->iocb; /* Inc ref_count first because link might already be pointing at pla */ pla->ref_count++; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, + "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" + " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n", + sess, link, sess->port_name, + iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], + iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], + pla->ref_count, pla, link); + if (sess->plogi_link[link]) qlt_plogi_ack_unref(vha, sess->plogi_link[link]); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, - "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" - " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name, - pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2], - pla->iocb.u.isp24.port_id[1], 
pla->iocb.u.isp24.port_id[0], - pla->ref_count); + if (link == QLT_PLOGI_LINK_SAME_WWN) + pla->fcport = sess; sess->plogi_link[link] = pla; } @@ -519,49 +913,45 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) static void qlt_free_session_done(struct work_struct *work) { - struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess, + struct fc_port *sess = container_of(work, struct fc_port, free_work); struct qla_tgt *tgt = sess->tgt; struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; bool logout_started = false; - fc_port_t fcport; + struct event_arg ea; + scsi_qla_host_t *base_vha; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, + sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, sess->logout_on_delete, sess->keep_nport_handle, sess->send_els_logo); - BUG_ON(!tgt); - if (sess->send_els_logo) { - qlt_port_logo_t logo; - logo.id = sess->s_id; - logo.cmd_count = 0; - qlt_send_first_logo(vha, &logo); - } + if (!IS_SW_RESV_ADDR(sess->d_id)) { + if (sess->send_els_logo) { + qlt_port_logo_t logo; - if (sess->logout_on_delete) { - int rc; + logo.id = sess->d_id; + logo.cmd_count = 0; + qlt_send_first_logo(vha, &logo); + } - memset(&fcport, 0, sizeof(fcport)); - fcport.loop_id = sess->loop_id; - fcport.d_id = sess->s_id; - memcpy(fcport.port_name, sess->port_name, WWN_SIZE); - fcport.vha = vha; - fcport.tgt_session = sess; - - rc = qla2x00_post_async_logout_work(vha, &fcport, NULL); - if (rc != QLA_SUCCESS) - ql_log(ql_log_warn, vha, 0xf085, - "Schedule logo failed sess %p rc %d\n", - sess, rc); - else - logout_started = true; + if (sess->logout_on_delete) { + int rc; + + rc = qla2x00_post_async_logout_work(vha, sess, NULL); + if (rc != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0xf085, + "Schedule logo failed sess %p rc %d\n", + sess, rc); + else + logout_started = true; + } } /* @@ -583,29 +973,61 @@ static void qlt_free_session_done(struct work_struct *work) msleep(100); } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087, - "%s: sess %p logout completed\n", - __func__, sess); + ql_dbg(ql_dbg_disc, vha, 0xf087, + "%s: sess %p logout completed\n",__func__, sess); } - spin_lock_irqsave(&ha->hardware_lock, flags); + if (sess->logo_ack_needed) { + sess->logo_ack_needed = 0; + qla24xx_async_notify_ack(vha, sess, + (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); + } + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (sess->se_sess) { + sess->se_sess = NULL; + if (tgt && !IS_SW_RESV_ADDR(sess->d_id)) + tgt->sess_count--; + } + + sess->disc_state = DSC_DELETED; + sess->fw_login_state = DSC_LS_PORT_UNAVAIL; + sess->deleted = QLA_SESS_DELETED; + sess->login_retry = vha->hw->login_retry_count; + + if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { + vha->fcport_count--; + sess->login_succ = 0; + } + + if (sess->chip_reset != sess->vha->hw->chip_reset) + qla2x00_clear_loop_id(sess); + + if (sess->conflict) { + sess->conflict->login_pause = 0; + sess->conflict = NULL; + if (!test_bit(UNLOADING, &vha->dpc_flags)) + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } { - qlt_plogi_ack_t *own = + struct qlt_plogi_ack_t *own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; - qlt_plogi_ack_t *con = + struct qlt_plogi_ack_t *con = sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; + struct imm_ntfy_from_isp *iocb; if (con) 
{ + iocb = &con->iocb; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099, - "se_sess %p / sess %p port %8phC is gone," - " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", - sess->se_sess, sess, sess->port_name, - own ? "releasing own PLOGI" : - "no own PLOGI pending", - own ? own->ref_count : -1, - con->iocb.u.isp24.port_name, con->ref_count); + "se_sess %p / sess %p port %8phC is gone," + " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", + sess->se_sess, sess, sess->port_name, + own ? "releasing own PLOGI" : "no own PLOGI pending", + own ? own->ref_count : -1, + iocb->u.isp24.port_name, con->ref_count); qlt_plogi_ack_unref(vha, con); + sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a, "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n", @@ -615,59 +1037,64 @@ static void qlt_free_session_done(struct work_struct *work) own ? own->ref_count : -1); } - if (own) + if (own) { + sess->fw_login_state = DSC_LS_PLOGI_PEND; qlt_plogi_ack_unref(vha, own); + sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; + } } - - list_del(&sess->sess_list_entry); - - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, - "Unregistration of sess %p finished\n", sess); + "Unregistration of sess %p %8phC finished fcp_cnt %d\n", + sess, sess->port_name, vha->fcport_count); - kfree(sess); - /* - * We need to protect against race, when tgt is freed before or - * inside wake_up() - */ - tgt->sess_count--; - if (tgt->sess_count == 0) + if (tgt && (tgt->sess_count == 0)) wake_up_all(&tgt->waitQ); + + if (vha->fcport_count == 0) + wake_up_all(&vha->fcport_waitQ); + + base_vha = pci_get_drvdata(ha->pdev); + if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) + return; + + if (!tgt || !tgt->tgt_stop) { + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_DELETE_DONE; + ea.fcport = sess; + qla2x00_fcport_event_handler(vha, &ea); + } } /* ha->tgt.sess_lock supposed to be held on entry */ -static void qlt_release_session(struct kref *kref) +void qlt_unreg_sess(struct fc_port *sess) { - struct qla_tgt_sess *sess = - container_of(kref, struct qla_tgt_sess, sess_kref); struct scsi_qla_host *vha = sess->vha; + ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + "%s sess %p for deletion %8phC\n", + __func__, sess, sess->port_name); + if (sess->se_sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); - if (!list_empty(&sess->del_list_entry)) - list_del_init(&sess->del_list_entry); + qla2x00_mark_device_lost(vha, sess, 1, 1); + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + sess->disc_state = DSC_DELETE_PEND; + sess->last_rscn_gen = sess->rscn_gen; + sess->last_login_gen = sess->login_gen; INIT_WORK(&sess->free_work, qlt_free_session_done); schedule_work(&sess->free_work); } - -void qlt_put_sess(struct qla_tgt_sess *sess) -{ - if (!sess) - return; - - assert_spin_locked(&sess->vha->hw->tgt.sess_lock); - kref_put(&sess->sess_kref, qlt_release_session); -} -EXPORT_SYMBOL(qlt_put_sess); +EXPORT_SYMBOL(qlt_unreg_sess); static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; uint16_t loop_id; int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; @@ -680,31 +1107,6 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) spin_lock_irqsave(&ha->tgt.sess_lock, flags); qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); -#if 
0 /* FIXME: do we need to choose a session here? */ - if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { - sess = list_entry(ha->tgt.qla_tgt->sess_list.next, - typeof(*sess), sess_list_entry); - switch (mcmd) { - case QLA_TGT_NEXUS_LOSS_SESS: - mcmd = QLA_TGT_NEXUS_LOSS; - break; - case QLA_TGT_ABORT_ALL_SESS: - mcmd = QLA_TGT_ABORT_ALL; - break; - case QLA_TGT_NEXUS_LOSS: - case QLA_TGT_ABORT_ALL: - break; - default: - ql_dbg(ql_dbg_tgt, vha, 0xe046, - "qla_target(%d): Not allowed " - "command %x in %s", vha->vp_idx, - mcmd, __func__); - sess = NULL; - break; - } - } else - sess = NULL; -#endif } else { spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); @@ -726,57 +1128,69 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); } +static void qla24xx_chk_fcp_state(struct fc_port *sess) +{ + if (sess->chip_reset != sess->vha->hw->chip_reset) { + sess->logout_on_delete = 0; + sess->logo_ack_needed = 0; + sess->fw_login_state = DSC_LS_PORT_UNAVAIL; + sess->scan_state = 0; + } +} + /* ha->tgt.sess_lock supposed to be held on entry */ -static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, +void qlt_schedule_sess_for_deletion(struct fc_port *sess, bool immediate) { struct qla_tgt *tgt = sess->tgt; - uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; - if (sess->deleted) { - /* Upgrade to unconditional deletion in case it was temporary */ - if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING) - list_del(&sess->del_list_entry); - else + if (sess->disc_state == DSC_DELETE_PEND) + return; + + if (sess->disc_state == DSC_DELETED) { + if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) + wake_up_all(&tgt->waitQ); + if (sess->vha->fcport_count == 0) + wake_up_all(&sess->vha->fcport_waitQ); + + if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && + !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) return; } - ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, - "Scheduling sess %p for deletion\n", sess); + sess->disc_state = DSC_DELETE_PEND; - if (immediate) { - dev_loss_tmo = 0; - sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - list_add(&sess->del_list_entry, &tgt->del_sess_list); - } else { - sess->deleted = QLA_SESS_DELETION_PENDING; - list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); - } + if (sess->deleted == QLA_SESS_DELETED) + sess->logout_on_delete = 0; - sess->expires = jiffies + dev_loss_tmo * HZ; + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + qla24xx_chk_fcp_state(sess); - ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, - "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)" - " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n", - sess->vha->vp_idx, sess->port_name, sess->loop_id, - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, - dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete, - sess->generation); + ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, + "Scheduling sess %p for deletion\n", sess); - if (immediate) - mod_delayed_work(system_wq, &tgt->sess_del_work, 0); - else - schedule_delayed_work(&tgt->sess_del_work, - sess->expires - jiffies); + schedule_work(&sess->del_work); +} + +void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess) +{ + unsigned long flags; + struct qla_hw_data *ha = sess->vha->hw; + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + qlt_schedule_sess_for_deletion(sess, 1); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } /* 
ha->tgt.sess_lock supposed to be held on entry */ static void qlt_clear_tgt_db(struct qla_tgt *tgt) { - struct qla_tgt_sess *sess; + struct fc_port *sess; + scsi_qla_host_t *vha = tgt->vha; - list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) - qlt_schedule_sess_for_deletion(sess, true); + list_for_each_entry(sess, &vha->vp_fcports, list) { + if (sess->se_sess) + qlt_schedule_sess_for_deletion(sess, 1); + } /* At this point tgt could be already dead */ } @@ -830,240 +1244,84 @@ out_free_id_list: return res; } -/* ha->tgt.sess_lock supposed to be held on entry */ -static void qlt_undelete_sess(struct qla_tgt_sess *sess) -{ - BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING); - - list_del_init(&sess->del_list_entry); - sess->deleted = 0; -} - -static void qlt_del_sess_work_fn(struct delayed_work *work) -{ - struct qla_tgt *tgt = container_of(work, struct qla_tgt, - sess_del_work); - struct scsi_qla_host *vha = tgt->vha; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; - unsigned long flags, elapsed; - - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - while (!list_empty(&tgt->del_sess_list)) { - sess = list_entry(tgt->del_sess_list.next, typeof(*sess), - del_list_entry); - elapsed = jiffies; - if (time_after_eq(elapsed, sess->expires)) { - /* No turning back */ - list_del_init(&sess->del_list_entry); - sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, - "Timeout: sess %p about to be deleted\n", - sess); - if (sess->se_sess) - ha->tgt.tgt_ops->shutdown_sess(sess); - qlt_put_sess(sess); - } else { - schedule_delayed_work(&tgt->sess_del_work, - sess->expires - elapsed); - break; - } - } - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); -} - /* * Adds an extra ref to allow to drop hw lock after adding sess to the list. * Caller must put it. 
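qlt_free_session_done() above now performs the final session teardown from the fc_port's free_work item and wakes both tgt->waitQ and the newly added vha->fcport_waitQ once the respective counters reach zero, so that unload paths (for example the qla2x00_wait_for_sess_deletion() call added in the PCI-error handler) can block until every session is gone. Reduced to its essentials, the pattern is a counter under a lock plus wait_event()/wake_up_all(); a sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(demo_sess_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_sess_waitq);
static int demo_session_count;

struct demo_session {
	struct work_struct free_work;
	/* ... resources owned by the session ... */
};

/* Runs in process context; the last teardown wakes any waiters. */
static void demo_free_session_fn(struct work_struct *work)
{
	struct demo_session *s = container_of(work, struct demo_session,
					      free_work);
	unsigned long flags;

	/* ... release s's resources, possibly sleeping ... */
	kfree(s);

	spin_lock_irqsave(&demo_sess_lock, flags);
	demo_session_count--;
	spin_unlock_irqrestore(&demo_sess_lock, flags);

	wake_up_all(&demo_sess_waitq);
}

/* May be called with locks held: it only schedules the real work. */
static void demo_schedule_free(struct demo_session *s)
{
	INIT_WORK(&s->free_work, demo_free_session_fn);
	schedule_work(&s->free_work);
}

static int demo_session_count_read(void)
{
	unsigned long flags;
	int n;

	spin_lock_irqsave(&demo_sess_lock, flags);
	n = demo_session_count;
	spin_unlock_irqrestore(&demo_sess_lock, flags);
	return n;
}

/* e.g. called before unload or PCI teardown. */
static void demo_wait_for_sessions(void)
{
	wait_event(demo_sess_waitq, demo_session_count_read() == 0);
}

Pushing the teardown into a work item keeps the callers, which often hold spinlocks, out of the sleeping parts, while the wait queue gives shutdown paths a single place to block on.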
*/ -static struct qla_tgt_sess *qlt_create_sess( +static struct fc_port *qlt_create_sess( struct scsi_qla_host *vha, fc_port_t *fcport, bool local) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess = fcport; unsigned long flags; - /* Check to avoid double sessions */ - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list, - sess_list_entry) { - if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, - "Double sess %p found (s_id %x:%x:%x, " - "loop_id %d), updating to d_id %x:%x:%x, " - "loop_id %d", sess, sess->s_id.b.domain, - sess->s_id.b.al_pa, sess->s_id.b.area, - sess->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.al_pa, fcport->d_id.b.area, - fcport->loop_id); - - /* Cannot undelete at this point */ - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, - flags); - return NULL; - } - - if (sess->deleted) - qlt_undelete_sess(sess); - - if (!sess->se_sess) { - if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, - &sess->port_name[0], sess) < 0) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - return NULL; - } - } - - kref_get(&sess->sess_kref); - ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, - (fcport->flags & FCF_CONF_COMP_SUPPORTED)); - - if (sess->local && !local) - sess->local = 0; - - qlt_do_generation_tick(vha, &sess->generation); - - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + if (vha->vha_tgt.qla_tgt->tgt_stop) + return NULL; - return sess; + if (fcport->se_sess) { + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: kref_get_unless_zero failed for %8phC\n", + __func__, sess->port_name); + return NULL; } - } - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - sess = kzalloc(sizeof(*sess), GFP_KERNEL); - if (!sess) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a, - "qla_target(%u): session allocation failed, all commands " - "from port %8phC will be refused", vha->vp_idx, - fcport->port_name); - - return NULL; + return fcport; } sess->tgt = vha->vha_tgt.qla_tgt; - sess->vha = vha; - sess->s_id = fcport->d_id; - sess->loop_id = fcport->loop_id; sess->local = local; - kref_init(&sess->sess_kref); - INIT_LIST_HEAD(&sess->del_list_entry); - /* Under normal circumstances we want to logout from firmware when + /* + * Under normal circumstances we want to logout from firmware when * session eventually ends and release corresponding nport handle. * In the exception cases (e.g. when new PLOGI is waiting) corresponding - * code will adjust these flags as necessary. */ + * code will adjust these flags as necessary. 
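The rewritten qlt_create_sess() and its callers manage session lifetime with a struct kref embedded in the fc_port: kref_init() once the session becomes usable, kref_get_unless_zero() on lookup paths that may race with the final put, and a put through the tgt_ops->put_sess() hook. In isolation the kref idiom looks like this (generic object, not the driver's fc_port):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref kref;
	int id;
};

static void demo_obj_release(struct kref *kref)
{
	struct demo_obj *o = container_of(kref, struct demo_obj, kref);

	kfree(o);
}

static struct demo_obj *demo_obj_create(int id)
{
	struct demo_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	o->id = id;
	kref_init(&o->kref);		/* refcount == 1, owned by creator */
	return o;
}

/*
 * Lookup path: the object may be about to see its final kref_put(),
 * so only hand out a reference if the count is still non-zero.
 */
static struct demo_obj *demo_obj_tryget(struct demo_obj *o)
{
	if (!o || !kref_get_unless_zero(&o->kref))
		return NULL;
	return o;
}

static void demo_obj_put(struct demo_obj *o)
{
	kref_put(&o->kref, demo_obj_release);
}

kref_get_unless_zero() is what lets a lookup under a lock refuse an object whose last reference is already being dropped, instead of resurrecting it.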
+ */ sess->logout_on_delete = 1; sess->keep_nport_handle = 0; + sess->logout_completed = 0; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, - "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", - sess, vha->vha_tgt.qla_tgt); - - sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED); - BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); - memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); - - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); - vha->vha_tgt.qla_tgt->sess_count++; - qlt_do_generation_tick(vha, &sess->generation); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, - "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " - "s_id %x:%x:%x, confirmed completion %ssupported) added\n", - vha->vp_idx, local ? "local " : "", fcport->port_name, - fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area, - sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); - - /* - * Determine if this fc_port->port_name is allowed to access - * target mode using explict NodeACLs+MappedLUNs, or using - * TPG demo mode. If this is successful a target mode FC nexus - * is created. - */ if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, &fcport->port_name[0], sess) < 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "(%d) %8phC check_initiator_node_acl failed\n", + vha->vp_idx, fcport->port_name); return NULL; } else { + kref_init(&fcport->sess_kref); /* - * Take an extra reference to ->sess_kref here to handle qla_tgt_sess - * access across ->tgt.sess_lock reaquire. + * Take an extra reference to ->sess_kref here to handle + * fc_port access across ->tgt.sess_lock reaquire. */ - kref_get(&sess->sess_kref); - } - - return sess; -} - -/* - * Called from qla2x00_reg_remote_port() - */ -void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) -{ - struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess; - unsigned long flags; - - if (!vha->hw->tgt.tgt_ops) - return; - - if (!tgt || (fcport->port_type != FCT_INITIATOR)) - return; + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: kref_get_unless_zero failed for %8phC\n", + __func__, sess->port_name); + return NULL; + } - if (qla_ini_mode_enabled(vha)) - return; + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (!IS_SW_RESV_ADDR(sess->d_id)) + vha->vha_tgt.qla_tgt->sess_count++; - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - if (tgt->tgt_stop) { + qlt_do_generation_tick(vha, &sess->generation); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - return; } - sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); - if (!sess) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - mutex_lock(&vha->vha_tgt.tgt_mutex); - sess = qlt_create_sess(vha, fcport, false); - mutex_unlock(&vha->vha_tgt.tgt_mutex); - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - /* Point of no return */ - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - return; - } else { - kref_get(&sess->sess_kref); - - if (sess->deleted) { - qlt_undelete_sess(sess); - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, - "qla_target(%u): %ssession for port %8phC " - "(loop ID %d) reappeared\n", vha->vp_idx, - sess->local ? 
"local " : "", sess->port_name, - sess->loop_id); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, + "Adding sess %p se_sess %p to tgt %p sess_count %d\n", + sess, sess->se_sess, vha->vha_tgt.qla_tgt, + vha->vha_tgt.qla_tgt->sess_count); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, - "Reappeared sess %p\n", sess); - } - ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, - (fcport->flags & FCF_CONF_COMP_SUPPORTED)); - } + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, + "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " + "s_id %x:%x:%x, confirmed completion %ssupported) added\n", + vha->vp_idx, local ? "local " : "", fcport->port_name, + fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); - if (sess && sess->local) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, - "qla_target(%u): local session for " - "port %8phC (loop ID %d) became global\n", vha->vp_idx, - fcport->port_name, sess->loop_id); - sess->local = 0; - } - qlt_put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return sess; } /* @@ -1074,7 +1332,7 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess; + struct fc_port *sess = fcport; unsigned long flags; if (!vha->hw->tgt.tgt_ops) @@ -1088,8 +1346,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } - sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); - if (!sess) { + if (!sess->se_sess) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } @@ -1120,12 +1377,12 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt) * We need to protect against race, when tgt is freed before or * inside wake_up() */ - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, - "tgt %p, empty(sess_list)=%d sess_count=%d\n", - tgt, list_empty(&tgt->sess_list), tgt->sess_count); + "tgt %p, sess_count=%d\n", + tgt, tgt->sess_count); res = (tgt->sess_count == 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return res; } @@ -1173,8 +1430,6 @@ int qlt_stop_phase1(struct qla_tgt *tgt) mutex_unlock(&vha->vha_tgt.tgt_mutex); mutex_unlock(&qla_tgt_mutex); - flush_delayed_work(&tgt->sess_del_work); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, "Waiting for sess works (tgt %p)", tgt); spin_lock_irqsave(&tgt->sess_work_lock, flags); @@ -1186,14 +1441,13 @@ int qlt_stop_phase1(struct qla_tgt *tgt) spin_unlock_irqrestore(&tgt->sess_work_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, - "Waiting for tgt %p: list_empty(sess_list)=%d " - "sess_count=%d\n", tgt, list_empty(&tgt->sess_list), - tgt->sess_count); + "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); /* Big hammer */ - if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha)) + if (!ha->flags.host_shutting_down && + (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))) qlt_disable_vha(vha); /* Wait for sessions to clear out (just in case) */ @@ -1320,6 +1574,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; + nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE; nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags 
= ntfy->u.isp24.flags & @@ -1489,6 +1744,14 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) } } + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + if (tag == op->atio.u.isp24.exchange_addr) { + op->aborted = true; + spin_unlock(&vha->cmd_list_lock); + return 1; + } + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { if (tag == cmd->atio.u.isp24.exchange_addr) { cmd->aborted = 1; @@ -1525,6 +1788,18 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, if (op_key == key && op_lun == lun) op->aborted = true; } + + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + uint32_t op_key; + u64 op_lun; + + op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + op_lun = scsilun_to_int( + (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); + if (op_key == key && op_lun == lun) + op->aborted = true; + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key; uint32_t cmd_lun; @@ -1540,7 +1815,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, /* ha->hardware_lock supposed to be held on entry */ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, - struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) + struct abts_recv_from_24xx *abts, struct fc_port *sess) { struct qla_hw_data *ha = vha->hw; struct se_session *se_sess = sess->se_sess; @@ -1549,8 +1824,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, u32 lun = 0; int rc; bool found_lun = false; + unsigned long flags; - spin_lock(&se_sess->sess_cmd_lock); + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); @@ -1560,7 +1836,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, break; } } - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); /* cmd not in LIO lists, look in qla list */ if (!found_lun) { @@ -1592,8 +1868,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, mcmd->sess = sess; memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); mcmd->reset_count = vha->hw->chip_reset; + mcmd->tmr_func = QLA_TGT_ABTS; - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK, + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, abts->exchange_addr_to_abort); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, @@ -1613,7 +1890,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct abts_recv_from_24xx *abts) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; uint32_t tag = abts->exchange_addr_to_abort; uint8_t s_id[3]; int rc; @@ -1665,7 +1942,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); return; } @@ -1763,10 +2040,23 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) return; } - if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) - qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, - 0, 0, 0, 0, 0, 0); - else { + if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { + if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == + ELS_LOGO || + mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == + ELS_PRLO || + mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == + ELS_TPRLO) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "TM response logo %phC status %#x state 
%#x", + mcmd->sess->port_name, mcmd->fc_tm_rsp, + mcmd->flags); + qlt_schedule_sess_for_deletion_lock(mcmd->sess); + } else { + qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, + 0, 0, 0, 0, 0, 0); + } + } else { if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, mcmd->fc_tm_rsp, false); @@ -2182,95 +2472,6 @@ static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, cmd->conf_compl_supported; } -#ifdef CONFIG_QLA_TGT_DEBUG_SRR -/* - * Original taken from the XFS code - */ -static unsigned long qlt_srr_random(void) -{ - static int Inited; - static unsigned long RandomValue; - static DEFINE_SPINLOCK(lock); - /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ - register long rv; - register long lo; - register long hi; - unsigned long flags; - - spin_lock_irqsave(&lock, flags); - if (!Inited) { - RandomValue = jiffies; - Inited = 1; - } - rv = RandomValue; - hi = rv / 127773; - lo = rv % 127773; - rv = 16807 * lo - 2836 * hi; - if (rv <= 0) - rv += 2147483647; - RandomValue = rv; - spin_unlock_irqrestore(&lock, flags); - return rv; -} - -static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) -{ -#if 0 /* This is not a real status packets lost, so it won't lead to SRR */ - if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200) - == 50) { - *xmit_type &= ~QLA_TGT_XMIT_STATUS; - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, - "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag); - } -#endif - /* - * It's currently not possible to simulate SRRs for FCP_WRITE without - * a physical link layer failure, so don't even try here.. - */ - if (cmd->dma_data_direction != DMA_FROM_DEVICE) - return; - - if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) && - ((qlt_srr_random() % 100) == 20)) { - int i, leave = 0; - unsigned int tot_len = 0; - - while (leave == 0) - leave = qlt_srr_random() % cmd->sg_cnt; - - for (i = 0; i < leave; i++) - tot_len += cmd->sg[i].length; - - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, - "Cutting cmd %p (tag %d) buffer" - " tail to len %d, sg_cnt %d (cmd->bufflen %d," - " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave, - cmd->bufflen, cmd->sg_cnt); - - cmd->bufflen = tot_len; - cmd->sg_cnt = leave; - } - - if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) { - unsigned int offset = qlt_srr_random() % cmd->bufflen; - - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, - "Cutting cmd %p (tag %d) buffer head " - "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset, - cmd->bufflen); - if (offset == 0) - *xmit_type &= ~QLA_TGT_XMIT_DATA; - else if (qlt_set_data_offset(cmd, offset)) { - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, - "qlt_set_data_offset() failed (tag %d)", se_cmd->tag); - } - } -} -#else -static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) -{} -#endif - static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, struct qla_tgt_prm *prm) { @@ -2288,7 +2489,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, int i; if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { - if (prm->cmd->se_cmd.scsi_status != 0) { + if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, "Skipping EXPLICIT_CONFORM and " "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " @@ -2672,7 +2873,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, int res; spin_lock_irqsave(&ha->hardware_lock, flags); - if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (cmd->sess && 
cmd->sess->deleted) { cmd->state = QLA_TGT_STATE_PROCESSED; if (cmd->sess->logout_completed) /* no need to terminate. FW already freed exchange. */ @@ -2685,7 +2886,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, spin_unlock_irqrestore(&ha->hardware_lock, flags); memset(&prm, 0, sizeof(prm)); - qlt_check_srr_debug(cmd, &xmit_type); ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", @@ -2848,7 +3048,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || - (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) { + (cmd->sess && cmd->sess->deleted)) { /* * Either the port is not online or this request was from * previous life, just abort the processing. @@ -3296,7 +3496,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd) return EIO; } cmd->aborted = 1; - cmd->cmd_flags |= BIT_6; + cmd->trc_flags |= TRC_ABORT; spin_unlock_irqrestore(&cmd->cmd_lock, flags); qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1); @@ -3306,7 +3506,7 @@ EXPORT_SYMBOL(qlt_abort_cmd); void qlt_free_cmd(struct qla_tgt_cmd *cmd) { - struct qla_tgt_sess *sess = cmd->sess; + struct fc_port *sess = cmd->sess; ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, "%s: se_cmd[%p] ox_id %04x\n", @@ -3335,90 +3535,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) } EXPORT_SYMBOL(qlt_free_cmd); -/* ha->hardware_lock supposed to be held on entry */ -static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, - struct qla_tgt_cmd *cmd, void *ctio) -{ - struct qla_tgt_srr_ctio *sc; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_srr_imm *imm; - - tgt->ctio_srr_id++; - cmd->cmd_flags |= BIT_15; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, - "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); - - if (!ctio) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, - "qla_target(%d): SRR CTIO, but ctio is NULL\n", - vha->vp_idx); - return -EINVAL; - } - - sc = kzalloc(sizeof(*sc), GFP_ATOMIC); - if (sc != NULL) { - sc->cmd = cmd; - /* IRQ is already OFF */ - spin_lock(&tgt->srr_lock); - sc->srr_id = tgt->ctio_srr_id; - list_add_tail(&sc->srr_list_entry, - &tgt->srr_ctio_list); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, - "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); - if (tgt->imm_srr_id == tgt->ctio_srr_id) { - int found = 0; - list_for_each_entry(imm, &tgt->srr_imm_list, - srr_list_entry) { - if (imm->srr_id == sc->srr_id) { - found = 1; - break; - } - } - if (found) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, - "Scheduling srr work\n"); - schedule_work(&tgt->srr_work); - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, - "qla_target(%d): imm_srr_id " - "== ctio_srr_id (%d), but there is no " - "corresponding SRR IMM, deleting CTIO " - "SRR %p\n", vha->vp_idx, - tgt->ctio_srr_id, sc); - list_del(&sc->srr_list_entry); - spin_unlock(&tgt->srr_lock); - - kfree(sc); - return -EINVAL; - } - } - spin_unlock(&tgt->srr_lock); - } else { - struct qla_tgt_srr_imm *ti; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, - "qla_target(%d): Unable to allocate SRR CTIO entry\n", - vha->vp_idx); - spin_lock(&tgt->srr_lock); - list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, - srr_list_entry) { - if (imm->srr_id == tgt->ctio_srr_id) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, - "IMM SRR %p deleted (id %d)\n", - imm, imm->srr_id); - list_del(&imm->srr_list_entry); - qlt_reject_free_srr_imm(vha, imm, 1); - } - } - spin_unlock(&tgt->srr_lock); - - return -ENOMEM; - } - - 
return 0; -} - /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ @@ -3527,7 +3643,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) dump_stack(); } - cmd->cmd_flags |= BIT_17; + cmd->trc_flags |= TRC_FLUSH; ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3632,20 +3748,14 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, */ cmd->sess->logout_on_delete = 0; cmd->sess->send_els_logo = 1; - qlt_schedule_sess_for_deletion(cmd->sess, true); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, cmd->sess->port_name); + + qlt_schedule_sess_for_deletion_lock(cmd->sess); } break; } - case CTIO_SRR_RECEIVED: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, - "qla_target(%d): CTIO with SRR_RECEIVED" - " status %x received (state %x, se_cmd %p)\n", - vha->vp_idx, status, cmd->state, se_cmd); - if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) - break; - else - return; - case CTIO_DIF_ERROR: { struct ctio_crc_from_fw *crc = (struct ctio_crc_from_fw *)ctio; @@ -3693,7 +3803,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, */ if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && (!cmd->aborted)) { - cmd->cmd_flags |= BIT_13; + cmd->trc_flags |= TRC_CTIO_ERR; if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) return; } @@ -3701,7 +3811,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { - cmd->cmd_flags |= BIT_12; + cmd->trc_flags |= TRC_CTIO_DONE; } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { cmd->state = QLA_TGT_STATE_DATA_IN; @@ -3711,11 +3821,11 @@ skip_term: ha->tgt.tgt_ops->handle_data(cmd); return; } else if (cmd->aborted) { - cmd->cmd_flags |= BIT_18; + cmd->trc_flags |= TRC_CTIO_ABORTED; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); } else { - cmd->cmd_flags |= BIT_19; + cmd->trc_flags |= TRC_CTIO_STRANGE; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, "qla_target(%d): A command in state (%d) should " "not return a CTIO complete\n", vha->vp_idx, cmd->state); @@ -3762,7 +3872,7 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, return fcp_task_attr; } -static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, +static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *, uint8_t *); /* * Process context for I/O path into tcm_qla2xxx code @@ -3772,7 +3882,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess = cmd->sess; + struct fc_port *sess = cmd->sess; struct atio_from_isp *atio = &cmd->atio; unsigned char *cdb; unsigned long flags; @@ -3780,7 +3890,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) int ret, fcp_task_attr, data_dir, bidi = 0; cmd->cmd_in_wq = 0; - cmd->cmd_flags |= BIT_1; + cmd->trc_flags |= TRC_DO_WORK; if (tgt->tgt_stop) goto out_term; @@ -3822,7 +3932,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; @@ -3832,7 +3942,7 @@ out_term: * cmd has not sent to target yet, so pass NULL as the second * argument to qlt_send_term_exchange() and free the memory here. 
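Several hunks in this area replace the anonymous "cmd->cmd_flags |= BIT_n" bookkeeping with a dedicated trc_flags field and named values (TRC_NEW_CMD, TRC_DO_WORK, TRC_ABORT, TRC_CTIO_DONE, TRC_FLUSH and so on), recording the same per-command history in a form that is readable in a crash dump. A reduced version of the idea, with invented flag names:

#include <linux/bitops.h>
#include <linux/types.h>

/* One bit per milestone in a command's life; names are illustrative. */
enum demo_trace_flags {
	DEMO_TRC_NEW_CMD	= BIT(0),
	DEMO_TRC_DO_WORK	= BIT(1),
	DEMO_TRC_DO_WORK_ERR	= BIT(2),
	DEMO_TRC_XMIT_STATUS	= BIT(3),
	DEMO_TRC_ABORT		= BIT(4),
	DEMO_TRC_CTIO_DONE	= BIT(5),
	DEMO_TRC_FLUSH		= BIT(6),
};

struct demo_io_cmd {
	u32 trc_flags;
	/* ... */
};

static void demo_mark(struct demo_io_cmd *cmd, enum demo_trace_flags f)
{
	cmd->trc_flags |= f;
}

A command's trc_flags word then shows at a glance which paths it has traversed, without mapping bare bit numbers back to call sites by hand.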
*/ - cmd->cmd_flags |= BIT_2; + cmd->trc_flags |= TRC_DO_WORK_ERR; spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0); @@ -3841,7 +3951,7 @@ out_term: spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->tgt.sess_lock, flags); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } @@ -3859,7 +3969,7 @@ static void qlt_do_work(struct work_struct *work) } static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, - struct qla_tgt_sess *sess, + struct fc_port *sess, struct atio_from_isp *atio) { struct se_session *se_sess = sess->se_sess; @@ -3883,7 +3993,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; - cmd->cmd_flags = 0; + cmd->trc_flags = 0; cmd->jiffies_at_alloc = get_jiffies_64(); cmd->reset_count = vha->hw->chip_reset; @@ -3900,7 +4010,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work) struct qla_tgt_sess_op, work); scsi_qla_host_t *vha = op->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct qla_tgt_cmd *cmd; unsigned long flags; uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; @@ -3941,11 +4051,12 @@ static void qlt_create_sess_from_atio(struct work_struct *work) if (!cmd) { spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); return; } + /* * __qlt_do_work() will call qlt_put_sess() to release * the extra reference taken above by qlt_make_local_sess() @@ -3953,13 +4064,11 @@ static void qlt_create_sess_from_atio(struct work_struct *work) __qlt_do_work(cmd); kfree(op); return; - out_term: spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); - } /* ha->hardware_lock supposed to be held on entry */ @@ -3968,8 +4077,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct qla_tgt_cmd *cmd; + unsigned long flags; if (unlikely(tgt->tgt_stop)) { ql_dbg(ql_dbg_io, vha, 0x3061, @@ -3998,7 +4108,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, /* Another WWN used to have our s_id. Our PLOGI scheduled its * session deletion, but it's still in sess_del_work wq */ - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { ql_dbg(ql_dbg_io, vha, 0x3061, "New command while old session %p is being deleted\n", sess); @@ -4008,24 +4118,32 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, /* * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
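Nearby hunks also convert plain spin_lock()/spin_unlock() pairs (for example around se_sess->sess_cmd_lock and vha->cmd_list_lock) to the _irqsave/_irqrestore variants. That form is the safe choice when a lock may be taken both from process context and from interrupt context, or when the caller's interrupt state is unknown: it disables local interrupts and restores the previous state on unlock. Schematically, with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_io {
	struct list_head node;
	/* ... */
};

static DEFINE_SPINLOCK(demo_io_lock);
static LIST_HEAD(demo_io_list);

/* Safe to call from process, softirq or hardirq context. */
static void demo_io_queue(struct demo_io *io)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_io_lock, flags);
	list_add_tail(&io->node, &demo_io_list);
	spin_unlock_irqrestore(&demo_io_lock, flags);
}

With the plain form, an interrupt arriving on the same CPU while the lock is held and then trying to take it again would deadlock; the saved-flags variant rules that out at the cost of a slightly longer interrupts-off window.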
*/ - kref_get(&sess->sess_kref); + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt, vha, 0xffff, + "%s: kref_get fail, %8phC oxid %x \n", + __func__, sess->port_name, + be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + return -EFAULT; + } cmd = qlt_get_tag(vha, sess, atio); if (!cmd) { ql_dbg(ql_dbg_io, vha, 0x3062, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); - qlt_put_sess(sess); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return -ENOMEM; } cmd->cmd_in_wq = 1; - cmd->cmd_flags |= BIT_0; + cmd->trc_flags |= TRC_NEW_CMD; cmd->se_cmd.cpuid = ha->msix_count ? ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND; - spin_lock(&vha->cmd_list_lock); + spin_lock_irqsave(&vha->cmd_list_lock, flags); list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); - spin_unlock(&vha->cmd_list_lock); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); INIT_WORK(&cmd->work, qlt_do_work); if (ha->msix_count) { @@ -4043,7 +4161,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, } /* ha->hardware_lock supposed to be held on entry */ -static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, +static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags) { struct scsi_qla_host *vha = sess->vha; @@ -4051,7 +4169,6 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, struct qla_tgt_mgmt_cmd *mcmd; struct atio_from_isp *a = (struct atio_from_isp *)iocb; int res; - uint8_t tmr_func; mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); if (!mcmd) { @@ -4073,74 +4190,12 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, mcmd->reset_count = vha->hw->chip_reset; switch (fn) { - case QLA_TGT_CLEAR_ACA: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, - "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); - tmr_func = TMR_CLEAR_ACA; - break; - - case QLA_TGT_TARGET_RESET: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, - "qla_target(%d): TARGET_RESET received\n", - sess->vha->vp_idx); - tmr_func = TMR_TARGET_WARM_RESET; - break; - case QLA_TGT_LUN_RESET: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, - "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); - tmr_func = TMR_LUN_RESET; - abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); - break; - - case QLA_TGT_CLEAR_TS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, - "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); - tmr_func = TMR_CLEAR_TASK_SET; - break; - - case QLA_TGT_ABORT_TS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, - "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); - tmr_func = TMR_ABORT_TASK_SET; - break; -#if 0 - case QLA_TGT_ABORT_ALL: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, - "qla_target(%d): Doing ABORT_ALL_TASKS\n", - sess->vha->vp_idx); - tmr_func = 0; - break; - - case QLA_TGT_ABORT_ALL_SESS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, - "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", - sess->vha->vp_idx); - tmr_func = 0; - break; - - case QLA_TGT_NEXUS_LOSS_SESS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, - "qla_target(%d): Doing NEXUS_LOSS_SESS\n", - sess->vha->vp_idx); - tmr_func = 0; - break; - - case QLA_TGT_NEXUS_LOSS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, - "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); - tmr_func = 0; - break; -#endif - default: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, - "qla_target(%d): Unknown task mgmt fn 0x%x\n", - sess->vha->vp_idx, fn); - mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); - return -ENOSYS; + 
abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); + break; } - res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); + res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0); if (res != 0) { ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", @@ -4158,7 +4213,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt; - struct qla_tgt_sess *sess; + struct fc_port *sess; uint32_t lun, unpacked_lun; int fn; unsigned long flags; @@ -4183,7 +4238,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) sizeof(struct atio_from_isp)); } - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) + if (sess->deleted) return -EFAULT; return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); @@ -4191,7 +4246,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) /* ha->hardware_lock supposed to be held on entry */ static int __qlt_abort_task(struct scsi_qla_host *vha, - struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) + struct imm_ntfy_from_isp *iocb, struct fc_port *sess) { struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_hw_data *ha = vha->hw; @@ -4215,8 +4270,9 @@ static int __qlt_abort_task(struct scsi_qla_host *vha, lun = a->u.isp24.fcp_cmnd.lun; unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); mcmd->reset_count = vha->hw->chip_reset; + mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, le16_to_cpu(iocb->u.isp2x.seq_id)); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, @@ -4234,7 +4290,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; int loop_id; unsigned long flags; @@ -4257,22 +4313,20 @@ static int qlt_abort_task(struct scsi_qla_host *vha, void qlt_logo_completion_handler(fc_port_t *fcport, int rc) { - if (fcport->tgt_session) { - if (rc != MBS_COMMAND_COMPLETE) { - ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, - "%s: se_sess %p / sess %p from" - " port %8phC loop_id %#04x s_id %02x:%02x:%02x" - " LOGO failed: %#x\n", - __func__, - fcport->tgt_session->se_sess, - fcport->tgt_session, - fcport->port_name, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, rc); - } - - fcport->tgt_session->logout_completed = 1; + if (rc != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, + "%s: se_sess %p / sess %p from" + " port %8phC loop_id %#04x s_id %02x:%02x:%02x" + " LOGO failed: %#x\n", + __func__, + fcport->se_sess, + fcport, + fcport->port_name, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, rc); } + + fcport->logout_completed = 1; } /* @@ -4282,16 +4336,16 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc) * deletion. Returns existing session with matching wwn if present. * Null otherwise. 
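[Editor's note] With the per-function switch above removed, qlt_issue_task_mgmt() now just records the code in mcmd->tmr_func and passes it straight to tgt_ops->handle_tmr(); the translation into generic TMR_* values is done once by the fabric module (see the tcm_qla2xxx_handle_tmr() hunk further down). The sketch below shows that translation step in isolation. The numeric values other than QLA_TGT_ABTS (0xFFFB, visible in the qla_target.h hunk) are placeholders for illustration; the real constants live in qla_target.h and the target core headers.

/*
 * Sketch of the driver-code -> generic-TMR translation that the fabric
 * side performs after this change.
 */
#include <stdio.h>

enum sketch_qla_tmr {		/* driver-side codes (illustrative values) */
	SK_QLA_TGT_ABTS        = 0xFFFB,
	SK_QLA_TGT_LUN_RESET   = 0xFF10,
	SK_QLA_TGT_CLEAR_TS    = 0xFF04,
	SK_QLA_TGT_ABORT_TS    = 0xFF02,
};

enum sketch_generic_tmr {	/* generic target-core codes (illustrative values) */
	SK_TMR_ABORT_TASK     = 1,
	SK_TMR_ABORT_TASK_SET = 2,
	SK_TMR_CLEAR_TASK_SET = 4,
	SK_TMR_LUN_RESET      = 5,
};

/* Returns the generic code, or -1 for an unsupported function. */
static int sketch_translate_tmr(int qla_code)
{
	switch (qla_code) {
	case SK_QLA_TGT_ABTS:      return SK_TMR_ABORT_TASK;
	case SK_QLA_TGT_LUN_RESET: return SK_TMR_LUN_RESET;
	case SK_QLA_TGT_CLEAR_TS:  return SK_TMR_CLEAR_TASK_SET;
	case SK_QLA_TGT_ABORT_TS:  return SK_TMR_ABORT_TASK_SET;
	default:                   return -1;	/* the driver returns -ENOSYS */
	}
}

int main(void)
{
	printf("LUN reset -> %d\n", sketch_translate_tmr(SK_QLA_TGT_LUN_RESET));
	printf("unknown   -> %d\n", sketch_translate_tmr(0x1234));
	return 0;
}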
*/ -static struct qla_tgt_sess * -qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, - port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess) +struct fc_port * +qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, + port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) { - struct qla_tgt_sess *sess = NULL, *other_sess; + struct fc_port *sess = NULL, *other_sess; uint64_t other_wwn; *conflict_sess = NULL; - list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) { + list_for_each_entry(other_sess, &vha->vp_fcports, list) { other_wwn = wwn_to_u64(other_sess->port_name); @@ -4302,9 +4356,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, } /* find other sess with nport_id collision */ - if (port_id.b24 == other_sess->s_id.b24) { + if (port_id.b24 == other_sess->d_id.b24) { if (loop_id != other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c, + ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4320,6 +4374,11 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, * Another wwn used to have our s_id/loop_id * kill the session, but don't free the loop_id */ + ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + other_sess->keep_nport_handle = 1; *conflict_sess = other_sess; qlt_schedule_sess_for_deletion(other_sess, @@ -4329,8 +4388,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, } /* find other sess with nport handle collision */ - if (loop_id == other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d, + if ((loop_id == other_sess->loop_id) && + (loop_id != FC_NO_LOOP_ID)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4358,11 +4418,21 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) spin_lock(&vha->cmd_list_lock); list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + if (op_key == key) { op->aborted = true; count++; } } + + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + if (op_key == key) { + op->aborted = true; + count++; + } + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); if (cmd_key == key) { @@ -4383,13 +4453,13 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL; + struct fc_port *sess = NULL, *conflict_sess = NULL; uint64_t wwn; port_id_t port_id; uint16_t loop_id; uint16_t wd3_lo; int res = 0; - qlt_plogi_ack_t *pla; + struct qlt_plogi_ack_t *pla; unsigned long flags; wwn = wwn_to_u64(iocb->u.isp24.port_name); @@ -4401,9 +4471,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, - "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", - vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); + ql_dbg(ql_dbg_disc, vha, 0xf026, + "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", + vha->vp_idx, iocb->u.isp24.port_id[2], + iocb->u.isp24.port_id[1], 
iocb->u.isp24.port_id[0], + iocb->u.isp24.status_subcode, loop_id, + iocb->u.isp24.port_name); /* res = 1 means ack at the end of thread * res = 0 means ack async/later. @@ -4416,12 +4489,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, if (wwn) { spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); - sess = qlt_find_sess_invalidate_other(tgt, wwn, - port_id, loop_id, &conflict_sess); + sess = qlt_find_sess_invalidate_other(vha, wwn, + port_id, loop_id, &conflict_sess); spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); } - if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) { + if (IS_SW_RESV_ADDR(port_id)) { res = 1; break; } @@ -4429,42 +4502,66 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); if (!pla) { qlt_send_term_imm_notif(vha, iocb, 1); - - res = 0; break; } res = 0; - if (conflict_sess) + if (conflict_sess) { + conflict_sess->login_gen++; qlt_plogi_ack_link(vha, pla, conflict_sess, - QLT_PLOGI_LINK_CONFLICT); + QLT_PLOGI_LINK_CONFLICT); + } - if (!sess) + if (!sess) { + pla->ref_count++; + qla24xx_post_newsess_work(vha, &port_id, + iocb->u.isp24.port_name, pla); + res = 0; break; + } qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); - /* - * Under normal circumstances we want to release nport handle - * during LOGO process to avoid nport handle leaks inside FW. - * The exception is when LOGO is done while another PLOGI with - * the same nport handle is waiting as might be the case here. - * Note: there is always a possibily of a race where session - * deletion has already started for other reasons (e.g. ACL - * removal) and now PLOGI arrives: - * 1. if PLOGI arrived in FW after nport handle has been freed, - * FW must have assigned this PLOGI a new/same handle and we - * can proceed ACK'ing it as usual when session deletion - * completes. - * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT - * bit reached it, the handle has now been released. We'll - * get an error when we ACK this PLOGI. Nothing will be sent - * back to initiator. Initiator should eventually retry - * PLOGI and situation will correct itself. - */ - sess->keep_nport_handle = ((sess->loop_id == loop_id) && - (sess->s_id.b24 == port_id.b24)); - qlt_schedule_sess_for_deletion(sess, true); + sess->fw_login_state = DSC_LS_PLOGI_PEND; + sess->d_id = port_id; + sess->login_gen++; + + switch (sess->disc_state) { + case DSC_DELETED: + qlt_plogi_ack_unref(vha, pla); + break; + + default: + /* + * Under normal circumstances we want to release nport handle + * during LOGO process to avoid nport handle leaks inside FW. + * The exception is when LOGO is done while another PLOGI with + * the same nport handle is waiting as might be the case here. + * Note: there is always a possibily of a race where session + * deletion has already started for other reasons (e.g. ACL + * removal) and now PLOGI arrives: + * 1. if PLOGI arrived in FW after nport handle has been freed, + * FW must have assigned this PLOGI a new/same handle and we + * can proceed ACK'ing it as usual when session deletion + * completes. + * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT + * bit reached it, the handle has now been released. We'll + * get an error when we ACK this PLOGI. Nothing will be sent + * back to initiator. Initiator should eventually retry + * PLOGI and situation will correct itself. 
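[Editor's note] The long comment above reduces to a single predicate, applied in the assignment that follows: the nport handle is kept only when the incoming PLOGI carries both the same loop_id and the same 24-bit port id as the session being replaced. A minimal sketch of that comparison is below; the types are illustrative stand-ins for port_id_t and fc_port.

/*
 * Sketch of the keep_nport_handle decision: pack domain/area/al_pa into
 * one comparable 24-bit key (like d_id.b24) and require both the handle
 * and the port id to match.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_port_id {
	uint8_t domain, area, al_pa;
};

static uint32_t sketch_b24(struct sketch_port_id id)
{
	return ((uint32_t)id.domain << 16) | ((uint32_t)id.area << 8) | id.al_pa;
}

static bool keep_nport_handle(uint16_t sess_loop_id, struct sketch_port_id sess_id,
			      uint16_t plogi_loop_id, struct sketch_port_id plogi_id)
{
	return sess_loop_id == plogi_loop_id &&
	       sketch_b24(sess_id) == sketch_b24(plogi_id);
}

int main(void)
{
	struct sketch_port_id a = { 0x01, 0x02, 0x03 };
	struct sketch_port_id b = { 0x01, 0x02, 0x04 };

	printf("same handle, same id : %d\n", keep_nport_handle(5, a, 5, a));
	printf("same handle, new id  : %d\n", keep_nport_handle(5, a, 5, b));
	return 0;
}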
+ */ + sess->keep_nport_handle = ((sess->loop_id == loop_id) && + (sess->d_id.b24 == port_id.b24)); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, sess->port_name); + + + qlt_schedule_sess_for_deletion_lock(sess); + break; + } + break; case ELS_PRLI: @@ -4472,8 +4569,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, if (wwn) { spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); - sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id, - loop_id, &conflict_sess); + sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, + loop_id, &conflict_sess); spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); } @@ -4487,7 +4584,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, } if (sess != NULL) { - if (sess->deleted) { + if (sess->fw_login_state == DSC_LS_PLOGI_PEND) { /* * Impatient initiator sent PRLI before last * PLOGI could finish. Will force him to re-try, @@ -4511,11 +4608,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, sess->local = 0; sess->loop_id = loop_id; - sess->s_id = port_id; + sess->d_id = port_id; + sess->fw_login_state = DSC_LS_PRLI_PEND; if (wd3_lo & BIT_7) sess->conf_compl_supported = 1; + if ((wd3_lo & BIT_4) == 0) + sess->port_type = FCT_INITIATOR; + else + sess->port_type = FCT_TARGET; } res = 1; /* send notify ack */ @@ -4525,15 +4627,61 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else { - /* todo: else - create sess here. */ - res = 1; /* send notify ack */ - } + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); + qla24xx_post_nack_work(vha, sess, iocb, + SRB_NACK_PRLI); + res = 0; + } + } break; + + case ELS_TPRLO: + if (le16_to_cpu(iocb->u.isp24.flags) & + NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { + loop_id = 0xFFFF; + qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); + res = 1; + break; + } + /* drop through */ case ELS_LOGO: case ELS_PRLO: + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + sess = qla2x00_find_fcport_by_loopid(vha, loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + if (sess) { + sess->login_gen++; + sess->fw_login_state = DSC_LS_LOGO_PEND; + sess->logo_ack_needed = 1; + memcpy(sess->iocb, iocb, IOCB_SIZE); + } + res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: logo %llx res %d sess %p ", + __func__, wwn, res, sess); + if (res == 0) { + /* + * cmd went upper layer, look for qlt_xmit_tm_rsp() + * for LOGO_ACK & sess delete + */ + BUG_ON(!sess); + res = 0; + } else { + /* cmd did not go to upper layer. */ + if (sess) { + qlt_schedule_sess_for_deletion_lock(sess); + res = 0; + } + /* else logo will be ack */ + } break; case ELS_PDISC: case ELS_ADISC: @@ -4544,6 +4692,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 0, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0; } + + sess = qla2x00_find_fcport_by_wwpn(vha, + iocb->u.isp24.port_name, 1); + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "sess %p lid %d|%d DS %d LS %d\n", + sess, sess->loop_id, loop_id, + sess->disc_state, sess->fw_login_state); + } + res = 1; /* send notify ack */ break; } @@ -4560,451 +4718,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, return res; } -static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) -{ -#if 1 - /* - * FIXME: Reject non zero SRR relative offset until we can test - * this code properly. 
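[Editor's note] The helper being deleted here, qlt_set_data_offset() (its body continues below), walked the command's scatter/gather list to find the segment containing an SRR relative offset and then rebuilt a list starting at that point. The following userspace sketch shows only the offset walk, with plain arrays standing in for struct scatterlist; it is an illustration of the technique, not the deleted kernel code.

/*
 * Find the S/G segment containing 'offset' and the remaining offset
 * inside that segment.
 */
#include <stddef.h>
#include <stdio.h>

struct sketch_seg {
	size_t length;		/* bytes in this segment */
};

/* Returns the segment index, storing the in-segment offset in *rem,
 * or -1 if offset is past the end of the list. */
static int sketch_find_offset(const struct sketch_seg *sg, int nents,
			      size_t offset, size_t *rem)
{
	size_t consumed = 0;

	for (int i = 0; i < nents; i++) {
		if (offset < consumed + sg[i].length) {
			*rem = offset - consumed;
			return i;
		}
		consumed += sg[i].length;
	}
	return -1;
}

int main(void)
{
	struct sketch_seg sg[] = { { 4096 }, { 4096 }, { 2048 } };
	size_t rem;
	int idx = sketch_find_offset(sg, 3, 5000, &rem);

	printf("offset 5000 -> segment %d, offset %zu within it\n", idx, rem);
	return 0;
}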
- */ - pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); - return -1; -#else - struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; - size_t first_offset = 0, rem_offset = offset, tmp = 0; - int i, sg_srr_cnt, bufflen = 0; - - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, - "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " - "cmd->sg_cnt: %u, direction: %d\n", - cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); - - if (!cmd->sg || !cmd->sg_cnt) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, - "Missing cmd->sg or zero cmd->sg_cnt in" - " qla_tgt_set_data_offset\n"); - return -EINVAL; - } - /* - * Walk the current cmd->sg list until we locate the new sg_srr_start - */ - for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, - "sg[%d]: %p page: %p, length: %d, offset: %d\n", - i, sg, sg_page(sg), sg->length, sg->offset); - - if ((sg->length + tmp) > offset) { - first_offset = rem_offset; - sg_srr_start = sg; - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, - "Found matching sg[%d], using %p as sg_srr_start, " - "and using first_offset: %zu\n", i, sg, - first_offset); - break; - } - tmp += sg->length; - rem_offset -= sg->length; - } - - if (!sg_srr_start) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, - "Unable to locate sg_srr_start for offset: %u\n", offset); - return -EINVAL; - } - sg_srr_cnt = (cmd->sg_cnt - i); - - sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); - if (!sg_srr) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, - "Unable to allocate sgp\n"); - return -ENOMEM; - } - sg_init_table(sg_srr, sg_srr_cnt); - sgp = &sg_srr[0]; - /* - * Walk the remaining list for sg_srr_start, mapping to the newly - * allocated sg_srr taking first_offset into account. - */ - for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { - if (first_offset) { - sg_set_page(sgp, sg_page(sg), - (sg->length - first_offset), first_offset); - first_offset = 0; - } else { - sg_set_page(sgp, sg_page(sg), sg->length, 0); - } - bufflen += sgp->length; - - sgp = sg_next(sgp); - if (!sgp) - break; - } - - cmd->sg = sg_srr; - cmd->sg_cnt = sg_srr_cnt; - cmd->bufflen = bufflen; - cmd->offset += offset; - cmd->free_sg = 1; - - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", - cmd->sg_cnt); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", - cmd->bufflen); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", - cmd->offset); - - if (cmd->sg_cnt < 0) - BUG(); - - if (cmd->bufflen < 0) - BUG(); - - return 0; -#endif -} - -static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, - uint32_t srr_rel_offs, int *xmit_type) -{ - int res = 0, rel_offs; - - rel_offs = srr_rel_offs - cmd->offset; - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", - srr_rel_offs, rel_offs); - - *xmit_type = QLA_TGT_XMIT_ALL; - - if (rel_offs < 0) { - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, - "qla_target(%d): SRR rel_offs (%d) < 0", - cmd->vha->vp_idx, rel_offs); - res = -1; - } else if (rel_offs == cmd->bufflen) - *xmit_type = QLA_TGT_XMIT_STATUS; - else if (rel_offs > 0) - res = qlt_set_data_offset(cmd, rel_offs); - - return res; -} - -/* No locks, thread context */ -static void qlt_handle_srr(struct scsi_qla_host *vha, - struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) -{ - struct imm_ntfy_from_isp *ntfy = - (struct imm_ntfy_from_isp *)&imm->imm_ntfy; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt_cmd *cmd = sctio->cmd; - struct se_cmd *se_cmd = &cmd->se_cmd; - 
unsigned long flags; - int xmit_type = 0, resp = 0; - uint32_t offset; - uint16_t srr_ui; - - offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); - srr_ui = ntfy->u.isp24.srr_ui; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", - cmd, srr_ui); - - switch (srr_ui) { - case SRR_IU_STATUS: - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, - 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - xmit_type = QLA_TGT_XMIT_STATUS; - resp = 1; - break; - case SRR_IU_DATA_IN: - if (!cmd->sg || !cmd->sg_cnt) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, - "Unable to process SRR_IU_DATA_IN due to" - " missing cmd->sg, state: %d\n", cmd->state); - dump_stack(); - goto out_reject; - } - if (se_cmd->scsi_status != 0) { - ql_dbg(ql_dbg_tgt, vha, 0xe02a, - "Rejecting SRR_IU_DATA_IN with non GOOD " - "scsi_status\n"); - goto out_reject; - } - cmd->bufflen = se_cmd->data_length; - - if (qlt_has_data(cmd)) { - if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) - goto out_reject; - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, - 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - resp = 1; - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, - "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject", - vha->vp_idx, se_cmd->tag, - cmd->se_cmd.scsi_status); - goto out_reject; - } - break; - case SRR_IU_DATA_OUT: - if (!cmd->sg || !cmd->sg_cnt) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, - "Unable to process SRR_IU_DATA_OUT due to" - " missing cmd->sg\n"); - dump_stack(); - goto out_reject; - } - if (se_cmd->scsi_status != 0) { - ql_dbg(ql_dbg_tgt, vha, 0xe02b, - "Rejecting SRR_IU_DATA_OUT" - " with non GOOD scsi_status\n"); - goto out_reject; - } - cmd->bufflen = se_cmd->data_length; - - if (qlt_has_data(cmd)) { - if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) - goto out_reject; - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, - 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (xmit_type & QLA_TGT_XMIT_DATA) { - cmd->cmd_flags |= BIT_8; - qlt_rdy_to_xfer(cmd); - } - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, - "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject", - vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status); - goto out_reject; - } - break; - default: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, - "qla_target(%d): Unknown srr_ui value %x", - vha->vp_idx, srr_ui); - goto out_reject; - } - - /* Transmit response in case of status and data-in cases */ - if (resp) { - cmd->cmd_flags |= BIT_7; - qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); - } - - return; - -out_reject: - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, 0, 0, 0, - NOTIFY_ACK_SRR_FLAGS_REJECT, - NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, - NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); - if (cmd->state == QLA_TGT_STATE_NEED_DATA) { - cmd->state = QLA_TGT_STATE_DATA_IN; - dump_stack(); - } else { - cmd->cmd_flags |= BIT_9; - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); - } - spin_unlock_irqrestore(&ha->hardware_lock, flags); -} - -static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, - struct qla_tgt_srr_imm *imm, int ha_locked) -{ - struct qla_hw_data *ha = vha->hw; - unsigned long flags = 0; - -#ifndef __CHECKER__ - if (!ha_locked) - 
spin_lock_irqsave(&ha->hardware_lock, flags); -#endif - - qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, - NOTIFY_ACK_SRR_FLAGS_REJECT, - NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, - NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); - -#ifndef __CHECKER__ - if (!ha_locked) - spin_unlock_irqrestore(&ha->hardware_lock, flags); -#endif - - kfree(imm); -} - -static void qlt_handle_srr_work(struct work_struct *work) -{ - struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); - struct scsi_qla_host *vha = tgt->vha; - struct qla_tgt_srr_ctio *sctio; - unsigned long flags; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", - tgt); - -restart: - spin_lock_irqsave(&tgt->srr_lock, flags); - list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { - struct qla_tgt_srr_imm *imm, *i, *ti; - struct qla_tgt_cmd *cmd; - struct se_cmd *se_cmd; - - imm = NULL; - list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, - srr_list_entry) { - if (i->srr_id == sctio->srr_id) { - list_del(&i->srr_list_entry); - if (imm) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, - "qla_target(%d): There must be " - "only one IMM SRR per CTIO SRR " - "(IMM SRR %p, id %d, CTIO %p\n", - vha->vp_idx, i, i->srr_id, sctio); - qlt_reject_free_srr_imm(tgt->vha, i, 0); - } else - imm = i; - } - } - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, - "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, - sctio->srr_id); - - if (imm == NULL) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, - "Not found matching IMM for SRR CTIO (id %d)\n", - sctio->srr_id); - continue; - } else - list_del(&sctio->srr_list_entry); - - spin_unlock_irqrestore(&tgt->srr_lock, flags); - - cmd = sctio->cmd; - /* - * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow - * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() - * logic.. - */ - cmd->offset = 0; - if (cmd->free_sg) { - kfree(cmd->sg); - cmd->sg = NULL; - cmd->free_sg = 0; - } - se_cmd = &cmd->se_cmd; - - cmd->sg_cnt = se_cmd->t_data_nents; - cmd->sg = se_cmd->t_data_sg; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, - "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d", - cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ? 
- se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset); - - qlt_handle_srr(vha, sctio, imm); - - kfree(imm); - kfree(sctio); - goto restart; - } - spin_unlock_irqrestore(&tgt->srr_lock, flags); -} - -/* ha->hardware_lock supposed to be held on entry */ -static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, - struct imm_ntfy_from_isp *iocb) -{ - struct qla_tgt_srr_imm *imm; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_srr_ctio *sctio; - - tgt->imm_srr_id++; - - ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n", - vha->vp_idx); - - imm = kzalloc(sizeof(*imm), GFP_ATOMIC); - if (imm != NULL) { - memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); - - /* IRQ is already OFF */ - spin_lock(&tgt->srr_lock); - imm->srr_id = tgt->imm_srr_id; - list_add_tail(&imm->srr_list_entry, - &tgt->srr_imm_list); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, - "IMM NTFY SRR %p added (id %d, ui %x)\n", - imm, imm->srr_id, iocb->u.isp24.srr_ui); - if (tgt->imm_srr_id == tgt->ctio_srr_id) { - int found = 0; - list_for_each_entry(sctio, &tgt->srr_ctio_list, - srr_list_entry) { - if (sctio->srr_id == imm->srr_id) { - found = 1; - break; - } - } - if (found) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", - "Scheduling srr work\n"); - schedule_work(&tgt->srr_work); - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, - "qla_target(%d): imm_srr_id " - "== ctio_srr_id (%d), but there is no " - "corresponding SRR CTIO, deleting IMM " - "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, - imm); - list_del(&imm->srr_list_entry); - - kfree(imm); - - spin_unlock(&tgt->srr_lock); - goto out_reject; - } - } - spin_unlock(&tgt->srr_lock); - } else { - struct qla_tgt_srr_ctio *ts; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, - "qla_target(%d): Unable to allocate SRR IMM " - "entry, SRR request will be rejected\n", vha->vp_idx); - - /* IRQ is already OFF */ - spin_lock(&tgt->srr_lock); - list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, - srr_list_entry) { - if (sctio->srr_id == tgt->imm_srr_id) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, - "CTIO SRR %p deleted (id %d)\n", - sctio, sctio->srr_id); - list_del(&sctio->srr_list_entry); - qlt_send_term_exchange(vha, sctio->cmd, - &sctio->cmd->atio, 1, 0); - kfree(sctio); - } - } - spin_unlock(&tgt->srr_lock); - goto out_reject; - } - - return; - -out_reject: - qlt_send_notify_ack(vha, iocb, 0, 0, 0, - NOTIFY_ACK_SRR_FLAGS_REJECT, - NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, - NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); -} - /* * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire */ @@ -5126,12 +4839,6 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, if (qlt_24xx_handle_els(vha, iocb) == 0) send_notify_ack = 0; break; - - case IMM_NTFY_SRR: - qlt_prepare_srr_imm(vha, iocb); - send_notify_ack = 0; - break; - default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, "qla_target(%d): Received unknown immediate " @@ -5153,7 +4860,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, struct ctio7_to_24xx *ctio24; struct qla_hw_data *ha = vha->hw; request_t *pkt; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; unsigned long flags; spin_lock_irqsave(&ha->tgt.sess_lock, flags); @@ -5214,7 +4921,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct se_session *se_sess; struct qla_tgt_cmd *cmd; int tag; @@ -5756,6 +5463,32 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); break; + case MBA_REJECTED_FCP_CMD: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "qla_target(%d): Async event LS_REJECT occurred " + "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); + + if (le16_to_cpu(mailbox[3]) == 1) { + /* exchange starvation. */ + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xffff, + "Exchange starvation-. Resetting RISC\n"); + + vha->hw->exch_starvation = 0; + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } + break; + case MBA_PORT_UPDATE: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, "qla_target(%d): Port update async event %#x " @@ -5765,14 +5498,14 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); login_code = le16_to_cpu(mailbox[2]); - if (login_code == 0x4) + if (login_code == 0x4) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, "Async MB 2: Got PLOGI Complete\n"); - else if (login_code == 0x7) + vha->hw->exch_starvation = 0; + } else if (login_code == 0x7) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, "Async MB 2: Port Logged Out\n"); break; - default: break; } @@ -5783,8 +5516,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, uint16_t loop_id) { - fc_port_t *fcport; + fc_port_t *fcport, *tfcp, *del; int rc; + unsigned long flags; + u8 newfcport = 0; fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); if (!fcport) { @@ -5806,18 +5541,82 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, return NULL; } + del = NULL; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); + + if (tfcp) { + tfcp->d_id = fcport->d_id; + tfcp->port_type = fcport->port_type; + tfcp->supported_classes = fcport->supported_classes; + tfcp->flags |= fcport->flags; + + del = fcport; + fcport = tfcp; + } else { + if (vha->hw->current_topology == ISP_CFG_F) + fcport->flags |= FCF_FABRIC_DEVICE; + + list_add_tail(&fcport->list, &vha->vp_fcports); + if (!IS_SW_RESV_ADDR(fcport->d_id)) + vha->fcport_count++; + fcport->login_gen++; + fcport->disc_state = DSC_LOGIN_COMPLETE; + fcport->login_succ = 1; + newfcport = 1; + } + + fcport->deleted = 0; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, 
flags); + + switch (vha->host->active_mode) { + case MODE_INITIATOR: + case MODE_DUAL: + if (newfcport) { + if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, vha->fcport_count); + qla24xx_post_upd_fcport_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, vha->fcport_count); + qla24xx_post_gpsc_work(vha, fcport); + } + } + break; + + case MODE_TARGET: + default: + break; + } + if (del) + qla2x00_free_fcport(del); + return fcport; } /* Must be called under tgt_mutex */ -static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, +static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, uint8_t *s_id) { - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; fc_port_t *fcport = NULL; int rc, global_resets; uint16_t loop_id = 0; + if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { + /* + * This is Domain Controller, so it should be + * OK to drop SCSI commands from it. + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, + "Unable to find initiator with S_ID %x:%x:%x", + s_id[0], s_id[1], s_id[2]); + return NULL; + } + mutex_lock(&vha->vha_tgt.tgt_mutex); retry: @@ -5828,21 +5627,11 @@ retry: if (rc != 0) { mutex_unlock(&vha->vha_tgt.tgt_mutex); - if ((s_id[0] == 0xFF) && - (s_id[1] == 0xFC)) { - /* - * This is Domain Controller, so it should be - * OK to drop SCSI commands from it. - */ - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, - "Unable to find initiator with S_ID %x:%x:%x", - s_id[0], s_id[1], s_id[2]); - } else - ql_log(ql_log_info, vha, 0xf071, - "qla_target(%d): Unable to find " - "initiator with S_ID %x:%x:%x", - vha->vp_idx, s_id[0], s_id[1], - s_id[2]); + ql_log(ql_log_info, vha, 0xf071, + "qla_target(%d): Unable to find " + "initiator with S_ID %x:%x:%x", + vha->vp_idx, s_id[0], s_id[1], + s_id[2]); if (rc == -ENOENT) { qlt_port_logo_t logo; @@ -5875,7 +5664,6 @@ retry: mutex_unlock(&vha->vha_tgt.tgt_mutex); - kfree(fcport); return sess; } @@ -5884,7 +5672,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, { struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; unsigned long flags = 0, flags2 = 0; uint32_t be_s_id; uint8_t s_id[3]; @@ -5911,12 +5699,18 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (!sess) goto out_term2; } else { - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { sess = NULL; goto out_term2; } - kref_get(&sess->sess_kref); + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + "%s: kref_get fail %8phC \n", + __func__, sess->port_name); + sess = NULL; + goto out_term2; + } } spin_lock_irqsave(&ha->hardware_lock, flags); @@ -5928,8 +5722,8 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (rc != 0) goto out_term; spin_unlock_irqrestore(&ha->hardware_lock, flags); - - qlt_put_sess(sess); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); return; @@ -5940,7 +5734,8 @@ out_term: qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); spin_unlock_irqrestore(&ha->hardware_lock, flags); - qlt_put_sess(sess); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); } @@ -5950,7 +5745,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, struct atio_from_isp *a = 
&prm->tm_iocb2; struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; unsigned long flags; uint8_t *s_id = NULL; /* to hide compiler warnings */ int rc; @@ -5975,12 +5770,18 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (!sess) goto out_term; } else { - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { sess = NULL; goto out_term; } - kref_get(&sess->sess_kref); + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + "%s: kref_get fail %8phC\n", + __func__, sess->port_name); + sess = NULL; + goto out_term; + } } iocb = a; @@ -5992,13 +5793,13 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (rc != 0) goto out_term; - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; out_term: qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } @@ -6075,17 +5876,10 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) tgt->ha = ha; tgt->vha = base_vha; init_waitqueue_head(&tgt->waitQ); - INIT_LIST_HEAD(&tgt->sess_list); INIT_LIST_HEAD(&tgt->del_sess_list); - INIT_DELAYED_WORK(&tgt->sess_del_work, - (void (*)(struct work_struct *))qlt_del_sess_work_fn); spin_lock_init(&tgt->sess_work_lock); INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); INIT_LIST_HEAD(&tgt->sess_works_list); - spin_lock_init(&tgt->srr_lock); - INIT_LIST_HEAD(&tgt->srr_ctio_list); - INIT_LIST_HEAD(&tgt->srr_imm_list); - INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); atomic_set(&tgt->tgt_global_resets_count, 0); base_vha->vha_tgt.qla_tgt = tgt; @@ -6251,29 +6045,25 @@ EXPORT_SYMBOL(qlt_lport_deregister); /* Must be called under HW lock */ static void qlt_set_mode(struct scsi_qla_host *vha) { - struct qla_hw_data *ha = vha->hw; - switch (ql2x_ini_mode) { case QLA2XXX_INI_MODE_DISABLED: case QLA2XXX_INI_MODE_EXCLUSIVE: vha->host->active_mode = MODE_TARGET; break; case QLA2XXX_INI_MODE_ENABLED: - vha->host->active_mode |= MODE_TARGET; + vha->host->active_mode = MODE_UNKNOWN; + break; + case QLA2XXX_INI_MODE_DUAL: + vha->host->active_mode = MODE_DUAL; break; default: break; } - - if (ha->tgt.ini_mode_force_reverse) - qla_reverse_ini_mode(vha); } /* Must be called under HW lock */ static void qlt_clear_mode(struct scsi_qla_host *vha) { - struct qla_hw_data *ha = vha->hw; - switch (ql2x_ini_mode) { case QLA2XXX_INI_MODE_DISABLED: vha->host->active_mode = MODE_UNKNOWN; @@ -6282,14 +6072,12 @@ static void qlt_clear_mode(struct scsi_qla_host *vha) vha->host->active_mode = MODE_INITIATOR; break; case QLA2XXX_INI_MODE_ENABLED: - vha->host->active_mode &= ~MODE_TARGET; + case QLA2XXX_INI_MODE_DUAL: + vha->host->active_mode = MODE_INITIATOR; break; default: break; } - - if (ha->tgt.ini_mode_force_reverse) - qla_reverse_ini_mode(vha); } /* @@ -6377,9 +6165,6 @@ static void qlt_disable_vha(struct scsi_qla_host *vha) void qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) { - if (!qla_tgt_mode_enabled(vha)) - return; - vha->vha_tgt.qla_tgt = NULL; mutex_init(&vha->vha_tgt.tgt_mutex); @@ -6405,13 +6190,11 @@ qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req) * FC-4 Feature bit 0 indicates target functionality to the name server. 
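[Editor's note] The qlt_rff_id() change just below now derives the FC-4 feature bits reported to the name server from the single active mode: target-only advertises bit 0, initiator-only bit 1, and the new dual mode advertises both. A tiny sketch of that selection follows; the mode constants are illustrative, not the driver's MODE_* values.

/*
 * FC-4 feature bits by operating mode, mirroring the logic in the
 * qlt_rff_id() hunk below.
 */
#include <stdio.h>

enum sketch_mode { SK_MODE_TARGET, SK_MODE_INITIATOR, SK_MODE_DUAL };

#define SK_FC4_FEATURE_TARGET    0x1	/* BIT_0 */
#define SK_FC4_FEATURE_INITIATOR 0x2	/* BIT_1 */

static unsigned int sketch_rff_fc4_feature(enum sketch_mode mode)
{
	switch (mode) {
	case SK_MODE_TARGET:    return SK_FC4_FEATURE_TARGET;
	case SK_MODE_INITIATOR: return SK_FC4_FEATURE_INITIATOR;
	case SK_MODE_DUAL:      return SK_FC4_FEATURE_TARGET | SK_FC4_FEATURE_INITIATOR;
	}
	return 0;
}

int main(void)
{
	printf("target:    0x%x\n", sketch_rff_fc4_feature(SK_MODE_TARGET));
	printf("initiator: 0x%x\n", sketch_rff_fc4_feature(SK_MODE_INITIATOR));
	printf("dual:      0x%x\n", sketch_rff_fc4_feature(SK_MODE_DUAL));
	return 0;
}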
*/ if (qla_tgt_mode_enabled(vha)) { - if (qla_ini_mode_enabled(vha)) - ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; - else - ct_req->req.rff_id.fc4_feature = BIT_0; + ct_req->req.rff_id.fc4_feature = BIT_0; } else if (qla_ini_mode_enabled(vha)) { ct_req->req.rff_id.fc4_feature = BIT_1; - } + } else if (qla_dual_mode_enabled(vha)) + ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; } /* @@ -6430,7 +6213,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha) uint16_t cnt; struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; - if (!qla_tgt_mode_enabled(vha)) + if (qla_ini_mode_enabled(vha)) return; for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { @@ -6523,8 +6306,10 @@ void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) { struct qla_hw_data *ha = vha->hw; + u32 tmp; + u16 t; - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; @@ -6537,13 +6322,30 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) ha->tgt.saved_set = 1; } - nv->exchange_count = cpu_to_le16(0xFFFF); + if (qla_tgt_mode_enabled(vha)) { + nv->exchange_count = cpu_to_le16(0xFFFF); + } else { /* dual */ + if (ql_dm_tgt_ex_pct > 100) { + ql_dm_tgt_ex_pct = 50; + } else if (ql_dm_tgt_ex_pct == 100) { + /* leave some for FW */ + ql_dm_tgt_ex_pct = 95; + } + + tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; + tmp = tmp/100; + if (tmp > 0xffff) + tmp = 0xffff; + + t = tmp & 0xffff; + nv->exchange_count = cpu_to_le16(t); + } /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ - if (!qla_ini_mode_enabled(vha)) + if (qla_tgt_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ @@ -6622,11 +6424,13 @@ void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) { struct qla_hw_data *ha = vha->hw; + u32 tmp; + u16 t; if (!QLA_TGT_MODE_ENABLED()) return; - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; @@ -6639,13 +6443,29 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) ha->tgt.saved_set = 1; } - nv->exchange_count = cpu_to_le16(0xFFFF); + if (qla_tgt_mode_enabled(vha)) { + nv->exchange_count = cpu_to_le16(0xFFFF); + } else { /* dual */ + if (ql_dm_tgt_ex_pct > 100) { + ql_dm_tgt_ex_pct = 50; + } else if (ql_dm_tgt_ex_pct == 100) { + /* leave some for FW */ + ql_dm_tgt_ex_pct = 95; + } + + tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; + tmp = tmp/100; + if (tmp > 0xffff) + tmp = 0xffff; + t = tmp & 0xffff; + nv->exchange_count = cpu_to_le16(t); + } /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ - if (!qla_ini_mode_enabled(vha)) + if (qla_tgt_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); @@ -6749,10 +6569,12 @@ void qlt_modify_vp_config(struct scsi_qla_host *vha, struct vp_config_entry_24xx *vpmod) { - if (qla_tgt_mode_enabled(vha)) + /* enable target mode. 
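[Editor's note] In the two NVRAM stage-1 hunks above, dual mode splits the firmware exchange pool between initiator and target work using the ql_dm_tgt_ex_pct percentage: values above 100 fall back to 50, exactly 100 is trimmed to 95 to leave headroom for the firmware, and the result is capped to the 16-bit exchange_count field. The standalone sketch below reproduces that arithmetic; the helper name is hypothetical and the real code writes the value with cpu_to_le16() into nv->exchange_count.

/*
 * Dual-mode exchange split, as in the nvram stage-1 hunks above.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t sketch_dual_mode_exchanges(uint32_t orig_fw_xcb_count, int tgt_pct)
{
	uint32_t tmp;

	if (tgt_pct > 100)
		tgt_pct = 50;		/* nonsense value: fall back to an even split */
	else if (tgt_pct == 100)
		tgt_pct = 95;		/* leave some exchanges for the firmware itself */

	tmp = orig_fw_xcb_count * (uint32_t)tgt_pct / 100;
	if (tmp > 0xffff)
		tmp = 0xffff;		/* the NVRAM field is only 16 bits wide */

	return (uint16_t)tmp;
}

int main(void)
{
	printf("4096 XCBs, 50%%   -> %u\n", sketch_dual_mode_exchanges(4096, 50));
	printf("4096 XCBs, 100%%  -> %u\n", sketch_dual_mode_exchanges(4096, 100));
	printf("200000 XCBs, 90%% -> %u (capped)\n", sketch_dual_mode_exchanges(200000, 90));
	return 0;
}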
Bit5 = 1 => disable */ + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_5; - /* Disable ini mode, if requested */ - if (!qla_ini_mode_enabled(vha)) + + /* Disable ini mode, if requested. bit4 = 1 => disable */ + if (qla_tgt_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_4; } @@ -6772,6 +6594,11 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) mutex_init(&base_vha->vha_tgt.tgt_mutex); mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); + + INIT_LIST_HEAD(&base_vha->unknown_atio_list); + INIT_DELAYED_WORK(&base_vha->unknown_atio_work, + qlt_unknown_atio_work_fn); + qlt_clear_mode(base_vha); } @@ -6906,6 +6733,8 @@ static int __init qlt_parse_ini_mode(void) ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; + else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) + ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; else return false; @@ -6935,9 +6764,8 @@ int __init qlt_init(void) } qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", - sizeof(qlt_plogi_ack_t), - __alignof__(qlt_plogi_ack_t), - 0, NULL); + sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), + 0, NULL); if (!qla_tgt_plogi_cachep) { ql_log(ql_log_fatal, NULL, 0xe06d, diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 0824a8164a24..a7f90dcaae37 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -45,10 +45,12 @@ #define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive" #define QLA2XXX_INI_MODE_STR_DISABLED "disabled" #define QLA2XXX_INI_MODE_STR_ENABLED "enabled" +#define QLA2XXX_INI_MODE_STR_DUAL "dual" #define QLA2XXX_INI_MODE_EXCLUSIVE 0 #define QLA2XXX_INI_MODE_DISABLED 1 #define QLA2XXX_INI_MODE_ENABLED 2 +#define QLA2XXX_INI_MODE_DUAL 3 #define QLA2XXX_COMMAND_COUNT_INIT 250 #define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250 @@ -118,84 +120,6 @@ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ : (uint16_t)(iocb)->u.isp2x.target.id.standard) -#ifndef IMMED_NOTIFY_TYPE -#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */ -/* - * ISP queue - immediate notify entry structure definition. - * This is sent by the ISP to the Target driver. - * This IOCB would have report of events sent by the - * initiator, that needs to be handled by the target - * driver immediately. - */ -struct imm_ntfy_from_isp { - uint8_t entry_type; /* Entry type. */ - uint8_t entry_count; /* Entry count. */ - uint8_t sys_define; /* System defined. */ - uint8_t entry_status; /* Entry Status. */ - union { - struct { - uint32_t sys_define_2; /* System defined. 
*/ - target_id_t target; - uint16_t lun; - uint8_t target_id; - uint8_t reserved_1; - uint16_t status_modifier; - uint16_t status; - uint16_t task_flags; - uint16_t seq_id; - uint16_t srr_rx_id; - uint32_t srr_rel_offs; - uint16_t srr_ui; -#define SRR_IU_DATA_IN 0x1 -#define SRR_IU_DATA_OUT 0x5 -#define SRR_IU_STATUS 0x7 - uint16_t srr_ox_id; - uint8_t reserved_2[28]; - } isp2x; - struct { - uint32_t reserved; - uint16_t nport_handle; - uint16_t reserved_2; - uint16_t flags; -#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 -#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 - uint16_t srr_rx_id; - uint16_t status; - uint8_t status_subcode; - uint8_t fw_handle; - uint32_t exchange_address; - uint32_t srr_rel_offs; - uint16_t srr_ui; - uint16_t srr_ox_id; - union { - struct { - uint8_t node_name[8]; - } plogi; /* PLOGI/ADISC/PDISC */ - struct { - /* PRLI word 3 bit 0-15 */ - uint16_t wd3_lo; - uint8_t resv0[6]; - } prli; - struct { - uint8_t port_id[3]; - uint8_t resv1; - uint16_t nport_handle; - uint16_t resv2; - } req_els; - } u; - uint8_t port_name[8]; - uint8_t resv3[3]; - uint8_t vp_index; - uint32_t reserved_5; - uint8_t port_id[3]; - uint8_t reserved_6; - } isp24; - } u; - uint16_t reserved_7; - uint16_t ox_id; -} __packed; -#endif - #ifndef NOTIFY_ACK_TYPE #define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */ /* @@ -731,7 +655,7 @@ struct abts_resp_from_24xx_fw { \********************************************************************/ struct qla_tgt_mgmt_cmd; -struct qla_tgt_sess; +struct fc_port; /* * This structure provides a template of function calls that the @@ -744,21 +668,22 @@ struct qla_tgt_func_tmpl { unsigned char *, uint32_t, int, int, int); void (*handle_data)(struct qla_tgt_cmd *); void (*handle_dif_err)(struct qla_tgt_cmd *); - int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, + int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); void (*free_mcmd)(struct qla_tgt_mgmt_cmd *); - void (*free_session)(struct qla_tgt_sess *); + void (*free_session)(struct fc_port *); int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *, - struct qla_tgt_sess *); - void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool); - struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *, + struct fc_port *); + void (*update_sess)(struct fc_port *, port_id_t, uint16_t, bool); + struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *, const uint16_t); - struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *, + struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *, const uint8_t *); - void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *); - void (*shutdown_sess)(struct qla_tgt_sess *); + void (*clear_nacl_from_fcport_map)(struct fc_port *); + void (*put_sess)(struct fc_port *); + void (*shutdown_sess)(struct fc_port *); }; int qla2x00_wait_for_hba_online(struct scsi_qla_host *); @@ -795,6 +720,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define QLA_TGT_ABORT_ALL 0xFFFE #define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD #define QLA_TGT_NEXUS_LOSS 0xFFFC +#define QLA_TGT_ABTS 0xFFFB +#define QLA_TGT_2G_ABORT_TASK 0xFFFA /* Notify Acknowledge flags */ #define NOTIFY_ACK_RES_COUNT BIT_8 @@ -872,12 +799,8 @@ struct qla_tgt { /* Count of sessions refering qla_tgt. Protected by hardware_lock. */ int sess_count; - /* Protected by hardware_lock. Addition also protected by tgt_mutex. 
*/ - struct list_head sess_list; - /* Protected by hardware_lock */ struct list_head del_sess_list; - struct delayed_work sess_del_work; spinlock_t sess_work_lock; struct list_head sess_works_list; @@ -888,16 +811,7 @@ struct qla_tgt { int notify_ack_expected; int abts_resp_expected; int modify_lun_expected; - - int ctio_srr_id; - int imm_srr_id; - spinlock_t srr_lock; - struct list_head srr_ctio_list; - struct list_head srr_imm_list; - struct work_struct srr_work; - atomic_t tgt_global_resets_count; - struct list_head tgt_list_entry; }; @@ -910,92 +824,32 @@ struct qla_tgt_sess_op { bool aborted; }; -enum qla_sess_deletion { - QLA_SESS_DELETION_NONE = 0, - QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of - * this one */ - QLA_SESS_DELETION_IN_PROGRESS = 2, -}; - -typedef enum { - QLT_PLOGI_LINK_SAME_WWN, - QLT_PLOGI_LINK_CONFLICT, - QLT_PLOGI_LINK_MAX -} qlt_plogi_link_t; - -typedef struct { - struct list_head list; - struct imm_ntfy_from_isp iocb; - port_id_t id; - int ref_count; -} qlt_plogi_ack_t; - -/* - * Equivilant to IT Nexus (Initiator-Target) - */ -struct qla_tgt_sess { - uint16_t loop_id; - port_id_t s_id; - - unsigned int conf_compl_supported:1; - unsigned int deleted:2; - unsigned int local:1; - unsigned int logout_on_delete:1; - unsigned int keep_nport_handle:1; - unsigned int send_els_logo:1; - - unsigned char logout_completed; - - int generation; - - struct se_session *se_sess; - struct kref sess_kref; - struct scsi_qla_host *vha; - struct qla_tgt *tgt; - - struct list_head sess_list_entry; - unsigned long expires; - struct list_head del_list_entry; - - uint8_t port_name[WWN_SIZE]; - struct work_struct free_work; - - qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; +enum trace_flags { + TRC_NEW_CMD = BIT_0, + TRC_DO_WORK = BIT_1, + TRC_DO_WORK_ERR = BIT_2, + TRC_XFR_RDY = BIT_3, + TRC_XMIT_DATA = BIT_4, + TRC_XMIT_STATUS = BIT_5, + TRC_SRR_RSP = BIT_6, + TRC_SRR_XRDY = BIT_7, + TRC_SRR_TERM = BIT_8, + TRC_SRR_CTIO = BIT_9, + TRC_FLUSH = BIT_10, + TRC_CTIO_ERR = BIT_11, + TRC_CTIO_DONE = BIT_12, + TRC_CTIO_ABORTED = BIT_13, + TRC_CTIO_STRANGE= BIT_14, + TRC_CMD_DONE = BIT_15, + TRC_CMD_CHK_STOP = BIT_16, + TRC_CMD_FREE = BIT_17, + TRC_DATA_IN = BIT_18, + TRC_ABORT = BIT_19, }; -typedef enum { - /* - * BIT_0 - Atio Arrival / schedule to work - * BIT_1 - qlt_do_work - * BIT_2 - qlt_do work failed - * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending - * BIT_4 - read respond/tcm_qla2xx_queue_data_in - * BIT_5 - status respond / tcm_qla2xx_queue_status - * BIT_6 - tcm request to abort/Term exchange. - * pre_xmit_response->qlt_send_term_exchange - * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) - * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) - * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange) - * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data - - * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd - * BIT_13 - Bad completion - - * qlt_ctio_do_completion --> qlt_term_ctio_exchange - * BIT_14 - Back end data received/sent. 
- * BIT_15 - SRR prepare ctio - * BIT_16 - complete free - * BIT_17 - flush - qlt_abort_cmd_on_host_reset - * BIT_18 - completion w/abort status - * BIT_19 - completion w/unknown status - * BIT_20 - tcm_qla2xxx_free_cmd - */ - CMD_FLAG_DATA_WORK = BIT_11, - CMD_FLAG_DATA_WORK_FREE = BIT_21, -} cmd_flags_t; - struct qla_tgt_cmd { struct se_cmd se_cmd; - struct qla_tgt_sess *sess; + struct fc_port *sess; int state; struct work_struct free_work; struct work_struct work; @@ -1014,6 +868,8 @@ struct qla_tgt_cmd { unsigned int cmd_sent_to_fw:1; unsigned int cmd_in_wq:1; unsigned int aborted:1; + unsigned int data_work:1; + unsigned int data_work_free:1; struct scatterlist *sg; /* cmd data buffer SG vector */ int sg_cnt; /* SG segments count */ @@ -1038,7 +894,7 @@ struct qla_tgt_cmd { uint64_t jiffies_at_alloc; uint64_t jiffies_at_free; - cmd_flags_t cmd_flags; + enum trace_flags trc_flags; }; struct qla_tgt_sess_work_param { @@ -1056,9 +912,9 @@ struct qla_tgt_sess_work_param { }; struct qla_tgt_mgmt_cmd { - uint8_t tmr_func; + uint16_t tmr_func; uint8_t fc_tm_rsp; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct se_cmd se_cmd; struct work_struct free_work; unsigned int flags; @@ -1090,18 +946,6 @@ struct qla_tgt_prm { uint16_t tot_dsds; }; -struct qla_tgt_srr_imm { - struct list_head srr_list_entry; - int srr_id; - struct imm_ntfy_from_isp imm_ntfy; -}; - -struct qla_tgt_srr_ctio { - struct list_head srr_list_entry; - int srr_id; - struct qla_tgt_cmd *cmd; -}; - /* Check for Switch reserved address */ #define IS_SW_RESV_ADDR(_s_id) \ ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc)) @@ -1121,7 +965,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); extern int qlt_lport_register(void *, u64, u64, u64, int (*callback)(struct scsi_qla_host *, void *, u64, u64)); extern void qlt_lport_deregister(struct scsi_qla_host *); -void qlt_put_sess(struct qla_tgt_sess *sess); +extern void qlt_unreg_sess(struct fc_port *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); @@ -1133,24 +977,22 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int); * is not set. Right now, ha value is ignored. 
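[Editor's note] The enum trace_flags introduced a few hunks above replaces the anonymous BIT_0..BIT_20 markers that the deleted comment block documented: each stage of a command's life now ORs a named bit into trc_flags, so the accumulated mask can be read back as a trail. The sketch below mirrors a handful of the real TRC_* bit positions and shows how such a trail is built and decoded; the dump helper is an illustration, not driver code.

/*
 * Named life-cycle trace bits instead of bare BIT_n markers.
 */
#include <stddef.h>
#include <stdio.h>

enum sketch_trc {
	SK_TRC_NEW_CMD     = 1u << 0,	/* mirrors TRC_NEW_CMD */
	SK_TRC_DO_WORK     = 1u << 1,	/* mirrors TRC_DO_WORK */
	SK_TRC_XFR_RDY     = 1u << 3,	/* mirrors TRC_XFR_RDY */
	SK_TRC_XMIT_STATUS = 1u << 5,	/* mirrors TRC_XMIT_STATUS */
	SK_TRC_CMD_DONE    = 1u << 15,	/* mirrors TRC_CMD_DONE */
};

static const struct { enum sketch_trc bit; const char *name; } sk_trc_names[] = {
	{ SK_TRC_NEW_CMD,     "NEW_CMD" },
	{ SK_TRC_DO_WORK,     "DO_WORK" },
	{ SK_TRC_XFR_RDY,     "XFR_RDY" },
	{ SK_TRC_XMIT_STATUS, "XMIT_STATUS" },
	{ SK_TRC_CMD_DONE,    "CMD_DONE" },
};

static void sk_trc_dump(unsigned int trc_flags)
{
	for (size_t i = 0; i < sizeof(sk_trc_names) / sizeof(sk_trc_names[0]); i++)
		if (trc_flags & sk_trc_names[i].bit)
			printf(" %s", sk_trc_names[i].name);
	printf("\n");
}

int main(void)
{
	unsigned int trc_flags = 0;

	trc_flags |= SK_TRC_NEW_CMD;		/* command arrived */
	trc_flags |= SK_TRC_DO_WORK;		/* submitted to the backend */
	trc_flags |= SK_TRC_XMIT_STATUS;	/* status sent back */
	trc_flags |= SK_TRC_CMD_DONE;		/* released */

	printf("trail:");
	sk_trc_dump(trc_flags);
	return 0;
}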
*/ #define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED) + extern int ql2x_ini_mode; static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha) { - return ha->host->active_mode & MODE_TARGET; + return ha->host->active_mode == MODE_TARGET; } static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha) { - return ha->host->active_mode & MODE_INITIATOR; + return ha->host->active_mode == MODE_INITIATOR; } -static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha) +static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha) { - if (ha->host->active_mode & MODE_INITIATOR) - ha->host->active_mode &= ~MODE_INITIATOR; - else - ha->host->active_mode |= MODE_INITIATOR; + return (ha->host->active_mode == MODE_DUAL); } static inline uint32_t sid_to_key(const uint8_t *s_id) diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 3084983c1287..8e8ab0fa9672 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -282,10 +282,10 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work) cmd->cmd_in_wq = 0; - WARN_ON(cmd->cmd_flags & BIT_16); + WARN_ON(cmd->trc_flags & TRC_CMD_FREE); cmd->vha->tgt_counters.qla_core_ret_sta_ctio++; - cmd->cmd_flags |= BIT_16; + cmd->trc_flags |= TRC_CMD_FREE; transport_generic_free_cmd(&cmd->se_cmd, 0); } @@ -299,8 +299,8 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) cmd->vha->tgt_counters.core_qla_free_cmd++; cmd->cmd_in_wq = 1; - BUG_ON(cmd->cmd_flags & BIT_20); - cmd->cmd_flags |= BIT_20; + WARN_ON(cmd->trc_flags & TRC_CMD_DONE); + cmd->trc_flags |= TRC_CMD_DONE; INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); @@ -315,7 +315,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) { cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - cmd->cmd_flags |= BIT_14; + cmd->trc_flags |= TRC_CMD_CHK_STOP; } return target_put_sess_cmd(se_cmd); @@ -339,9 +339,26 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) qlt_free_cmd(cmd); } +static void tcm_qla2xxx_release_session(struct kref *kref) +{ + struct fc_port *sess = container_of(kref, + struct fc_port, sess_kref); + + qlt_unreg_sess(sess); +} + +static void tcm_qla2xxx_put_sess(struct fc_port *sess) +{ + if (!sess) + return; + + assert_spin_locked(&sess->vha->hw->tgt.sess_lock); + kref_put(&sess->sess_kref, tcm_qla2xxx_release_session); +} + static void tcm_qla2xxx_close_session(struct se_session *se_sess) { - struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; + struct fc_port *sess = se_sess->fabric_sess_ptr; struct scsi_qla_host *vha; unsigned long flags; @@ -350,7 +367,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); target_sess_cmd_list_set_waiting(se_sess); - qlt_put_sess(sess); + tcm_qla2xxx_put_sess(sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } @@ -377,7 +394,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) cmd->se_cmd.se_cmd_flags); return 0; } - cmd->cmd_flags |= BIT_3; + cmd->trc_flags |= TRC_XFR_RDY; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -441,7 +458,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, { struct se_cmd *se_cmd = &cmd->se_cmd; struct se_session *se_sess; - struct qla_tgt_sess *sess; + struct fc_port *sess; #ifdef 
CONFIG_TCM_QLA2XXX_DEBUG struct se_portal_group *se_tpg; struct tcm_qla2xxx_tpg *tpg; @@ -456,7 +473,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, sess = cmd->sess; if (!sess) { - pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); + pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n"); return -EINVAL; } @@ -493,9 +510,9 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) cmd->cmd_in_wq = 0; spin_lock_irqsave(&cmd->cmd_lock, flags); - cmd->cmd_flags |= CMD_FLAG_DATA_WORK; + cmd->data_work = 1; if (cmd->aborted) { - cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + cmd->data_work_free = 1; spin_unlock_irqrestore(&cmd->cmd_lock, flags); tcm_qla2xxx_free_cmd(cmd); @@ -532,7 +549,7 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) */ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) { - cmd->cmd_flags |= BIT_10; + cmd->trc_flags |= TRC_DATA_IN; cmd->cmd_in_wq = 1; INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); @@ -563,13 +580,49 @@ static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) * Called from qla_target.c:qlt_issue_task_mgmt() */ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, - uint8_t tmr_func, uint32_t tag) + uint16_t tmr_func, uint32_t tag) { - struct qla_tgt_sess *sess = mcmd->sess; + struct fc_port *sess = mcmd->sess; struct se_cmd *se_cmd = &mcmd->se_cmd; + int transl_tmr_func = 0; + + switch (tmr_func) { + case QLA_TGT_ABTS: + pr_debug("%ld: ABTS received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK; + break; + case QLA_TGT_2G_ABORT_TASK: + pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK; + break; + case QLA_TGT_CLEAR_ACA: + pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no); + transl_tmr_func = TMR_CLEAR_ACA; + break; + case QLA_TGT_TARGET_RESET: + pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no); + transl_tmr_func = TMR_TARGET_WARM_RESET; + break; + case QLA_TGT_LUN_RESET: + pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no); + transl_tmr_func = TMR_LUN_RESET; + break; + case QLA_TGT_CLEAR_TS: + pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no); + transl_tmr_func = TMR_CLEAR_TASK_SET; + break; + case QLA_TGT_ABORT_TS: + pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK_SET; + break; + default: + pr_debug("%ld: Unknown task mgmt fn 0x%x\n", + sess->vha->host_no, tmr_func); + return -ENOSYS; + } return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, - tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); + transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); } static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) @@ -591,7 +644,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) return 0; } - cmd->cmd_flags |= BIT_4; + cmd->trc_flags |= TRC_XMIT_DATA; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -622,11 +675,11 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg_cnt = 0; cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - if (cmd->cmd_flags & BIT_5) { - pr_crit("Bit_5 already set for cmd = %p.\n", cmd); + if (cmd->trc_flags & TRC_XMIT_STATUS) { + pr_crit("Multiple calls for status = %p.\n", cmd); dump_stack(); } - cmd->cmd_flags |= BIT_5; + cmd->trc_flags |= TRC_XMIT_STATUS; if 
(se_cmd->data_direction == DMA_FROM_DEVICE) { /* @@ -682,10 +735,7 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) qlt_xmit_tm_rsp(mcmd); } - -#define DATA_WORK_NOT_FREE(_flags) \ - (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \ - CMD_FLAG_DATA_WORK) +#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free) static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, @@ -697,13 +747,13 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) spin_lock_irqsave(&cmd->cmd_lock, flags); if ((cmd->state == QLA_TGT_STATE_NEW)|| - ((cmd->state == QLA_TGT_STATE_DATA_IN) && - DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) { - - cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + ((cmd->state == QLA_TGT_STATE_DATA_IN) && + DATA_WORK_NOT_FREE(cmd))) { + cmd->data_work_free = 1; spin_unlock_irqrestore(&cmd->cmd_lock, flags); - /* Cmd have not reached firmware. - * Use this trigger to free it. */ + /* + * cmd has not reached fw, Use this trigger to free it. + */ tcm_qla2xxx_free_cmd(cmd); return; } @@ -713,11 +763,11 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, - struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); + struct tcm_qla2xxx_nacl *, struct fc_port *); /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) +static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess) { struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; struct se_portal_group *se_tpg = se_nacl->se_tpg; @@ -756,7 +806,7 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); } -static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) +static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess) { assert_spin_locked(&sess->vha->hw->tgt.sess_lock); target_sess_cmd_list_set_waiting(sess->se_sess); @@ -1141,7 +1191,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( +static struct fc_port *tcm_qla2xxx_find_sess_by_s_id( scsi_qla_host_t *vha, const uint8_t *s_id) { @@ -1169,12 +1219,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( se_nacl, se_nacl->initiatorname); nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - if (!nacl->qla_tgt_sess) { - pr_err("Unable to locate struct qla_tgt_sess\n"); + if (!nacl->fc_port) { + pr_err("Unable to locate struct fc_port\n"); return NULL; } - return nacl->qla_tgt_sess; + return nacl->fc_port; } /* @@ -1185,7 +1235,7 @@ static void tcm_qla2xxx_set_sess_by_s_id( struct se_node_acl *new_se_nacl, struct tcm_qla2xxx_nacl *nacl, struct se_session *se_sess, - struct qla_tgt_sess *qla_tgt_sess, + struct fc_port *fc_port, uint8_t *s_id) { u32 key; @@ -1209,22 +1259,22 @@ static void tcm_qla2xxx_set_sess_by_s_id( pr_debug("Wiping nonexisting fc_port entry\n"); } - qla_tgt_sess->se_sess = se_sess; - nacl->qla_tgt_sess = qla_tgt_sess; + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; return; } - if (nacl->qla_tgt_sess) { + if (nacl->fc_port) { if (new_se_nacl == NULL) { - pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n"); + pr_debug("Clearing existing nacl->fc_port and fc_port entry\n"); btree_remove32(&lport->lport_fcport_map, key); - 
nacl->qla_tgt_sess = NULL; + nacl->fc_port = NULL; return; } - pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n"); + pr_debug("Replacing existing nacl->fc_port and fc_port entry\n"); btree_update32(&lport->lport_fcport_map, key, new_se_nacl); - qla_tgt_sess->se_sess = se_sess; - nacl->qla_tgt_sess = qla_tgt_sess; + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; return; } @@ -1234,19 +1284,19 @@ static void tcm_qla2xxx_set_sess_by_s_id( return; } - pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n"); + pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n"); btree_update32(&lport->lport_fcport_map, key, new_se_nacl); - qla_tgt_sess->se_sess = se_sess; - nacl->qla_tgt_sess = qla_tgt_sess; + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; - pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n", - nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); + pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n", + nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); } /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( +static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id( scsi_qla_host_t *vha, const uint16_t loop_id) { @@ -1274,12 +1324,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - if (!nacl->qla_tgt_sess) { - pr_err("Unable to locate struct qla_tgt_sess\n"); + if (!nacl->fc_port) { + pr_err("Unable to locate struct fc_port\n"); return NULL; } - return nacl->qla_tgt_sess; + return nacl->fc_port; } /* @@ -1290,7 +1340,7 @@ static void tcm_qla2xxx_set_sess_by_loop_id( struct se_node_acl *new_se_nacl, struct tcm_qla2xxx_nacl *nacl, struct se_session *se_sess, - struct qla_tgt_sess *qla_tgt_sess, + struct fc_port *fc_port, uint16_t loop_id) { struct se_node_acl *saved_nacl; @@ -1305,27 +1355,27 @@ static void tcm_qla2xxx_set_sess_by_loop_id( if (!saved_nacl) { pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n"); fc_loopid->se_nacl = new_se_nacl; - if (qla_tgt_sess->se_sess != se_sess) - qla_tgt_sess->se_sess = se_sess; - if (nacl->qla_tgt_sess != qla_tgt_sess) - nacl->qla_tgt_sess = qla_tgt_sess; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; return; } - if (nacl->qla_tgt_sess) { + if (nacl->fc_port) { if (new_se_nacl == NULL) { - pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); + pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n"); fc_loopid->se_nacl = NULL; - nacl->qla_tgt_sess = NULL; + nacl->fc_port = NULL; return; } - pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); + pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n"); fc_loopid->se_nacl = new_se_nacl; - if (qla_tgt_sess->se_sess != se_sess) - qla_tgt_sess->se_sess = se_sess; - if (nacl->qla_tgt_sess != qla_tgt_sess) - nacl->qla_tgt_sess = qla_tgt_sess; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; return; } @@ -1335,29 +1385,29 @@ static void tcm_qla2xxx_set_sess_by_loop_id( return; } - pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n"); + pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n"); fc_loopid->se_nacl = new_se_nacl; - if 
(qla_tgt_sess->se_sess != se_sess) - qla_tgt_sess->se_sess = se_sess; - if (nacl->qla_tgt_sess != qla_tgt_sess) - nacl->qla_tgt_sess = qla_tgt_sess; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; - pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n", - nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); + pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n", + nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); } /* * Should always be called with qla_hw_data->tgt.sess_lock held. */ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, - struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) + struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess) { struct se_session *se_sess = sess->se_sess; unsigned char be_sid[3]; - be_sid[0] = sess->s_id.b.domain; - be_sid[1] = sess->s_id.b.area; - be_sid[2] = sess->s_id.b.al_pa; + be_sid[0] = sess->d_id.b.domain; + be_sid[1] = sess->d_id.b.area; + be_sid[2] = sess->d_id.b.al_pa; tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, sess, be_sid); @@ -1365,7 +1415,7 @@ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, sess, sess->loop_id); } -static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) +static void tcm_qla2xxx_free_session(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; struct qla_hw_data *ha = tgt->ha; @@ -1377,7 +1427,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) se_sess = sess->se_sess; if (!se_sess) { - pr_err("struct qla_tgt_sess->se_sess is NULL\n"); + pr_err("struct fc_port->se_sess is NULL\n"); dump_stack(); return; } @@ -1404,14 +1454,14 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, struct se_node_acl *se_nacl = se_sess->se_node_acl; struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - struct qla_tgt_sess *qlat_sess = p; + struct fc_port *qlat_sess = p; uint16_t loop_id = qlat_sess->loop_id; unsigned long flags; unsigned char be_sid[3]; - be_sid[0] = qlat_sess->s_id.b.domain; - be_sid[1] = qlat_sess->s_id.b.area; - be_sid[2] = qlat_sess->s_id.b.al_pa; + be_sid[0] = qlat_sess->d_id.b.domain; + be_sid[1] = qlat_sess->d_id.b.area; + be_sid[2] = qlat_sess->d_id.b.al_pa; /* * And now setup se_nacl and session pointers into HW lport internal @@ -1434,7 +1484,7 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, static int tcm_qla2xxx_check_initiator_node_acl( scsi_qla_host_t *vha, unsigned char *fc_wwpn, - struct qla_tgt_sess *qlat_sess) + struct fc_port *qlat_sess) { struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport; @@ -1478,7 +1528,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( return 0; } -static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, +static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, uint16_t loop_id, bool conf_compl_supported) { struct qla_tgt *tgt = sess->tgt; @@ -1491,11 +1541,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, u32 key; - if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24) + if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24) pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", sess, sess->port_name, - sess->loop_id, loop_id, sess->s_id.b.domain, - sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain, + sess->loop_id, loop_id, 
sess->d_id.b.domain, + sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain, s_id.b.area, s_id.b.al_pa); if (sess->loop_id != loop_id) { @@ -1515,18 +1565,20 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, sess->loop_id = loop_id; } - if (sess->s_id.b24 != s_id.b24) { - key = (((u32) sess->s_id.b.domain << 16) | - ((u32) sess->s_id.b.area << 8) | - ((u32) sess->s_id.b.al_pa)); + if (sess->d_id.b24 != s_id.b24) { + key = (((u32) sess->d_id.b.domain << 16) | + ((u32) sess->d_id.b.area << 8) | + ((u32) sess->d_id.b.al_pa)); if (btree_lookup32(&lport->lport_fcport_map, key)) - WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl, - "Found wrong se_nacl when updating s_id %x:%x:%x\n", - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); + WARN(btree_remove32(&lport->lport_fcport_map, key) != + se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n", + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa); else WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n", - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa); key = (((u32) s_id.b.domain << 16) | ((u32) s_id.b.area << 8) | @@ -1537,10 +1589,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, s_id.b.domain, s_id.b.area, s_id.b.al_pa); btree_update32(&lport->lport_fcport_map, key, se_nacl); } else { - btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC); + btree_insert32(&lport->lport_fcport_map, key, se_nacl, + GFP_ATOMIC); } - sess->s_id = s_id; + sess->d_id = s_id; nacl->nport_id = key; } @@ -1567,6 +1620,7 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, + .put_sess = tcm_qla2xxx_put_sess, .shutdown_sess = tcm_qla2xxx_shutdown_sess, }; @@ -1690,7 +1744,7 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; struct fc_vport_identifiers vport_id; - if (!qla_tgt_mode_enabled(base_vha)) { + if (qla_ini_mode_enabled(base_vha)) { pr_err("qla2xxx base_vha not enabled for target mode\n"); return -EPERM; } @@ -1738,7 +1792,7 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport( p = strchr(tmp, '@'); if (!p) { - pr_err("Unable to locate NPIV '@' seperator\n"); + pr_err("Unable to locate NPIV '@' separator\n"); return ERR_PTR(-EINVAL); } *p++ = '\0'; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index cf8430be183b..071035dfa99a 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -20,8 +20,8 @@ struct tcm_qla2xxx_nacl { u64 nport_wwnn; /* ASCII formatted WWPN for FC Initiator Nport */ char nport_name[TCM_QLA2XXX_NAMELEN]; - /* Pointer to qla_tgt_sess */ - struct qla_tgt_sess *qla_tgt_sess; + /* Pointer to fc_port */ + struct fc_port *fc_port; /* Pointer to TCM FC nexus */ struct se_session *nport_nexus; }; diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c index b1383a71400e..a75673bb82b3 100644 --- a/drivers/scsi/scsi_common.c +++ b/drivers/scsi/scsi_common.c @@ -137,11 +137,11 @@ EXPORT_SYMBOL(int_to_scsilun); bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, struct scsi_sense_hdr *sshdr) { + memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); + if (!sense_buffer || !sb_len) 
return false; - memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); - sshdr->response_code = (sense_buffer[0] & 0x7f); if (!scsi_sense_valid(sshdr)) diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index b8d3b97b217a..84addee05be6 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -219,20 +219,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) } EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); -static struct scsi_device *get_sdev_from_queue(struct request_queue *q) -{ - struct scsi_device *sdev; - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - sdev = q->queuedata; - if (!sdev || !get_device(&sdev->sdev_gendev)) - sdev = NULL; - spin_unlock_irqrestore(q->queue_lock, flags); - - return sdev; -} - /* * scsi_dh_activate - activate the path associated with the scsi_device * corresponding to the given request queue. @@ -251,7 +237,7 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) struct scsi_device *sdev; int err = SCSI_DH_NOSYS; - sdev = get_sdev_from_queue(q); + sdev = scsi_device_from_queue(q); if (!sdev) { if (fn) fn(data, err); @@ -298,7 +284,7 @@ int scsi_dh_set_params(struct request_queue *q, const char *params) struct scsi_device *sdev; int err = -SCSI_DH_NOSYS; - sdev = get_sdev_from_queue(q); + sdev = scsi_device_from_queue(q); if (!sdev) return err; @@ -321,7 +307,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name) struct scsi_device_handler *scsi_dh; int err = 0; - sdev = get_sdev_from_queue(q); + sdev = scsi_device_from_queue(q); if (!sdev) return -ENODEV; @@ -359,7 +345,7 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) struct scsi_device *sdev; const char *handler_name = NULL; - sdev = get_sdev_from_queue(q); + sdev = scsi_device_from_queue(q); if (!sdev) return NULL; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 8b8c814df5c7..b6bf3f29a12a 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -199,6 +199,7 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg) int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) { char scsi_cmd[MAX_COMMAND_SIZE]; + struct scsi_sense_hdr sense_hdr; /* Check for deprecated ioctls ... 
all the ioctls which don't * follow the new unique numbering scheme are deprecated */ @@ -243,7 +244,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); case SCSI_IOCTL_TEST_UNIT_READY: return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT, - NORMAL_RETRIES, NULL); + NORMAL_RETRIES, &sense_hdr); case SCSI_IOCTL_START_UNIT: scsi_cmd[0] = START_STOP; scsi_cmd[1] = 0; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 3e32dc954c3c..ba2286652ff6 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -213,10 +213,30 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) __scsi_queue_insert(cmd, reason, 1); } -static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, + +/** + * scsi_execute - insert request and wait for the result + * @sdev: scsi device + * @cmd: scsi command + * @data_direction: data direction + * @buffer: data buffer + * @bufflen: len of buffer + * @sense: optional sense buffer + * @sshdr: optional decoded sense header + * @timeout: request timeout in seconds + * @retries: number of times to retry request + * @flags: flags for ->cmd_flags + * @rq_flags: flags for ->rq_flags + * @resid: optional residual length + * + * returns the req->errors value which is the scsi_cmnd result + * field. + */ +int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, int timeout, int retries, u64 flags, - req_flags_t rq_flags, int *resid) + unsigned char *sense, struct scsi_sense_hdr *sshdr, + int timeout, int retries, u64 flags, req_flags_t rq_flags, + int *resid) { struct request *req; struct scsi_request *rq; @@ -259,62 +279,16 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, *resid = rq->resid_len; if (sense && rq->sense_len) memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE); + if (sshdr) + scsi_normalize_sense(rq->sense, rq->sense_len, sshdr); ret = req->errors; out: blk_put_request(req); return ret; } - -/** - * scsi_execute - insert request and wait for the result - * @sdev: scsi device - * @cmd: scsi command - * @data_direction: data direction - * @buffer: data buffer - * @bufflen: len of buffer - * @sense: optional sense buffer - * @timeout: request timeout in seconds - * @retries: number of times to retry request - * @flags: or into request flags; - * @resid: optional residual length - * - * returns the req->errors value which is the scsi_cmnd result - * field. 
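Note on the scsi_lib.c hunk above: the former __scsi_execute() is exported directly as scsi_execute(), gaining an optional decoded-sense argument, so a caller can receive the raw sense buffer, a normalized header, both, or neither in a single call. A self-contained sketch of that calling convention follows; the struct layout and helper are simplified stand-ins, not the SCSI mid-layer API, and the sense bytes are the fixed-format offsets only.

#include <stdio.h>
#include <string.h>

#define SENSE_LEN 96

struct demo_sense_hdr { unsigned char response_code, sense_key, asc, ascq; };

static void demo_normalize_sense(const unsigned char *buf, struct demo_sense_hdr *h)
{
        memset(h, 0, sizeof(*h));
        h->response_code = buf[0] & 0x7f;
        h->sense_key = buf[2] & 0x0f;   /* fixed-format sense layout */
        h->asc  = buf[12];
        h->ascq = buf[13];
}

static int demo_execute(unsigned char *sense, struct demo_sense_hdr *sshdr)
{
        unsigned char raw[SENSE_LEN] = { 0x70, 0, 0x06 /* UNIT ATTENTION */ };

        if (sense)
                memcpy(sense, raw, SENSE_LEN);  /* raw copy, as before */
        if (sshdr)
                demo_normalize_sense(raw, sshdr); /* decoded view, new in one call */
        return 0;
}

int main(void)
{
        struct demo_sense_hdr h;

        demo_execute(NULL, &h);  /* most callers only want the decoded header */
        printf("sense key 0x%x\n", h.sense_key);
        return 0;
}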
- */ -int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, - int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, int timeout, int retries, u64 flags, - int *resid) -{ - return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, - timeout, retries, flags, 0, resid); -} EXPORT_SYMBOL(scsi_execute); -int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, - int data_direction, void *buffer, unsigned bufflen, - struct scsi_sense_hdr *sshdr, int timeout, int retries, - int *resid, u64 flags, req_flags_t rq_flags) -{ - char *sense = NULL; - int result; - - if (sshdr) { - sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); - if (!sense) - return DRIVER_ERROR << 24; - } - result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, - sense, timeout, retries, flags, rq_flags, resid); - if (sshdr) - scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); - - kfree(sense); - return result; -} -EXPORT_SYMBOL(scsi_execute_req_flags); - /* * Function: scsi_init_cmd_errh() * @@ -2231,6 +2205,29 @@ void scsi_mq_destroy_tags(struct Scsi_Host *shost) blk_mq_free_tag_set(&shost->tag_set); } +/** + * scsi_device_from_queue - return sdev associated with a request_queue + * @q: The request queue to return the sdev from + * + * Return the sdev associated with a request queue or NULL if the + * request_queue does not reference a SCSI device. + */ +struct scsi_device *scsi_device_from_queue(struct request_queue *q) +{ + struct scsi_device *sdev = NULL; + + if (q->mq_ops) { + if (q->mq_ops == &scsi_mq_ops) + sdev = q->queuedata; + } else if (q->request_fn == scsi_request_fn) + sdev = q->queuedata; + if (!sdev || !get_device(&sdev->sdev_gendev)) + sdev = NULL; + + return sdev; +} +EXPORT_SYMBOL_GPL(scsi_device_from_queue); + /* * Function: scsi_block_requests() * @@ -2497,28 +2494,20 @@ EXPORT_SYMBOL(scsi_mode_sense); * @sdev: scsi device to change the state of. * @timeout: command timeout * @retries: number of retries before failing - * @sshdr_external: Optional pointer to struct scsi_sense_hdr for - * returning sense. Make sure that this is cleared before passing - * in. + * @sshdr: outpout pointer for decoded sense information. * * Returns zero if unsuccessful or an error if TUR failed. For * removable media, UNIT_ATTENTION sets ->changed flag. 
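Note on the new scsi_device_from_queue() above: the helper only trusts q->queuedata after confirming the queue's ops (mq_ops or request_fn) belong to the SCSI midlayer, since a foreign queue may carry private data of a different type. A toy standalone model of that ownership check follows; the structures are stand-ins for request_queue and scsi_device, not the real types.

#include <stdio.h>
#include <stddef.h>

struct demo_ops { int (*submit)(void *q); };
struct demo_queue { const struct demo_ops *ops; void *queuedata; };
struct demo_device { int id; };

static int demo_submit(void *q) { (void)q; return 0; }
static const struct demo_ops demo_scsi_ops = { .submit = demo_submit };

static struct demo_device *demo_device_from_queue(struct demo_queue *q)
{
        /* queuedata is only meaningful when the ops match ours */
        if (q->ops != &demo_scsi_ops)
                return NULL;
        return q->queuedata;
}

int main(void)
{
        struct demo_device dev = { .id = 42 };
        int bogus;
        struct demo_queue mine = { .ops = &demo_scsi_ops, .queuedata = &dev };
        struct demo_queue foreign = { .ops = NULL, .queuedata = &bogus };

        printf("mine: %p foreign: %p\n",
               (void *)demo_device_from_queue(&mine),
               (void *)demo_device_from_queue(&foreign));
        return 0;
}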
**/ int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, - struct scsi_sense_hdr *sshdr_external) + struct scsi_sense_hdr *sshdr) { char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0, }; - struct scsi_sense_hdr *sshdr; int result; - if (!sshdr_external) - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); - else - sshdr = sshdr_external; - /* try to eat the UNIT_ATTENTION if there are enough retries */ do { result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, @@ -2529,8 +2518,6 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, } while (scsi_sense_valid(sshdr) && sshdr->sense_key == UNIT_ATTENTION && --retries); - if (!sshdr_external) - kfree(sshdr); return result; } EXPORT_SYMBOL(scsi_test_unit_ready); diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 319868f3f674..d0219e36080c 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -123,25 +123,21 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd, { int i, result; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; + struct scsi_sense_hdr sshdr_tmp; + + if (!sshdr) + sshdr = &sshdr_tmp; for(i = 0; i < DV_RETRIES; i++) { - result = scsi_execute(sdev, cmd, dir, buffer, bufflen, - sense, DV_TIMEOUT, /* retries */ 1, + result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, + sshdr, DV_TIMEOUT, /* retries */ 1, REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER, - NULL); - if (driver_byte(result) & DRIVER_SENSE) { - struct scsi_sense_hdr sshdr_tmp; - if (!sshdr) - sshdr = &sshdr_tmp; - - if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, - sshdr) - && sshdr->sense_key == UNIT_ATTENTION) - continue; - } - break; + 0, NULL); + if (!(driver_byte(result) & DRIVER_SENSE) || + sshdr->sense_key != UNIT_ATTENTION) + break; } return result; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index cb6e68dd6df0..c7839f6c35cc 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1425,7 +1425,6 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) { struct scsi_disk *sdkp = scsi_disk_get(disk); struct scsi_device *sdp; - struct scsi_sense_hdr *sshdr = NULL; int retval; if (!sdkp) @@ -1454,22 +1453,21 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever * sd_revalidate() is called. */ - retval = -ENODEV; - if (scsi_block_when_processing_errors(sdp)) { - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); + struct scsi_sense_hdr sshdr = { 0, }; + retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, - sshdr); - } + &sshdr); - /* failed to execute TUR, assume media not present */ - if (host_byte(retval)) { - set_media_not_present(sdkp); - goto out; - } + /* failed to execute TUR, assume media not present */ + if (host_byte(retval)) { + set_media_not_present(sdkp); + goto out; + } - if (media_not_present(sdkp, sshdr)) - goto out; + if (media_not_present(sdkp, &sshdr)) + goto out; + } /* * For removable scsi disk we have to recognise the presence @@ -1485,7 +1483,6 @@ out: * Medium present state has changed in either direction. * Device has indicated UNIT_ATTENTION. */ - kfree(sshdr); retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; sdp->changed = 0; scsi_disk_put(sdkp); @@ -1511,9 +1508,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp) * Leave the rest of the command zero to indicate * flush everything. 
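Note on the scsi_test_unit_ready() and spi_execute() hunks above: with the decoded header now supplied by the caller (or taken from the stack when the caller passes NULL), the heap allocation disappears and the retry logic reduces to "repeat while the sense reports UNIT ATTENTION". A compilable toy model of that loop follows; the result codes and the fake command are invented for illustration.

#include <stdio.h>

#define UNIT_ATTENTION 0x06
#define MAX_RETRIES 5

struct demo_sense_hdr { unsigned char sense_key; };

static int demo_fake_execute(struct demo_sense_hdr *h)
{
        static int pending_ua = 2;      /* pretend two unit attentions are queued */

        if (pending_ua > 0) {
                pending_ua--;
                h->sense_key = UNIT_ATTENTION;
                return 2;               /* CHECK CONDITION-like result */
        }
        h->sense_key = 0;
        return 0;
}

static int demo_test_unit_ready(struct demo_sense_hdr *sshdr)
{
        struct demo_sense_hdr local;
        int result, retries = MAX_RETRIES;

        if (!sshdr)
                sshdr = &local;         /* no kzalloc fallback needed any more */

        do {
                result = demo_fake_execute(sshdr);
        } while (sshdr->sense_key == UNIT_ATTENTION && --retries);

        return result;
}

int main(void)
{
        printf("result %d\n", demo_test_unit_ready(NULL));
        return 0;
}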
*/ - res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, - &sshdr, timeout, SD_MAX_RETRIES, - NULL, 0, RQF_PM); + res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL); if (res == 0) break; } @@ -3084,7 +3080,7 @@ struct sd_devt { struct disk_devt disk_devt; }; -void sd_devt_release(struct disk_devt *disk_devt) +static void sd_devt_release(struct disk_devt *disk_devt) { struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt, disk_devt); @@ -3213,10 +3209,10 @@ static int sd_probe(struct device *dev) sd_devt = NULL; out_put: put_disk(gd); - out_free: - kfree(sdkp); out_free_devt: kfree(sd_devt); + out_free: + kfree(sdkp); out: scsi_autopm_put_device(sdp); return error; @@ -3299,8 +3295,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) if (!scsi_device_online(sdp)) return -ENODEV; - res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM); + res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL); if (res) { sd_print_result(sdkp, "Start/Stop Unit failed", res); if (driver_byte(res) & DRIVER_SENSE) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 11c0dfb3dfa3..657ad15682a3 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -534,8 +534,7 @@ static int pqi_write_current_time_to_host_wellness( size_t buffer_length; time64_t local_time; unsigned int year; - struct timeval time; - struct rtc_time tm; + struct tm tm; buffer_length = sizeof(*buffer); @@ -552,9 +551,8 @@ static int pqi_write_current_time_to_host_wellness( put_unaligned_le16(sizeof(buffer->time), &buffer->time_length); - do_gettimeofday(&time); - local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60); - rtc_time64_to_tm(local_time, &tm); + local_time = ktime_get_real_seconds(); + time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); year = tm.tm_year + 1900; buffer->time[0] = bin2bcd(tm.tm_hour); diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index dfffdf63e44c..4610c8c5693f 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -187,30 +187,19 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) struct scsi_device *SDev; struct scsi_sense_hdr sshdr; int result, err = 0, retries = 0; - struct request_sense *sense = cgc->sense; SDev = cd->device; - if (!sense) { - sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); - if (!sense) { - err = -ENOMEM; - goto out; - } - } - retry: if (!scsi_block_when_processing_errors(SDev)) { err = -ENODEV; goto out; } - memset(sense, 0, sizeof(*sense)); result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, - cgc->buffer, cgc->buflen, (char *)sense, - cgc->timeout, IOCTL_RETRIES, 0, NULL); - - scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr); + cgc->buffer, cgc->buflen, + (unsigned char *)cgc->sense, &sshdr, + cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); /* Minimal error checking. Ignore cases we know about, and report the rest. 
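Note on the sd_probe() error-path reordering above: the fix puts the cleanup labels back in reverse order of acquisition, so a failure after the later allocation frees it first and then falls through to the earlier one. A minimal standalone illustration of that convention follows; the allocations are generic mallocs, not the sdkp/sd_devt objects themselves.

#include <stdio.h>
#include <stdlib.h>

struct demo_a { int x; };
struct demo_b { int y; };

static int demo_probe(int fail_late)
{
        struct demo_a *a;
        struct demo_b *b;
        int err = -1;

        a = malloc(sizeof(*a));         /* acquired first */
        if (!a)
                return err;

        b = malloc(sizeof(*b));         /* acquired second */
        if (!b)
                goto out_free_a;        /* only 'a' exists at this point */

        if (fail_late)
                goto out_free_b;        /* free 'b', then fall through to 'a' */

        printf("probe ok\n");
        free(b);
        free(a);
        return 0;

out_free_b:
        free(b);
out_free_a:
        free(a);
        return err;
}

int main(void)
{
        return demo_probe(1) ? 1 : 0;
}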
*/ if (driver_byte(result) != 0) { @@ -261,8 +250,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) /* Wake up a process waiting for device */ out: - if (!cgc->sense) - kfree(sense); cgc->stat = err; return err; } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 81212d4bd9bf..e5ef78a6848e 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -23,7 +23,7 @@ static const char *verstr = "20160209"; #include <linux/fs.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/string.h> diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index ce5d023c1c91..c87d770b519a 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1523,18 +1523,6 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) return false; } - /* - * Not performing check for each individual select_major - * mappings of select_minor, since there is no harm in - * configuring a non-existent select_minor - */ - if (host->testbus.select_minor > 0xFF) { - dev_err(host->hba->dev, - "%s: 0x%05X is not a legal testbus option\n", - __func__, host->testbus.select_minor); - return false; - } - return true; } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 8b721f431dd0..dc6efbd1be8e 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -6915,9 +6915,9 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) goto out; } - ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, - UFSHCD_REQ_SENSE_SIZE, NULL, - msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM); + ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer, + UFSHCD_REQ_SENSE_SIZE, NULL, NULL, + msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL); if (ret) pr_err("%s: failed with err %d\n", __func__, ret); @@ -6982,8 +6982,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, * callbacks hence set the RQF_PM flag so that it doesn't resume the * already suspended childs. 
*/ - ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM); + ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL); if (ret) { sdev_printk(KERN_WARNING, sdp, "START_STOP failed for power mode: %d, result %x\n", diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index c680d7641311..939c47df73fa 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/mempool.h> +#include <linux/interrupt.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> @@ -29,6 +30,7 @@ #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <linux/seqlock.h> +#include <linux/blk-mq-virtio.h> #define VIRTIO_SCSI_MEMPOOL_SZ 64 #define VIRTIO_SCSI_EVENT_LEN 8 @@ -108,7 +110,6 @@ struct virtio_scsi { bool affinity_hint_set; struct hlist_node node; - struct hlist_node node_dead; /* Protected by event_vq lock */ bool stop_events; @@ -118,7 +119,6 @@ struct virtio_scsi { struct virtio_scsi_vq req_vqs[]; }; -static enum cpuhp_state virtioscsi_online; static struct kmem_cache *virtscsi_cmd_cache; static mempool_t *virtscsi_cmd_pool; @@ -766,6 +766,13 @@ static void virtscsi_target_destroy(struct scsi_target *starget) kfree(tgt); } +static int virtscsi_map_queues(struct Scsi_Host *shost) +{ + struct virtio_scsi *vscsi = shost_priv(shost); + + return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2); +} + static struct scsi_host_template virtscsi_host_template_single = { .module = THIS_MODULE, .name = "Virtio SCSI HBA", @@ -801,6 +808,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .use_clustering = ENABLE_CLUSTERING, .target_alloc = virtscsi_target_alloc, .target_destroy = virtscsi_target_destroy, + .map_queues = virtscsi_map_queues, .track_queue_depth = 1, }; @@ -817,80 +825,6 @@ static struct scsi_host_template virtscsi_host_template_multi = { virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ } while(0) -static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) -{ - int i; - int cpu; - - /* In multiqueue mode, when the number of cpu is equal - * to the number of request queues, we let the qeueues - * to be private to one cpu by setting the affinity hint - * to eliminate the contention. 
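Note on the virtio_scsi hunks above: the hand-rolled per-CPU affinity code being removed below is what the new map_queues callback delegates to blk_mq_virtio_map_queues(). Conceptually it is just a policy that spreads hardware queues across CPUs; a trivial standalone sketch of such a round-robin spread follows, with invented CPU and queue counts.

#include <stdio.h>

#define DEMO_NR_CPUS   8
#define DEMO_NR_QUEUES 3

int main(void)
{
        int map[DEMO_NR_CPUS];
        int cpu;

        for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
                map[cpu] = cpu % DEMO_NR_QUEUES;        /* cpu -> hw queue */

        for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
                printf("cpu %d -> queue %d\n", cpu, map[cpu]);
        return 0;
}

Delegating this to the block layer also removes the CPU-hotplug notifier plumbing that the rest of this diff deletes.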
- */ - if ((vscsi->num_queues == 1 || - vscsi->num_queues != num_online_cpus()) && affinity) { - if (vscsi->affinity_hint_set) - affinity = false; - else - return; - } - - if (affinity) { - i = 0; - for_each_online_cpu(cpu) { - virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu); - i++; - } - - vscsi->affinity_hint_set = true; - } else { - for (i = 0; i < vscsi->num_queues; i++) { - if (!vscsi->req_vqs[i].vq) - continue; - - virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); - } - - vscsi->affinity_hint_set = false; - } -} - -static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) -{ - get_online_cpus(); - __virtscsi_set_affinity(vscsi, affinity); - put_online_cpus(); -} - -static int virtscsi_cpu_online(unsigned int cpu, struct hlist_node *node) -{ - struct virtio_scsi *vscsi = hlist_entry_safe(node, struct virtio_scsi, - node); - __virtscsi_set_affinity(vscsi, true); - return 0; -} - -static int virtscsi_cpu_notif_add(struct virtio_scsi *vi) -{ - int ret; - - ret = cpuhp_state_add_instance(virtioscsi_online, &vi->node); - if (ret) - return ret; - - ret = cpuhp_state_add_instance(CPUHP_VIRT_SCSI_DEAD, &vi->node_dead); - if (ret) - cpuhp_state_remove_instance(virtioscsi_online, &vi->node); - return ret; -} - -static void virtscsi_cpu_notif_remove(struct virtio_scsi *vi) -{ - cpuhp_state_remove_instance_nocalls(virtioscsi_online, &vi->node); - cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_SCSI_DEAD, - &vi->node_dead); -} - static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, struct virtqueue *vq) { @@ -900,14 +834,8 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, static void virtscsi_remove_vqs(struct virtio_device *vdev) { - struct Scsi_Host *sh = virtio_scsi_host(vdev); - struct virtio_scsi *vscsi = shost_priv(sh); - - virtscsi_set_affinity(vscsi, false); - /* Stop all the virtqueues. */ vdev->config->reset(vdev); - vdev->config->del_vqs(vdev); } @@ -920,6 +848,7 @@ static int virtscsi_init(struct virtio_device *vdev, vq_callback_t **callbacks; const char **names; struct virtqueue **vqs; + struct irq_affinity desc = { .pre_vectors = 2 }; num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL); @@ -941,7 +870,8 @@ static int virtscsi_init(struct virtio_device *vdev, } /* Discover virtqueues and write information to configuration. 
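Note on the irq_affinity descriptor declared above (and passed to find_vqs() in the hunk that follows): .pre_vectors = 2 asks the core to leave the first two vectors, the control and event queues here, out of automatic affinity spreading, and to distribute only the request-queue vectors across CPUs. A standalone sketch of that idea follows; the vector and CPU counts are made up for illustration.

#include <stdio.h>

#define DEMO_NR_VECTORS 6
#define DEMO_PRE_VECTORS 2
#define DEMO_NR_CPUS 4

int main(void)
{
        int vec;

        for (vec = 0; vec < DEMO_NR_VECTORS; vec++) {
                if (vec < DEMO_PRE_VECTORS)
                        printf("vector %d: not affinity-managed\n", vec);
                else
                        printf("vector %d: cpu %d\n", vec,
                               (vec - DEMO_PRE_VECTORS) % DEMO_NR_CPUS);
        }
        return 0;
}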
*/ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, + &desc); if (err) goto out; @@ -1007,10 +937,6 @@ static int virtscsi_probe(struct virtio_device *vdev) if (err) goto virtscsi_init_failed; - err = virtscsi_cpu_notif_add(vscsi); - if (err) - goto scsi_add_host_failed; - cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; @@ -1065,9 +991,6 @@ static void virtscsi_remove(struct virtio_device *vdev) virtscsi_cancel_event_work(vscsi); scsi_remove_host(shost); - - virtscsi_cpu_notif_remove(vscsi); - virtscsi_remove_vqs(vdev); scsi_host_put(shost); } @@ -1075,10 +998,6 @@ static void virtscsi_remove(struct virtio_device *vdev) #ifdef CONFIG_PM_SLEEP static int virtscsi_freeze(struct virtio_device *vdev) { - struct Scsi_Host *sh = virtio_scsi_host(vdev); - struct virtio_scsi *vscsi = shost_priv(sh); - - virtscsi_cpu_notif_remove(vscsi); virtscsi_remove_vqs(vdev); return 0; } @@ -1093,11 +1012,6 @@ static int virtscsi_restore(struct virtio_device *vdev) if (err) return err; - err = virtscsi_cpu_notif_add(vscsi); - if (err) { - vdev->config->del_vqs(vdev); - return err; - } virtio_device_ready(vdev); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) @@ -1152,16 +1066,6 @@ static int __init init(void) pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); goto error; } - ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, - "scsi/virtio:online", - virtscsi_cpu_online, NULL); - if (ret < 0) - goto error; - virtioscsi_online = ret; - ret = cpuhp_setup_state_multi(CPUHP_VIRT_SCSI_DEAD, "scsi/virtio:dead", - NULL, virtscsi_cpu_online); - if (ret) - goto error; ret = register_virtio_driver(&virtio_scsi_driver); if (ret < 0) goto error; @@ -1177,17 +1081,12 @@ error: kmem_cache_destroy(virtscsi_cmd_cache); virtscsi_cmd_cache = NULL; } - if (virtioscsi_online) - cpuhp_remove_multi_state(virtioscsi_online); - cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD); return ret; } static void __exit fini(void) { unregister_virtio_driver(&virtio_scsi_driver); - cpuhp_remove_multi_state(virtioscsi_online); - cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD); mempool_destroy(virtscsi_cmd_pool); kmem_cache_destroy(virtscsi_cmd_cache); } diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h index 2eaf3184f61d..2ce394aa4c95 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.h +++ b/drivers/soc/fsl/qbman/dpaa_sys.h @@ -36,6 +36,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> #include <linux/vmalloc.h> #include <linux/platform_device.h> #include <linux/of.h> diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 44222ef9471e..90b5b2efafbf 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -33,6 +33,7 @@ #include <linux/pm_domain.h> #include <linux/export.h> #include <linux/sched/rt.h> +#include <uapi/linux/sched/types.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/ioport.h> diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 2c3ffbcbd621..f45115fce4eb 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -36,6 +36,7 @@ #include <linux/debugfs.h> #include <linux/dma-buf.h> #include <linux/idr.h> +#include <linux/sched/task.h> #include "ion.h" #include "ion_priv.h" diff --git 
a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 4e5c0f17f579..c69d0bd53693 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -20,6 +20,7 @@ #include <linux/mm.h> #include <linux/rtmutex.h> #include <linux/sched.h> +#include <uapi/linux/sched/types.h> #include <linux/scatterlist.h> #include <linux/vmalloc.h> #include "ion.h" diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index ec3b66561412..054660049395 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -37,7 +37,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/oom.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/swap.h> #include <linux/rcupdate.h> #include <linux/profile.h> diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 57e8599b54e6..8deac8d9225d 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -23,7 +23,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/mm.h> diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c index c63e591631f6..c3b8fc54883d 100644 --- a/drivers/staging/dgnc/dgnc_tty.c +++ b/drivers/staging/dgnc/dgnc_tty.c @@ -19,7 +19,7 @@ */ #include <linux/kernel.h> -#include <linux/sched.h> /* For jiffies, task states */ +#include <linux/sched/signal.h> /* For jiffies, task states, etc. */ #include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */ #include <linux/module.h> #include <linux/ctype.h> diff --git a/drivers/staging/dgnc/dgnc_utils.c b/drivers/staging/dgnc/dgnc_utils.c index 95272f4765fc..6f59240024d1 100644 --- a/drivers/staging/dgnc/dgnc_utils.c +++ b/drivers/staging/dgnc/dgnc_utils.c @@ -1,5 +1,5 @@ #include <linux/tty.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include "dgnc_utils.h" /* diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c index 47acb0a29842..3be5f25ff113 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c +++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c @@ -588,8 +588,7 @@ static int parse_mc_ranges(struct device *dev, int *paddr_cells, int *mc_addr_cells, int *mc_size_cells, - const __be32 **ranges_start, - u8 *num_ranges) + const __be32 **ranges_start) { const __be32 *prop; int range_tuple_cell_count; @@ -602,8 +601,6 @@ static int parse_mc_ranges(struct device *dev, dev_warn(dev, "missing or empty ranges property for device tree node '%s'\n", mc_node->name); - - *num_ranges = 0; return 0; } @@ -630,8 +627,7 @@ static int parse_mc_ranges(struct device *dev, return -EINVAL; } - *num_ranges = ranges_len / tuple_len; - return 0; + return ranges_len / tuple_len; } static int get_mc_addr_translation_ranges(struct device *dev, @@ -639,7 +635,7 @@ static int get_mc_addr_translation_ranges(struct device *dev, **ranges, u8 *num_ranges) { - int error; + int ret; int paddr_cells; int mc_addr_cells; int mc_size_cells; @@ -647,16 +643,16 @@ static int get_mc_addr_translation_ranges(struct device *dev, const __be32 *ranges_start; const __be32 *cell; - error = parse_mc_ranges(dev, + ret = parse_mc_ranges(dev, &paddr_cells, &mc_addr_cells, &mc_size_cells, - &ranges_start, - num_ranges); - if (error < 0) - return error; + 
&ranges_start); + if (ret < 0) + return ret; - if (!(*num_ranges)) { + *num_ranges = ret; + if (!ret) { /* * Missing or empty ranges property ("ranges;") for the * 'fsl,qoriq-mc' node. In this case, identity mapping diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c index c4bf3298ba07..f0404bc37123 100644 --- a/drivers/staging/greybus/pwm.c +++ b/drivers/staging/greybus/pwm.c @@ -284,7 +284,6 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev, pwm->ops = &gb_pwm_ops; pwm->base = -1; /* Allocate base dynamically */ pwm->npwm = pwmc->pwm_max + 1; - pwm->can_sleep = true; /* FIXME */ ret = pwmchip_add(pwm); if (ret) { diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index ab0dbf5cab5a..43255e2e9276 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -14,7 +14,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/uaccess.h> diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c index cf902154f0aa..bcf9f3dd0310 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c @@ -34,7 +34,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs_struct.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include "../../../include/linux/libcfs/libcfs.h" diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c index 92cd4113cf75..87fe366f8f70 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ b/drivers/staging/lustre/lnet/selftest/rpc.c @@ -255,7 +255,7 @@ srpc_service_init(struct srpc_service *svc) svc->sv_shuttingdown = 0; svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(*svc->sv_cpt_data)); + sizeof(**svc->sv_cpt_data)); if (!svc->sv_cpt_data) return -ENOMEM; diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 21aec0ca9ad3..7d8628ce0d3b 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -44,6 +44,7 @@ #ifdef __KERNEL__ # include <linux/quota.h> +# include <linux/sched/signal.h> # include <linux/string.h> /* snprintf() */ # include <linux/version.h> #else /* !__KERNEL__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h index 300e96fb032a..da9ce195c52e 100644 --- a/drivers/staging/lustre/lustre/include/lustre_compat.h +++ b/drivers/staging/lustre/lustre/include/lustre_compat.h @@ -35,6 +35,7 @@ #include <linux/fs_struct.h> #include <linux/namei.h> +#include <linux/cred.h> #include "lustre_patchless_compat.h" diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h index 27f3148c4344..b04d613846ee 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ b/drivers/staging/lustre/lustre/include/lustre_lib.h @@ -42,7 +42,7 @@ * @{ */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/types.h> #include "../../include/linux/libcfs/libcfs.h" diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h index 
aaedec7d793c..dace6591a0a4 100644 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ b/drivers/staging/lustre/lustre/include/obd_support.h @@ -34,6 +34,8 @@ #define _OBD_SUPPORT #include <linux/slab.h> +#include <linux/sched/signal.h> + #include "../../include/linux/libcfs/libcfs.h" #include "lustre_compat.h" #include "lprocfs_status.h" diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c index 10adfcdd7035..481c0d01d4c6 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -2952,15 +2952,16 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits) return rc; } -int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat) +int ll_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(de); + struct inode *inode = d_inode(path->dentry); struct ll_sb_info *sbi = ll_i2sbi(inode); struct ll_inode_info *lli = ll_i2info(inode); int res; - res = ll_inode_revalidate(de, MDS_INODELOCK_UPDATE | - MDS_INODELOCK_LOOKUP); + res = ll_inode_revalidate(path->dentry, + MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP); ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1); if (res) diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index ecdfd0c29b7f..55f68acd85d1 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -750,7 +750,8 @@ int ll_file_open(struct inode *inode, struct file *file); int ll_file_release(struct inode *inode, struct file *file); int ll_release_openhandle(struct inode *, struct lookup_intent *); int ll_md_real_close(struct inode *inode, fmode_t fmode); -int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat); +int ll_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); struct posix_acl *ll_get_acl(struct inode *inode, int type); int ll_migrate(struct inode *parent, struct file *file, int mdtidx, const char *name, int namelen); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index e860df7c45a2..366f2ce20f5e 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -38,7 +38,9 @@ #include "../../include/linux/libcfs/libcfs.h" #include <linux/crypto.h> +#include <linux/cred.h> #include <linux/key.h> +#include <linux/sched/task.h> #include "../include/obd.h" #include "../include/obd_class.h" diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c index c75ae43095ba..c6c3de94adaa 100644 --- a/drivers/staging/media/lirc/lirc_sir.c +++ b/drivers/staging/media/lirc/lirc_sir.c @@ -36,7 +36,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/fs.h> diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c index 34aac3e2eb87..e4a533b6beb3 100644 --- a/drivers/staging/media/lirc/lirc_zilog.c +++ b/drivers/staging/media/lirc/lirc_zilog.c @@ -42,7 +42,7 @@ #include <linux/module.h> #include <linux/kmod.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/string.h> diff --git 
a/drivers/staging/media/platform/bcm2835/mmal-vchiq.c b/drivers/staging/media/platform/bcm2835/mmal-vchiq.c index f0639ee6c8b9..fdfb6a620a43 100644 --- a/drivers/staging/media/platform/bcm2835/mmal-vchiq.c +++ b/drivers/staging/media/platform/bcm2835/mmal-vchiq.c @@ -397,8 +397,10 @@ buffer_from_host(struct vchiq_mmal_instance *instance, /* get context */ msg_context = get_msg_context(instance); - if (msg_context == NULL) - return -ENOMEM; + if (!msg_context) { + ret = -ENOMEM; + goto unlock; + } /* store bulk message context for when data arrives */ msg_context->u.bulk.instance = instance; @@ -454,6 +456,7 @@ buffer_from_host(struct vchiq_mmal_instance *instance, vchi_service_release(instance->handle); +unlock: mutex_unlock(&instance->bulk_mutex); return ret; diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h index ee3f5ee06529..9e390648d93e 100644 --- a/drivers/staging/rtl8188eu/include/osdep_service.h +++ b/drivers/staging/rtl8188eu/include/osdep_service.h @@ -37,7 +37,7 @@ #include <linux/io.h> #include <linux/mutex.h> #include <linux/sem.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/etherdevice.h> #include <linux/wireless.h> #include <net/iw_handler.h> diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h index b8a170978434..5d33020554cd 100644 --- a/drivers/staging/rtl8712/osdep_service.h +++ b/drivers/staging/rtl8712/osdep_service.h @@ -33,7 +33,7 @@ #include <linux/interrupt.h> #include <linux/semaphore.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sem.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c index f19b6b27aa71..5346c657485d 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.c +++ b/drivers/staging/rtl8712/rtl8712_cmd.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/netdevice.h> diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index ff68a384f9c2..d2ff0afd685a 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -22,7 +22,7 @@ #include <linux/unistd.h> #include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */ #include <linux/poll.h> /* for poll_wait() */ -#include <linux/sched.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ +#include <linux/sched/signal.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ #include "spk_priv.h" #include "speakup.h" diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index e6241fb5cfa6..3aeffcb9c87e 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -121,8 +121,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state) if (err < 0) return err; - (void)of_property_read_u32(dev->of_node, "cache-line-size", + err = of_property_read_u32(dev->of_node, "cache-line-size", &g_cache_line_size); + + if (err) { + dev_err(dev, "Missing cache-line-size property\n"); + return -ENODEV; + } + g_fragments_size = 2 * g_cache_line_size; /* Allocate space for the channels in coherent memory */ diff --git 
a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index cb0b7ca36b1e..8a0d214f6e9b 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -34,6 +34,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/cdev.h> diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h index 4055d4bf9f74..e63964f5a18a 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h @@ -47,7 +47,7 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/random.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/ctype.h> #include <linux/uaccess.h> #include <linux/time.h> /* for time_t */ diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index 2fb1bf1a26c5..37a05185dcbe 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -872,7 +872,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, goto out; csk->mtu = ndev->mtu; csk->tx_chan = cxgb4_port_chan(ndev); - csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; + csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type, + cxgb4_port_viid(ndev)); step = cdev->lldi.ntxq / cdev->lldi.nchan; csk->txq_idx = cxgb4_port_idx(ndev) * step; @@ -907,7 +908,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, port_id = cxgb4_port_idx(ndev); csk->mtu = dst_mtu(dst); csk->tx_chan = cxgb4_port_chan(ndev); - csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; + csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type, + cxgb4_port_viid(ndev)); step = cdev->lldi.ntxq / cdev->lldi.nports; csk->txq_idx = (port_id * step) + @@ -1066,6 +1068,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) struct sk_buff *skb; const struct tcphdr *tcph; struct cpl_t5_pass_accept_rpl *rpl5; + struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; unsigned int len = roundup(sizeof(*rpl5), 16); unsigned int mtu_idx; u64 opt0; @@ -1111,6 +1114,9 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); + if (!is_t5(lldi->adapter_type)) + opt2 |= RX_FC_DISABLE_F; + if (req->tcpopt.tstamp) opt2 |= TSTAMPS_EN_F; if (req->tcpopt.sack) @@ -1119,8 +1125,13 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) opt2 |= WND_SCALE_EN_F; hlen = ntohl(req->hdr_len); - tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + - IP_HDR_LEN_G(hlen); + + if (is_t5(lldi->adapter_type)) + tcph = (struct tcphdr *)((u8 *)(req + 1) + + ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen)); + else + tcph = (struct tcphdr *)((u8 *)(req + 1) + + T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen)); if (tcph->ece && tcph->cwr) opt2 |= CCTRL_ECN_V(1); @@ -1726,7 +1737,7 @@ static bool cxgbit_credit_err(const struct cxgbit_sock *csk) } while (skb) { - credit += skb->csum; + credit += (__force u32)skb->csum; skb = cxgbit_skcb_tx_wr_next(skb); } @@ -1753,6 +1764,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) while (credits) { struct sk_buff *p = 
cxgbit_sock_peek_wr(csk); + const u32 csum = (__force u32)p->csum; if (unlikely(!p)) { pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n", @@ -1761,17 +1773,17 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) break; } - if (unlikely(credits < p->csum)) { + if (unlikely(credits < csum)) { pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n", csk, csk->tid, credits, csk->wr_cred, csk->wr_una_cred, - p->csum); - p->csum -= credits; + csum); + p->csum = (__force __wsum)(csum - credits); break; } cxgbit_sock_dequeue_wr(csk); - credits -= p->csum; + credits -= csum; kfree_skb(p); } diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h index 28c11bd1b930..dcaed3a1d23f 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_lro.h +++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h @@ -31,8 +31,9 @@ enum cxgbit_pducb_flags { PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */ PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */ PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */ - PDUCBF_RX_HCRC_ERR = (1 << 4), /* header digest error */ - PDUCBF_RX_DCRC_ERR = (1 << 5), /* data digest error */ + PDUCBF_RX_DDP_CMP = (1 << 4), /* ddp completion */ + PDUCBF_RX_HCRC_ERR = (1 << 5), /* header digest error */ + PDUCBF_RX_DCRC_ERR = (1 << 6), /* data digest error */ }; struct cxgbit_lro_pdu_cb { diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index 96eedfc49c94..4fd775ace541 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -165,29 +165,24 @@ static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state) } static void -cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl, - struct cxgbit_lro_pdu_cb *pdu_cb) +cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb, + u32 ddpvld) { - unsigned int status = ntohl(cpl->ddpvld); - pdu_cb->flags |= PDUCBF_RX_STATUS; - pdu_cb->ddigest = ntohl(cpl->ulp_crc); - pdu_cb->pdulen = ntohs(cpl->len); - - if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) { - pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status); + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld); pdu_cb->flags |= PDUCBF_RX_HCRC_ERR; } - if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) { - pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status); + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld); pdu_cb->flags |= PDUCBF_RX_DCRC_ERR; } - if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT)) - pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status); + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT)) + pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld); - if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) && + if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) && (!(pdu_cb->flags & PDUCBF_RX_DATA))) { pdu_cb->flags |= PDUCBF_RX_DATA_DDPD; } @@ -201,13 +196,17 @@ cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp) lro_cb->pdu_idx); struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1); - cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb); + cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld)); + + pdu_cb->flags |= PDUCBF_RX_STATUS; + pdu_cb->ddigest = ntohl(cpl->ulp_crc); + pdu_cb->pdulen = ntohs(cpl->len); if (pdu_cb->flags & PDUCBF_RX_HDR) pdu_cb->complete 
= true; - lro_cb->complete = true; lro_cb->pdu_totallen += pdu_cb->pdulen; + lro_cb->complete = true; lro_cb->pdu_idx++; } @@ -257,7 +256,7 @@ cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl) cxgbit_skcb_flags(skb) = 0; lro_cb->complete = false; - } else { + } else if (op == CPL_ISCSI_DATA) { struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va; offset = sizeof(struct cpl_iscsi_data); @@ -267,6 +266,36 @@ cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl) pdu_cb->doffset = lro_cb->offset; pdu_cb->nr_dfrags = gl->nfrags; pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags; + lro_cb->complete = false; + } else { + struct cpl_rx_iscsi_cmp *cpl; + + cpl = (struct cpl_rx_iscsi_cmp *)gl->va; + offset = sizeof(struct cpl_rx_iscsi_cmp); + pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS); + len = be16_to_cpu(cpl->len); + pdu_cb->hdr = gl->va + offset; + pdu_cb->hlen = len; + pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags; + pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc); + pdu_cb->pdulen = ntohs(cpl->len); + + if (unlikely(gl->nfrags > 1)) + cxgbit_skcb_flags(skb) = 0; + + cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, + be32_to_cpu(cpl->ddpvld)); + + if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) { + pdu_cb->flags |= PDUCBF_RX_DDP_CMP; + pdu_cb->complete = true; + } else if (pdu_cb->flags & PDUCBF_RX_DATA) { + pdu_cb->complete = true; + } + + lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen; + lro_cb->complete = true; + lro_cb->pdu_idx++; } cxgbit_copy_frags(skb, gl, offset); @@ -413,6 +442,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, switch (op) { case CPL_ISCSI_HDR: case CPL_ISCSI_DATA: + case CPL_RX_ISCSI_CMP: case CPL_RX_ISCSI_DDP: case CPL_FW4_ACK: lro_flush = false; @@ -454,12 +484,13 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, if (unlikely(op != *(u8 *)gl->va)) { pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", gl->va, be64_to_cpu(*rsp), - be64_to_cpu(*(u64 *)gl->va), + get_unaligned_be64(gl->va), gl->tot_len); return 0; } - if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) { + if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) || + (op == CPL_RX_ISCSI_CMP)) { if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr, napi)) return 0; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 8bcb9b71f764..bdcc8b4c522a 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -8,6 +8,8 @@ #include <linux/workqueue.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> + #include <asm/unaligned.h> #include <net/tcp.h> #include <target/target_core_base.h> @@ -162,12 +164,14 @@ cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen, u32 len, u32 credits, u32 compl) { struct fw_ofld_tx_data_wr *req; + const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; u32 submode = cxgbit_skcb_submode(skb); u32 wr_ulp_mode = 0; u32 hdr_size = sizeof(*req); u32 opcode = FW_OFLD_TX_DATA_WR; u32 immlen = 0; - u32 force = TX_FORCE_V(!submode); + u32 force = is_t5(lldi->adapter_type) ? 
TX_FORCE_V(!submode) : + T6_TX_FORCE_F; if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) { opcode = FW_ISCSI_TX_DATA_WR; @@ -243,7 +247,7 @@ void cxgbit_push_tx_frames(struct cxgbit_sock *csk) } __skb_unlink(skb, &csk->txq); set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); - skb->csum = credits_needed + flowclen16; + skb->csum = (__force __wsum)(credits_needed + flowclen16); csk->wr_cred -= credits_needed; csk->wr_una_cred += credits_needed; @@ -651,26 +655,6 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) u32 max_npdu, max_iso_npdu; if (conn->login->leading_connection) { - param = iscsi_find_param_from_key(DATASEQUENCEINORDER, - conn->param_list); - if (!param) { - pr_err("param not found key %s\n", DATASEQUENCEINORDER); - return -1; - } - - if (strcmp(param->value, YES)) - return 0; - - param = iscsi_find_param_from_key(DATAPDUINORDER, - conn->param_list); - if (!param) { - pr_err("param not found key %s\n", DATAPDUINORDER); - return -1; - } - - if (strcmp(param->value, YES)) - return 0; - param = iscsi_find_param_from_key(MAXBURSTLENGTH, conn->param_list); if (!param) { @@ -681,11 +665,6 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) if (kstrtou32(param->value, 0, &mbl) < 0) return -1; } else { - if (!conn->sess->sess_ops->DataSequenceInOrder) - return 0; - if (!conn->sess->sess_ops->DataPDUInOrder) - return 0; - mbl = conn->sess->sess_ops->MaxBurstLength; } @@ -704,6 +683,53 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) return 0; } +/* + * cxgbit_seq_pdu_inorder() + * @csk: pointer to cxgbit socket structure + * + * This function checks whether data sequence and data + * pdu are in order. + * + * Return: returns -1 on error, 0 if data sequence and + * data pdu are in order, 1 if data sequence or data pdu + * is not in order. 
+ */ +static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk) +{ + struct iscsi_conn *conn = csk->conn; + struct iscsi_param *param; + + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(DATASEQUENCEINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATASEQUENCEINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 1; + + param = iscsi_find_param_from_key(DATAPDUINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATAPDUINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 1; + + } else { + if (!conn->sess->sess_ops->DataSequenceInOrder) + return 1; + if (!conn->sess->sess_ops->DataPDUInOrder) + return 1; + } + + return 0; +} + static int cxgbit_set_params(struct iscsi_conn *conn) { struct cxgbit_sock *csk = conn->context; @@ -730,11 +756,24 @@ static int cxgbit_set_params(struct iscsi_conn *conn) } if (!erl) { + int ret; + + ret = cxgbit_seq_pdu_inorder(csk); + if (ret < 0) { + return -1; + } else if (ret > 0) { + if (is_t5(cdev->lldi.adapter_type)) + goto enable_ddp; + else + goto enable_digest; + } + if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { if (cxgbit_set_iso_npdu(csk)) return -1; } +enable_ddp: if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) { if (cxgbit_setup_conn_pgidx(csk, ppm->tformat.pgsz_idx_dflt)) @@ -743,6 +782,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn) } } +enable_digest: if (cxgbit_set_digest(csk)) return -1; @@ -983,11 +1023,36 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk) int rc, sg_nents, sg_off; bool dcrc_err = false; - rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd); - if (rc < 0) - return rc; - else if (!cmd) - return 0; + if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) { + u32 offset = be32_to_cpu(hdr->offset); + u32 ddp_data_len; + u32 payload_length = ntoh24(hdr->dlength); + bool success = false; + + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0); + if (!cmd) + return 0; + + ddp_data_len = offset - cmd->write_data_done; + atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets); + + cmd->write_data_done = offset; + cmd->next_burst_len = ddp_data_len; + cmd->data_sn = be32_to_cpu(hdr->datasn); + + rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, + cmd, payload_length, &success); + if (rc < 0) + return rc; + else if (!success) + return 0; + } else { + rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd); + if (rc < 0) + return rc; + else if (!cmd) + return 0; + } if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { pr_err("ITT: 0x%08x, Offset: %u, Length: %u," @@ -1351,6 +1416,9 @@ static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk) for (i = 0; i < ssi->nr_frags; i++) put_page(skb_frag_page(&ssi->frags[i])); ssi->nr_frags = 0; + skb->data_len = 0; + skb->truesize -= skb->len; + skb->len = 0; } static void @@ -1364,39 +1432,42 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) unsigned int len = 0; if (pdu_cb->flags & PDUCBF_RX_HDR) { - hpdu_cb->flags = pdu_cb->flags; + u8 hfrag_idx = hssi->nr_frags; + + hpdu_cb->flags |= pdu_cb->flags; hpdu_cb->seq = pdu_cb->seq; hpdu_cb->hdr = pdu_cb->hdr; hpdu_cb->hlen = pdu_cb->hlen; - memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx], + memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx], sizeof(skb_frag_t)); - get_page(skb_frag_page(&hssi->frags[0])); - hssi->nr_frags = 1; - hpdu_cb->frags = 1; - hpdu_cb->hfrag_idx = 0; + get_page(skb_frag_page(&hssi->frags[hfrag_idx])); + 
hssi->nr_frags++; + hpdu_cb->frags++; + hpdu_cb->hfrag_idx = hfrag_idx; - len = hssi->frags[0].size; - hskb->len = len; - hskb->data_len = len; - hskb->truesize = len; + len = hssi->frags[hfrag_idx].size; + hskb->len += len; + hskb->data_len += len; + hskb->truesize += len; } if (pdu_cb->flags & PDUCBF_RX_DATA) { - u8 hfrag_idx = 1, i; + u8 dfrag_idx = hssi->nr_frags, i; hpdu_cb->flags |= pdu_cb->flags; + hpdu_cb->dfrag_idx = dfrag_idx; len = 0; - for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) { - memcpy(&hssi->frags[hfrag_idx], + for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) { + memcpy(&hssi->frags[dfrag_idx], &ssi->frags[pdu_cb->dfrag_idx + i], sizeof(skb_frag_t)); - get_page(skb_frag_page(&hssi->frags[hfrag_idx])); + get_page(skb_frag_page(&hssi->frags[dfrag_idx])); - len += hssi->frags[hfrag_idx].size; + len += hssi->frags[dfrag_idx].size; hssi->nr_frags++; hpdu_cb->frags++; @@ -1405,7 +1476,6 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) hpdu_cb->dlen = pdu_cb->dlen; hpdu_cb->doffset = hpdu_cb->hlen; hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags; - hpdu_cb->dfrag_idx = 1; hskb->len += len; hskb->data_len += len; hskb->truesize += len; @@ -1490,10 +1560,15 @@ static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) { + struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; int ret = -1; - if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) - ret = cxgbit_rx_lro_skb(csk, skb); + if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) { + if (is_t5(lldi->adapter_type)) + ret = cxgbit_rx_lro_skb(csk, skb); + else + ret = cxgbit_process_lro_skb(csk, skb); + } __kfree_skb(skb); return ret; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index da2c73a255de..a91802432f2f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -24,6 +24,7 @@ #include <linux/vmalloc.h> #include <linux/idr.h> #include <linux/delay.h> +#include <linux/sched/signal.h> #include <asm/unaligned.h> #include <net/ipv6.h> #include <scsi/scsi_proto.h> @@ -1431,36 +1432,17 @@ static void iscsit_do_crypto_hash_buf( } int -iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, - struct iscsi_cmd **out_cmd) +__iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd *cmd, u32 payload_length, + bool *success) { - struct iscsi_data *hdr = (struct iscsi_data *)buf; - struct iscsi_cmd *cmd = NULL; + struct iscsi_data *hdr = buf; struct se_cmd *se_cmd; - u32 payload_length = ntoh24(hdr->dlength); int rc; - if (!payload_length) { - pr_warn("DataOUT payload is ZERO, ignoring.\n"); - return 0; - } - /* iSCSI write */ atomic_long_add(payload_length, &conn->sess->rx_data_octets); - if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { - pr_err("DataSegmentLength: %u is greater than" - " MaxXmitDataSegmentLength: %u\n", payload_length, - conn->conn_ops->MaxXmitDataSegmentLength); - return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, - buf); - } - - cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, - payload_length); - if (!cmd) - return 0; - pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset), @@ -1545,7 +1527,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, } } /* - * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and + * Perform DataSN, 
DataSequenceInOrder, DataPDUInOrder, and * within-command recovery checks before receiving the payload. */ rc = iscsit_check_pre_dataout(cmd, buf); @@ -1553,10 +1535,44 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, return 0; else if (rc == DATAOUT_CANNOT_RECOVER) return -1; - - *out_cmd = cmd; + *success = true; return 0; } +EXPORT_SYMBOL(__iscsit_check_dataout_hdr); + +int +iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd **out_cmd) +{ + struct iscsi_data *hdr = buf; + struct iscsi_cmd *cmd; + u32 payload_length = ntoh24(hdr->dlength); + int rc; + bool success = false; + + if (!payload_length) { + pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n"); + return 0; + } + + if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { + pr_err_ratelimited("DataSegmentLength: %u is greater than" + " MaxXmitDataSegmentLength: %u\n", payload_length, + conn->conn_ops->MaxXmitDataSegmentLength); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf); + } + + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length); + if (!cmd) + return 0; + + rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success); + + if (success) + *out_cmd = cmd; + + return rc; +} EXPORT_SYMBOL(iscsit_check_dataout_hdr); static int @@ -1920,6 +1936,28 @@ out: return ret; } +static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf) +{ + switch (iscsi_tmf) { + case ISCSI_TM_FUNC_ABORT_TASK: + return TMR_ABORT_TASK; + case ISCSI_TM_FUNC_ABORT_TASK_SET: + return TMR_ABORT_TASK_SET; + case ISCSI_TM_FUNC_CLEAR_ACA: + return TMR_CLEAR_ACA; + case ISCSI_TM_FUNC_CLEAR_TASK_SET: + return TMR_CLEAR_TASK_SET; + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + return TMR_LUN_RESET; + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + return TMR_TARGET_WARM_RESET; + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + return TMR_TARGET_COLD_RESET; + default: + return TMR_UNKNOWN; + } +} + int iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, unsigned char *buf) @@ -1929,7 +1967,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct iscsi_tm *hdr; int out_of_order_cmdsn = 0, ret; bool sess_ref = false; - u8 function; + u8 function, tcm_function = TMR_UNKNOWN; hdr = (struct iscsi_tm *) buf; hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; @@ -1975,54 +2013,27 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * LIO-Target $FABRIC_MOD */ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { - - u8 tcm_function; - int ret; - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, conn->sess->se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, cmd->sense_buffer + 2); target_get_sess_cmd(&cmd->se_cmd, true); sess_ref = true; - - switch (function) { - case ISCSI_TM_FUNC_ABORT_TASK: - tcm_function = TMR_ABORT_TASK; - break; - case ISCSI_TM_FUNC_ABORT_TASK_SET: - tcm_function = TMR_ABORT_TASK_SET; - break; - case ISCSI_TM_FUNC_CLEAR_ACA: - tcm_function = TMR_CLEAR_ACA; - break; - case ISCSI_TM_FUNC_CLEAR_TASK_SET: - tcm_function = TMR_CLEAR_TASK_SET; - break; - case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: - tcm_function = TMR_LUN_RESET; - break; - case ISCSI_TM_FUNC_TARGET_WARM_RESET: - tcm_function = TMR_TARGET_WARM_RESET; - break; - case ISCSI_TM_FUNC_TARGET_COLD_RESET: - tcm_function = TMR_TARGET_COLD_RESET; - break; - default: + tcm_function = iscsit_convert_tmf(function); + if (tcm_function == TMR_UNKNOWN) { pr_err("Unknown iSCSI TMR Function:" " 0x%02x\n", function); return iscsit_add_reject_cmd(cmd, 
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } - - ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, - tcm_function, GFP_KERNEL); - if (ret < 0) - return iscsit_add_reject_cmd(cmd, + } + ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function, + GFP_KERNEL); + if (ret < 0) + return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); - cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; - } + cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; cmd->i_state = ISTATE_SEND_TASKMGTRSP; @@ -4136,7 +4147,7 @@ int iscsit_close_connection( /* * During Connection recovery drop unacknowledged out of order * commands for this connection, and prepare the other commands - * for realligence. + * for reallegiance. * * During normal operation clear the out of order commands (but * do not free the struct iscsi_ooo_cmdsn's) and release all @@ -4144,7 +4155,7 @@ int iscsit_close_connection( */ if (atomic_read(&conn->connection_recovery)) { iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn); - iscsit_prepare_cmds_for_realligance(conn); + iscsit_prepare_cmds_for_reallegiance(conn); } else { iscsit_clear_ooo_cmdsns_for_conn(conn); iscsit_release_commands_from_conn(conn); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index b54e72c7ab0f..9a96e17bf7cd 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -17,6 +17,8 @@ * GNU General Public License for more details. ******************************************************************************/ +#include <linux/sched/signal.h> + #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> @@ -44,10 +46,8 @@ void iscsit_set_dataout_sequence_values( */ if (cmd->unsolicited_data) { cmd->seq_start_offset = cmd->write_data_done; - cmd->seq_end_offset = (cmd->write_data_done + - ((cmd->se_cmd.data_length > - conn->sess->sess_ops->FirstBurstLength) ? 
- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length)); + cmd->seq_end_offset = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); return; } diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index faf9ae014b30..8df9c90f3db3 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -312,7 +312,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) return 0; } -int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) +int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *conn) { u32 cmd_count = 0; struct iscsi_cmd *cmd, *cmd_tmp; @@ -347,7 +347,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) && (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) { - pr_debug("Not performing realligence on" + pr_debug("Not performing reallegiance on" " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x," " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); @@ -382,7 +382,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) cmd_count++; pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x," " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for" - " realligence.\n", cmd->iscsi_opcode, + " reallegiance.\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn, conn->cid); diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h index 7965f1e86506..634d01e13652 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.h +++ b/drivers/target/iscsi/iscsi_target_erl2.h @@ -19,7 +19,7 @@ extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *, struct iscsi_session *); extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32); extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *); -extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *); +extern int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *); extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *); #endif /*** ISCSI_TARGET_ERL2_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index eab274d17b5c..ad8f3011bdc2 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/string.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> #include <linux/idr.h> #include <linux/tcp.h> /* TCP_NODELAY */ #include <net/ipv6.h> /* ipv6_addr_v4mapped() */ @@ -223,7 +224,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) return 0; pr_debug("%s iSCSI Session SID %u is still active for %s," - " preforming session reinstatement.\n", (sessiontype) ? + " performing session reinstatement.\n", (sessiontype) ? 
"Discovery" : "Normal", sess->sid, sess->sess_ops->InitiatorName); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 46388c9e08da..7ccc9c1cbfd1 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -19,6 +19,7 @@ #include <linux/ctype.h> #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <net/sock.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> @@ -1249,16 +1250,16 @@ int iscsi_target_start_negotiation( { int ret; - if (conn->sock) { - struct sock *sk = conn->sock->sk; + if (conn->sock) { + struct sock *sk = conn->sock->sk; - write_lock_bh(&sk->sk_callback_lock); - set_bit(LOGIN_FLAGS_READY, &conn->login_flags); - write_unlock_bh(&sk->sk_callback_lock); - } + write_lock_bh(&sk->sk_callback_lock); + set_bit(LOGIN_FLAGS_READY, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } - ret = iscsi_target_do_login(conn, login); - if (ret < 0) { + ret = iscsi_target_do_login(conn, login); + if (ret < 0) { cancel_delayed_work_sync(&conn->login_work); cancel_delayed_work_sync(&conn->login_cleanup_work); iscsi_target_restore_sock_callbacks(conn); diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index 3d637055c36f..cb231c907d51 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -440,14 +440,14 @@ static int iscsit_task_reassign_complete( break; default: pr_err("Illegal iSCSI Opcode 0x%02x during" - " command realligence\n", cmd->iscsi_opcode); + " command reallegiance\n", cmd->iscsi_opcode); return -1; } if (ret != 0) return ret; - pr_debug("Completed connection realligence for Opcode: 0x%02x," + pr_debug("Completed connection reallegiance for Opcode: 0x%02x," " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode, cmd->init_task_tag, conn->cid); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index b5a1b4ccba12..5041a9c8bdcb 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -417,6 +417,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( return NULL; } +EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump); struct iscsi_cmd *iscsit_find_cmd_from_ttt( struct iscsi_conn *conn, @@ -1304,39 +1305,6 @@ static int iscsit_do_rx_data( return total_rx; } -static int iscsit_do_tx_data( - struct iscsi_conn *conn, - struct iscsi_data_count *count) -{ - int ret, iov_len; - struct kvec *iov_p; - struct msghdr msg; - - if (!conn || !conn->sock || !conn->conn_ops) - return -1; - - if (count->data_length <= 0) { - pr_err("Data length is: %d\n", count->data_length); - return -1; - } - - memset(&msg, 0, sizeof(struct msghdr)); - - iov_p = count->iov; - iov_len = count->iov_count; - - ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, - count->data_length); - if (ret != count->data_length) { - pr_err("Unexpected ret: %d send data %d\n", - ret, count->data_length); - return -EPIPE; - } - pr_debug("ret: %d, sent data: %d\n", ret, count->data_length); - - return ret; -} - int rx_data( struct iscsi_conn *conn, struct kvec *iov, @@ -1363,45 +1331,35 @@ int tx_data( int iov_count, int data) { - struct iscsi_data_count c; + struct msghdr msg; + int total_tx = 0; if (!conn || !conn->sock || !conn->conn_ops) return -1; - memset(&c, 0, sizeof(struct iscsi_data_count)); - c.iov = iov; - c.iov_count = iov_count; - c.data_length = data; - c.type = ISCSI_TX_DATA; + 
if (data <= 0) { + pr_err("Data length is: %d\n", data); + return -1; + } - return iscsit_do_tx_data(conn, &c); -} + memset(&msg, 0, sizeof(struct msghdr)); -static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y) -{ - switch (x->ss_family) { - case AF_INET: { - struct sockaddr_in *sinx = (struct sockaddr_in *)x; - struct sockaddr_in *siny = (struct sockaddr_in *)y; - if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr) - return false; - if (sinx->sin_port != siny->sin_port) - return false; - break; - } - case AF_INET6: { - struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x; - struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y; - if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr)) - return false; - if (sinx->sin6_port != siny->sin6_port) - return false; - break; - } - default: - return false; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, + iov, iov_count, data); + + while (msg_data_left(&msg)) { + int tx_loop = sock_sendmsg(conn->sock, &msg); + if (tx_loop <= 0) { + pr_debug("tx_loop: %d total_tx %d\n", + tx_loop, total_tx); + return tx_loop; + } + total_tx += tx_loop; + pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", + tx_loop, total_tx, data); } - return true; + + return total_tx; } void iscsit_collect_login_stats( @@ -1420,13 +1378,6 @@ void iscsit_collect_login_stats( ls = &tiqn->login_stats; spin_lock(&ls->lock); - if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) && - ((get_jiffies_64() - ls->last_fail_time) < 10)) { - /* We already have the failure info for this login */ - spin_unlock(&ls->lock); - return; - } - if (status_class == ISCSI_STATUS_CLS_SUCCESS) ls->accepts++; else if (status_class == ISCSI_STATUS_CLS_REDIRECT) { @@ -1471,10 +1422,10 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn) { struct iscsi_portal_group *tpg; - if (!conn || !conn->sess) + if (!conn) return NULL; - tpg = conn->sess->tpg; + tpg = conn->tpg; if (!tpg) return NULL; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 26929c44d703..c754ae33bf7b 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -78,12 +78,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) &deve->read_bytes); se_lun = rcu_dereference(deve->se_lun); + + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { + se_lun = NULL; + goto out_unlock; + } + se_cmd->se_lun = rcu_dereference(deve->se_lun); se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; - - percpu_ref_get(&se_lun->lun_ref); se_cmd->lun_ref_active = true; if ((se_cmd->data_direction == DMA_TO_DEVICE) && @@ -97,6 +101,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) goto ref_dev; } } +out_unlock: rcu_read_unlock(); if (!se_lun) { @@ -163,7 +168,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun) rcu_read_lock(); deve = target_nacl_find_deve(nacl, unpacked_lun); if (deve) { - se_tmr->tmr_lun = rcu_dereference(deve->se_lun); se_cmd->se_lun = rcu_dereference(deve->se_lun); se_lun = rcu_dereference(deve->se_lun); se_cmd->pr_res_key = deve->pr_res_key; @@ -816,6 +820,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) xcopy_lun = &dev->xcopy_lun; rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); init_completion(&xcopy_lun->lun_ref_comp); + init_completion(&xcopy_lun->lun_shutdown_comp); INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); 
mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index df7b6e95c019..68d8aef7ab78 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -604,7 +604,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_PROCESSING; - cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); __target_execute_cmd(cmd, false); diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 1a39033d2bff..8038255b21e8 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -158,12 +158,28 @@ static ssize_t target_stat_tgt_resets_show(struct config_item *item, atomic_long_read(&to_stat_tgt_dev(item)->num_resets)); } +static ssize_t target_stat_tgt_aborts_complete_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&to_stat_tgt_dev(item)->aborts_complete)); +} + +static ssize_t target_stat_tgt_aborts_no_task_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&to_stat_tgt_dev(item)->aborts_no_task)); +} + CONFIGFS_ATTR_RO(target_stat_tgt_, inst); CONFIGFS_ATTR_RO(target_stat_tgt_, indx); CONFIGFS_ATTR_RO(target_stat_tgt_, num_lus); CONFIGFS_ATTR_RO(target_stat_tgt_, status); CONFIGFS_ATTR_RO(target_stat_tgt_, non_access_lus); CONFIGFS_ATTR_RO(target_stat_tgt_, resets); +CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_complete); +CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_no_task); static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { &target_stat_tgt_attr_inst, @@ -172,6 +188,8 @@ static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { &target_stat_tgt_attr_status, &target_stat_tgt_attr_non_access_lus, &target_stat_tgt_attr_resets, + &target_stat_tgt_attr_aborts_complete, + &target_stat_tgt_attr_aborts_no_task, NULL, }; @@ -795,16 +813,34 @@ static ssize_t target_stat_transport_dev_name_show(struct config_item *item, return ret; } +static ssize_t target_stat_transport_proto_id_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_transport_stat(item); + struct se_device *dev; + struct se_portal_group *tpg = lun->lun_tpg; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->proto_id); + rcu_read_unlock(); + return ret; +} + CONFIGFS_ATTR_RO(target_stat_transport_, inst); CONFIGFS_ATTR_RO(target_stat_transport_, device); CONFIGFS_ATTR_RO(target_stat_transport_, indx); CONFIGFS_ATTR_RO(target_stat_transport_, dev_name); +CONFIGFS_ATTR_RO(target_stat_transport_, proto_id); static struct configfs_attribute *target_stat_scsi_transport_attrs[] = { &target_stat_transport_attr_inst, &target_stat_transport_attr_device, &target_stat_transport_attr_indx, &target_stat_transport_attr_dev_name, + &target_stat_transport_attr_proto_id, NULL, }; diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 4f229e711e1c..dce1e1b47316 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -175,10 +175,9 @@ void core_tmr_abort_task( printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", se_cmd->se_tfo->get_fabric_name(), ref_tag); - if (!__target_check_io_state(se_cmd, 
se_sess, 0)) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - goto out; - } + if (!__target_check_io_state(se_cmd, se_sess, 0)) + continue; + list_del_init(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); @@ -191,14 +190,15 @@ void core_tmr_abort_task( printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %llu\n", ref_tag); tmr->response = TMR_FUNCTION_COMPLETE; + atomic_long_inc(&dev->aborts_complete); return; } spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); -out: printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n", tmr->ref_task_tag); tmr->response = TMR_TASK_DOES_NOT_EXIST; + atomic_long_inc(&dev->aborts_no_task); } static void core_tmr_drain_tmr_list( @@ -217,13 +217,8 @@ static void core_tmr_drain_tmr_list( * LUN_RESET tmr.. */ spin_lock_irqsave(&dev->se_tmr_lock, flags); + list_del_init(&tmr->tmr_list); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { - /* - * Allow the received TMR to return with FUNCTION_COMPLETE. - */ - if (tmr_p == tmr) - continue; - cmd = tmr_p->task_cmd; if (!cmd) { pr_err("Unable to locate struct se_cmd for TMR\n"); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 4a8b180c478b..c0dbfa016575 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -445,7 +445,7 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref) { struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); - complete(&lun->lun_ref_comp); + complete(&lun->lun_shutdown_comp); } /* Does not change se_wwn->priv. */ @@ -572,6 +572,7 @@ struct se_lun *core_tpg_alloc_lun( lun->lun_link_magic = SE_LUN_LINK_MAGIC; atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_ref_comp); + init_completion(&lun->lun_shutdown_comp); INIT_LIST_HEAD(&lun->lun_deve_list); INIT_LIST_HEAD(&lun->lun_dev_link); atomic_set(&lun->lun_tg_pt_secondary_offline, 0); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 437591bc7c08..434d9d693989 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -593,9 +593,6 @@ static void target_remove_from_state_list(struct se_cmd *cmd) if (!dev) return; - if (cmd->transport_state & CMD_T_BUSY) - return; - spin_lock_irqsave(&dev->execute_task_lock, flags); if (cmd->state_active) { list_del(&cmd->state_list); @@ -604,24 +601,18 @@ static void target_remove_from_state_list(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, - bool write_pending) +static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) { unsigned long flags; - if (remove_from_lists) { - target_remove_from_state_list(cmd); + target_remove_from_state_list(cmd); - /* - * Clear struct se_cmd->se_lun before the handoff to FE. - */ - cmd->se_lun = NULL; - } + /* + * Clear struct se_cmd->se_lun before the handoff to FE. + */ + cmd->se_lun = NULL; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (write_pending) - cmd->t_state = TRANSPORT_WRITE_PENDING; - /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. 
@@ -635,31 +626,18 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, complete_all(&cmd->t_transport_stop_comp); return 1; } - cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) { - /* - * Some fabric modules like tcm_loop can release - * their internally allocated I/O reference now and - * struct se_cmd now. - * - * Fabric modules are expected to return '1' here if the - * se_cmd being passed is released at this point, - * or zero if not being released. - */ - if (cmd->se_tfo->check_stop_free != NULL) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return cmd->se_tfo->check_stop_free(cmd); - } - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return 0; -} -static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) -{ - return transport_cmd_check_stop(cmd, true, false); + /* + * Some fabric modules like tcm_loop can release their internally + * allocated I/O reference and struct se_cmd now. + * + * Fabric modules are expected to return '1' here if the se_cmd being + * passed is released at this point, or zero if not being released. + */ + return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd) + : 0; } static void transport_lun_remove_cmd(struct se_cmd *cmd) @@ -733,7 +711,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) spin_lock_irqsave(&cmd->t_state_lock, flags); - cmd->transport_state &= ~CMD_T_BUSY; if (dev && dev->transport->transport_complete) { dev->transport->transport_complete(cmd, @@ -1246,7 +1223,6 @@ void transport_init_se_cmd( init_completion(&cmd->cmd_wait_comp); spin_lock_init(&cmd->t_state_lock); kref_init(&cmd->cmd_kref); - cmd->transport_state = CMD_T_DEV_ACTIVE; cmd->se_tfo = tfo; cmd->se_sess = se_sess; @@ -1671,6 +1647,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, { int ret = 0, post_ret = 0; + if (transport_check_aborted_status(cmd, 1)) + return; + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", @@ -1801,7 +1780,7 @@ void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) return; err: spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, ret); @@ -1829,7 +1808,7 @@ static int target_write_prot_action(struct se_cmd *cmd) sectors, 0, cmd->t_prot_sg, 0); if (unlikely(cmd->pi_err)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, cmd->pi_err); return -1; @@ -1918,7 +1897,7 @@ void target_execute_cmd(struct se_cmd *cmd) } cmd->t_state = TRANSPORT_PROCESSING; - cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); if (target_write_prot_action(cmd)) @@ -1926,7 +1905,7 @@ void target_execute_cmd(struct se_cmd *cmd) if (target_handle_task_attr(cmd)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); return; } @@ -1979,8 +1958,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd) if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { atomic_dec_mb(&dev->simple_cmds); dev->dev_cur_ordered_id++; - 
pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n", - dev->dev_cur_ordered_id); } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", @@ -2387,6 +2364,7 @@ EXPORT_SYMBOL(target_alloc_sgl); sense_reason_t transport_generic_new_cmd(struct se_cmd *cmd) { + unsigned long flags; int ret = 0; bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); @@ -2452,7 +2430,24 @@ transport_generic_new_cmd(struct se_cmd *cmd) target_execute_cmd(cmd); return 0; } - transport_cmd_check_stop(cmd, false, true); + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->t_state = TRANSPORT_WRITE_PENDING; + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. + */ + if (cmd->transport_state & CMD_T_STOP) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + complete_all(&cmd->t_transport_stop_comp); + return 0; + } + cmd->transport_state &= ~CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); ret = cmd->se_tfo->write_pending(cmd); if (ret == -EAGAIN || ret == -ENOMEM) @@ -2595,39 +2590,38 @@ static void target_release_cmd_kref(struct kref *kref) unsigned long flags; bool fabric_stop; - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + if (se_sess) { + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - spin_lock(&se_cmd->t_state_lock); - fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && - (se_cmd->transport_state & CMD_T_ABORTED); - spin_unlock(&se_cmd->t_state_lock); + spin_lock(&se_cmd->t_state_lock); + fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && + (se_cmd->transport_state & CMD_T_ABORTED); + spin_unlock(&se_cmd->t_state_lock); - if (se_cmd->cmd_wait_set || fabric_stop) { + if (se_cmd->cmd_wait_set || fabric_stop) { + list_del_init(&se_cmd->se_cmd_list); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + target_free_cmd_mem(se_cmd); + complete(&se_cmd->cmd_wait_comp); + return; + } list_del_init(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - target_free_cmd_mem(se_cmd); - complete(&se_cmd->cmd_wait_comp); - return; } - list_del_init(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); target_free_cmd_mem(se_cmd); se_cmd->se_tfo->release_cmd(se_cmd); } -/* target_put_sess_cmd - Check for active I/O shutdown via kref_put - * @se_cmd: command descriptor to drop +/** + * target_put_sess_cmd - decrease the command reference count + * @se_cmd: command to drop a reference from + * + * Returns 1 if and only if this target_put_sess_cmd() call caused the + * refcount to drop to zero. Returns zero otherwise. 
*/ int target_put_sess_cmd(struct se_cmd *se_cmd) { - struct se_session *se_sess = se_cmd->se_sess; - - if (!se_sess) { - target_free_cmd_mem(se_cmd); - se_cmd->se_tfo->release_cmd(se_cmd); - return 1; - } return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); @@ -2706,10 +2700,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) } EXPORT_SYMBOL(target_wait_for_sess_cmds); +static void target_lun_confirm(struct percpu_ref *ref) +{ + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); + + complete(&lun->lun_ref_comp); +} + void transport_clear_lun_ref(struct se_lun *lun) { - percpu_ref_kill(&lun->lun_ref); + /* + * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop + * the initial reference and schedule confirm kill to be + * executed after one full RCU grace period has completed. + */ + percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm); + /* + * The first completion waits for percpu_ref_switch_to_atomic_rcu() + * to call target_lun_confirm after lun->lun_ref has been marked + * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t + * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref + * fails for all new incoming I/O. + */ wait_for_completion(&lun->lun_ref_comp); + /* + * The second completion waits for percpu_ref_put_many() to + * invoke ->release() after lun->lun_ref has switched to + * atomic_t mode, and lun->lun_ref.count has reached zero. + * + * At this point all target-core lun->lun_ref references have + * been dropped via transport_lun_remove_cmd(), and it's safe + * to proceed with the remaining LUN shutdown. + */ + wait_for_completion(&lun->lun_shutdown_comp); } static bool @@ -2765,11 +2788,8 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, } /** - * transport_wait_for_tasks - wait for completion to occur - * @cmd: command to wait - * - * Called from frontend fabric context to wait for storage engine - * to pause and/or release frontend generated struct se_cmd. 
+ * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp + * @cmd: command to wait on */ bool transport_wait_for_tasks(struct se_cmd *cmd) { diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 5c1cb2df3a54..c3adefe95e50 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -642,9 +642,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); spin_lock(&udev->commands_lock); - cmd = idr_find(&udev->commands, entry->hdr.cmd_id); - if (cmd) - idr_remove(&udev->commands, cmd->cmd_id); + cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); spin_unlock(&udev->commands_lock); if (!cmd) { diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 9af7842b8178..ec372860106f 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -83,14 +83,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; - struct fc_lport *lport; struct ft_sess *sess; if (!cmd) return; sess = cmd->sess; fp = cmd->req_frame; - lport = fr_dev(fp); if (fr_seq(fp)) fc_seq_release(fr_seq(fp)); fc_frame_free(fp); diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index c2c056cc7ea5..776b34396144 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -245,6 +245,15 @@ config RCAR_THERMAL Enable this to plug the R-Car thermal sensor driver into the Linux thermal framework. +config RCAR_GEN3_THERMAL + tristate "Renesas R-Car Gen3 thermal driver" + depends on ARCH_RENESAS || COMPILE_TEST + depends on HAS_IOMEM + depends on OF + help + Enable this to plug the R-Car Gen3 thermal sensor driver into the Linux + thermal framework. + config KIRKWOOD_THERMAL tristate "Temperature sensor on Marvell Kirkwood SoCs" depends on MACH_KIRKWOOD || COMPILE_TEST @@ -436,4 +445,12 @@ depends on (ARCH_QCOM && OF) || COMPILE_TEST source "drivers/thermal/qcom/Kconfig" endmenu +config ZX2967_THERMAL + tristate "Thermal sensors on zx2967 SoC" + depends on ARCH_ZX || COMPILE_TEST + help + Enable the zx2967 thermal sensors driver, which supports + the primitive temperature sensor embedded in zx2967 SoCs. + This sensor generates the real time die temperature. 
+ endif diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 6a3d7b573036..7adae2029355 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o +obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o obj-y += samsung/ obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o @@ -56,3 +57,4 @@ obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/ obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o +obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c index ed5dd0e88657..56711c25584d 100644 --- a/drivers/thermal/clock_cooling.c +++ b/drivers/thermal/clock_cooling.c @@ -65,42 +65,7 @@ struct clock_cooling_device { }; #define to_clock_cooling_device(x) \ container_of(x, struct clock_cooling_device, clk_rate_change_nb) -static DEFINE_IDR(clock_idr); -static DEFINE_MUTEX(cooling_clock_lock); - -/** - * clock_cooling_get_idr - function to get an unique id. - * @id: int * value generated by this function. - * - * This function will populate @id with an unique - * id, using the idr API. - * - * Return: 0 on success, an error code on failure. - */ -static int clock_cooling_get_idr(int *id) -{ - int ret; - - mutex_lock(&cooling_clock_lock); - ret = idr_alloc(&clock_idr, NULL, 0, 0, GFP_KERNEL); - mutex_unlock(&cooling_clock_lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - - return 0; -} - -/** - * release_idr - function to free the unique id. - * @id: int value representing the unique id. 
- */ -static void release_idr(int id) -{ - mutex_lock(&cooling_clock_lock); - idr_remove(&clock_idr, id); - mutex_unlock(&cooling_clock_lock); -} +static DEFINE_IDA(clock_ida); /* Below code defines functions to be used for clock as cooling device */ @@ -432,16 +397,17 @@ clock_cooling_register(struct device *dev, const char *clock_name) if (IS_ERR(ccdev->clk)) return ERR_CAST(ccdev->clk); - ret = clock_cooling_get_idr(&ccdev->id); - if (ret) - return ERR_PTR(-EINVAL); + ret = ida_simple_get(&clock_ida, 0, 0, GFP_KERNEL); + if (ret < 0) + return ERR_PTR(ret); + ccdev->id = ret; snprintf(dev_name, sizeof(dev_name), "thermal-clock-%d", ccdev->id); cdev = thermal_cooling_device_register(dev_name, ccdev, &clock_cooling_ops); if (IS_ERR(cdev)) { - release_idr(ccdev->id); + ida_simple_remove(&clock_ida, ccdev->id); return ERR_PTR(-EINVAL); } ccdev->cdev = cdev; @@ -450,7 +416,7 @@ clock_cooling_register(struct device *dev, const char *clock_name) /* Assuming someone has already filled the opp table for this device */ ret = dev_pm_opp_init_cpufreq_table(dev, &ccdev->freq_table); if (ret) { - release_idr(ccdev->id); + ida_simple_remove(&clock_ida, ccdev->id); return ERR_PTR(ret); } ccdev->clock_state = 0; @@ -481,6 +447,6 @@ void clock_cooling_unregister(struct thermal_cooling_device *cdev) dev_pm_opp_free_cpufreq_table(ccdev->dev, &ccdev->freq_table); thermal_cooling_device_unregister(ccdev->cdev); - release_idr(ccdev->id); + ida_simple_remove(&clock_ida, ccdev->id); } EXPORT_SYMBOL_GPL(clock_cooling_unregister); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 85fdbf762fa0..91048eeca28b 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -26,6 +26,7 @@ #include <linux/thermal.h> #include <linux/cpufreq.h> #include <linux/err.h> +#include <linux/idr.h> #include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/cpu.h> @@ -104,50 +105,13 @@ struct cpufreq_cooling_device { struct device *cpu_dev; get_static_t plat_get_static_power; }; -static DEFINE_IDR(cpufreq_idr); -static DEFINE_MUTEX(cooling_cpufreq_lock); +static DEFINE_IDA(cpufreq_ida); static unsigned int cpufreq_dev_count; static DEFINE_MUTEX(cooling_list_lock); static LIST_HEAD(cpufreq_dev_list); -/** - * get_idr - function to get a unique id. - * @idr: struct idr * handle used to create a id. - * @id: int * value generated by this function. - * - * This function will populate @id with an unique - * id, using the idr API. - * - * Return: 0 on success, an error code on failure. - */ -static int get_idr(struct idr *idr, int *id) -{ - int ret; - - mutex_lock(&cooling_cpufreq_lock); - ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); - mutex_unlock(&cooling_cpufreq_lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - - return 0; -} - -/** - * release_idr - function to free the unique id. - * @idr: struct idr * handle used for creating the id. - * @id: int value representing the unique id. 
- */ -static void release_idr(struct idr *idr, int id) -{ - mutex_lock(&cooling_cpufreq_lock); - idr_remove(idr, id); - mutex_unlock(&cooling_cpufreq_lock); -} - /* Below code defines functions to be used for cpufreq as cooling device */ /** @@ -645,31 +609,39 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev, unsigned long state, u32 *power) { unsigned int freq, num_cpus; - cpumask_t cpumask; + cpumask_var_t cpumask; u32 static_power, dynamic_power; int ret; struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; - cpumask_and(&cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask); - num_cpus = cpumask_weight(&cpumask); + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_and(cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask); + num_cpus = cpumask_weight(cpumask); /* None of our cpus are online, so no power */ if (num_cpus == 0) { *power = 0; - return 0; + ret = 0; + goto out; } freq = cpufreq_device->freq_table[state]; - if (!freq) - return -EINVAL; + if (!freq) { + ret = -EINVAL; + goto out; + } dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus; ret = get_static_power(cpufreq_device, tz, freq, &static_power); if (ret) - return ret; + goto out; *power = static_power + dynamic_power; - return 0; +out: + free_cpumask_var(cpumask); + return ret; } /** @@ -795,16 +767,20 @@ __cpufreq_cooling_register(struct device_node *np, struct cpufreq_cooling_device *cpufreq_dev; char dev_name[THERMAL_NAME_LENGTH]; struct cpufreq_frequency_table *pos, *table; - struct cpumask temp_mask; + cpumask_var_t temp_mask; unsigned int freq, i, num_cpus; int ret; struct thermal_cooling_device_ops *cooling_ops; - cpumask_and(&temp_mask, clip_cpus, cpu_online_mask); - policy = cpufreq_cpu_get(cpumask_first(&temp_mask)); + if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL)) + return ERR_PTR(-ENOMEM); + + cpumask_and(temp_mask, clip_cpus, cpu_online_mask); + policy = cpufreq_cpu_get(cpumask_first(temp_mask)); if (!policy) { pr_debug("%s: CPUFreq policy not found\n", __func__); - return ERR_PTR(-EPROBE_DEFER); + cool_dev = ERR_PTR(-EPROBE_DEFER); + goto free_cpumask; } table = policy->freq_table; @@ -867,11 +843,12 @@ __cpufreq_cooling_register(struct device_node *np, cooling_ops = &cpufreq_cooling_ops; } - ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); - if (ret) { + ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL); + if (ret < 0) { cool_dev = ERR_PTR(ret); goto free_power_table; } + cpufreq_dev->id = ret; /* Fill freq-table in descending order of frequencies */ for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { @@ -891,27 +868,24 @@ __cpufreq_cooling_register(struct device_node *np, cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, cooling_ops); if (IS_ERR(cool_dev)) - goto remove_idr; + goto remove_ida; cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; cpufreq_dev->cool_dev = cool_dev; - mutex_lock(&cooling_cpufreq_lock); - mutex_lock(&cooling_list_lock); list_add(&cpufreq_dev->node, &cpufreq_dev_list); - mutex_unlock(&cooling_list_lock); /* Register the notifier for first cpufreq cooling device */ if (!cpufreq_dev_count++) cpufreq_register_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); - mutex_unlock(&cooling_cpufreq_lock); + mutex_unlock(&cooling_list_lock); goto put_policy; -remove_idr: - release_idr(&cpufreq_idr, cpufreq_dev->id); +remove_ida: + ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); free_power_table: kfree(cpufreq_dev->dyn_power_table); free_table: @@ -924,7 +898,8 
@@ free_cdev: kfree(cpufreq_dev); put_policy: cpufreq_cpu_put(policy); - +free_cpumask: + free_cpumask_var(temp_mask); return cool_dev; } @@ -1052,20 +1027,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) cpufreq_dev = cdev->devdata; + mutex_lock(&cooling_list_lock); /* Unregister the notifier for the last cpufreq cooling device */ - mutex_lock(&cooling_cpufreq_lock); if (!--cpufreq_dev_count) cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); - mutex_lock(&cooling_list_lock); list_del(&cpufreq_dev->node); mutex_unlock(&cooling_list_lock); - mutex_unlock(&cooling_cpufreq_lock); - thermal_cooling_device_unregister(cpufreq_dev->cool_dev); - release_idr(&cpufreq_idr, cpufreq_dev->id); + ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); kfree(cpufreq_dev->dyn_power_table); kfree(cpufreq_dev->time_in_idle_timestamp); kfree(cpufreq_dev->time_in_idle); diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index ba7a5cd994dc..7743a78d4723 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -21,14 +21,14 @@ #include <linux/devfreq.h> #include <linux/devfreq_cooling.h> #include <linux/export.h> +#include <linux/idr.h> #include <linux/slab.h> #include <linux/pm_opp.h> #include <linux/thermal.h> #include <trace/events/thermal.h> -static DEFINE_MUTEX(devfreq_lock); -static DEFINE_IDR(devfreq_idr); +static DEFINE_IDA(devfreq_ida); /** * struct devfreq_cooling_device - Devfreq cooling device @@ -58,42 +58,6 @@ struct devfreq_cooling_device { }; /** - * get_idr - function to get a unique id. - * @idr: struct idr * handle used to create a id. - * @id: int * value generated by this function. - * - * This function will populate @id with an unique - * id, using the idr API. - * - * Return: 0 on success, an error code on failure. - */ -static int get_idr(struct idr *idr, int *id) -{ - int ret; - - mutex_lock(&devfreq_lock); - ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); - mutex_unlock(&devfreq_lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - - return 0; -} - -/** - * release_idr - function to free the unique id. - * @idr: struct idr * handle used for creating the id. - * @id: int value representing the unique id. 
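(Illustrative aside, not part of the diff.) Besides the IDA switch, the cpu_cooling.c hunks above stop placing a full cpumask_t on the kernel stack in cpufreq_state2power() and __cpufreq_cooling_register(); with large NR_CPUS that structure can run to hundreds of bytes. A minimal sketch of the cpumask_var_t pattern they adopt, with count_online_cpus_in() as a placeholder name:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int count_online_cpus_in(const struct cpumask *allowed)
{
	cpumask_var_t tmp;	/* heap-backed when CONFIG_CPUMASK_OFFSTACK=y */
	int n;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, allowed, cpu_online_mask);
	n = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return n;
}

The allocation is what forces the new out:/free_cpumask: error labels in the diff: every exit path now has to release the mask.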
- */ -static void release_idr(struct idr *idr, int id) -{ - mutex_lock(&devfreq_lock); - idr_remove(idr, id); - mutex_unlock(&devfreq_lock); -} - -/** * partition_enable_opps() - disable all opps above a given state * @dfc: Pointer to devfreq we are operating on * @cdev_state: cooling device state we're setting @@ -489,9 +453,10 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, if (err) goto free_dfc; - err = get_idr(&devfreq_idr, &dfc->id); - if (err) + err = ida_simple_get(&devfreq_ida, 0, 0, GFP_KERNEL); + if (err < 0) goto free_tables; + dfc->id = err; snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id); @@ -502,15 +467,15 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, dev_err(df->dev.parent, "Failed to register devfreq cooling device (%d)\n", err); - goto release_idr; + goto release_ida; } dfc->cdev = cdev; return cdev; -release_idr: - release_idr(&devfreq_idr, dfc->id); +release_ida: + ida_simple_remove(&devfreq_ida, dfc->id); free_tables: kfree(dfc->power_table); kfree(dfc->freq_table); @@ -558,7 +523,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev) dfc = cdev->devdata; thermal_cooling_device_unregister(dfc->cdev); - release_idr(&devfreq_idr, dfc->id); + ida_simple_remove(&devfreq_ida, dfc->id); kfree(dfc->power_table); kfree(dfc->freq_table); diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 06912f0602b7..fb648a45754e 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -489,6 +489,10 @@ static int imx_thermal_probe(struct platform_device *pdev) data->tempmon = map; data->socdata = of_device_get_match_data(&pdev->dev); + if (!data->socdata) { + dev_err(&pdev->dev, "no device match found\n"); + return -ENODEV; + } /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ if (data->socdata->version == TEMPMON_IMX6SX) { diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index df64692e9e64..d718cd179ddb 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -50,6 +50,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/sched/rt.h> +#include <uapi/linux/sched/types.h> #include <asm/nmi.h> #include <asm/msr.h> @@ -461,16 +462,13 @@ static void poll_pkg_cstate(struct work_struct *dummy) { static u64 msr_last; static u64 tsc_last; - static unsigned long jiffies_last; u64 msr_now; - unsigned long jiffies_now; u64 tsc_now; u64 val64; msr_now = pkg_state_counter(); tsc_now = rdtsc(); - jiffies_now = jiffies; /* calculate pkg cstate vs tsc ratio */ if (!msr_last || !tsc_last) @@ -485,7 +483,6 @@ static void poll_pkg_cstate(struct work_struct *dummy) /* update record */ msr_last = msr_now; - jiffies_last = jiffies_now; tsc_last = tsc_now; if (true == clamping) diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 34169c32d495..1aff7fde54b1 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -183,37 +183,37 @@ struct mtk_thermal { }; /* MT8173 thermal sensor data */ -const int mt8173_bank_data[MT8173_NUM_ZONES][3] = { +static const int mt8173_bank_data[MT8173_NUM_ZONES][3] = { { MT8173_TS2, MT8173_TS3 }, { MT8173_TS2, MT8173_TS4 }, { MT8173_TS1, MT8173_TS2, MT8173_TSABB }, { MT8173_TS2 }, }; -const int mt8173_msr[MT8173_NUM_SENSORS_PER_ZONE] = { +static const int mt8173_msr[MT8173_NUM_SENSORS_PER_ZONE] = { TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR2 }; -const int 
mt8173_adcpnp[MT8173_NUM_SENSORS_PER_ZONE] = { +static const int mt8173_adcpnp[MT8173_NUM_SENSORS_PER_ZONE] = { TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2, TEMP_ADCPNP3 }; -const int mt8173_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 }; +static const int mt8173_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 }; /* MT2701 thermal sensor data */ -const int mt2701_bank_data[MT2701_NUM_SENSORS] = { +static const int mt2701_bank_data[MT2701_NUM_SENSORS] = { MT2701_TS1, MT2701_TS2, MT2701_TSABB }; -const int mt2701_msr[MT2701_NUM_SENSORS_PER_ZONE] = { +static const int mt2701_msr[MT2701_NUM_SENSORS_PER_ZONE] = { TEMP_MSR0, TEMP_MSR1, TEMP_MSR2 }; -const int mt2701_adcpnp[MT2701_NUM_SENSORS_PER_ZONE] = { +static const int mt2701_adcpnp[MT2701_NUM_SENSORS_PER_ZONE] = { TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2 }; -const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 }; +static const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 }; /** * The MT8173 thermal controller has four banks. Each bank can read up to diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c new file mode 100644 index 000000000000..d33c845244b1 --- /dev/null +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -0,0 +1,335 @@ +/* + * R-Car Gen3 THS thermal sensor driver + * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. + * + * Copyright (C) 2016 Renesas Electronics Corporation. + * Copyright (C) 2016 Sang Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + */ +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/thermal.h> + +/* Register offsets */ +#define REG_GEN3_IRQSTR 0x04 +#define REG_GEN3_IRQMSK 0x08 +#define REG_GEN3_IRQCTL 0x0C +#define REG_GEN3_IRQEN 0x10 +#define REG_GEN3_IRQTEMP1 0x14 +#define REG_GEN3_IRQTEMP2 0x18 +#define REG_GEN3_IRQTEMP3 0x1C +#define REG_GEN3_CTSR 0x20 +#define REG_GEN3_THCTR 0x20 +#define REG_GEN3_TEMP 0x28 +#define REG_GEN3_THCODE1 0x50 +#define REG_GEN3_THCODE2 0x54 +#define REG_GEN3_THCODE3 0x58 + +/* CTSR bits */ +#define CTSR_PONM BIT(8) +#define CTSR_AOUT BIT(7) +#define CTSR_THBGR BIT(5) +#define CTSR_VMEN BIT(4) +#define CTSR_VMST BIT(1) +#define CTSR_THSST BIT(0) + +/* THCTR bits */ +#define THCTR_PONM BIT(6) +#define THCTR_THSST BIT(0) + +#define CTEMP_MASK 0xFFF + +#define MCELSIUS(temp) ((temp) * 1000) +#define GEN3_FUSE_MASK 0xFFF + +#define TSC_MAX_NUM 3 + +/* Structure for thermal temperature calculation */ +struct equation_coefs { + int a1; + int b1; + int a2; + int b2; +}; + +struct rcar_gen3_thermal_tsc { + void __iomem *base; + struct thermal_zone_device *zone; + struct equation_coefs coef; + struct mutex lock; +}; + +struct rcar_gen3_thermal_priv { + struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM]; +}; + +struct rcar_gen3_thermal_data { + void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc); +}; + +static inline u32 rcar_gen3_thermal_read(struct rcar_gen3_thermal_tsc *tsc, + u32 reg) +{ + return ioread32(tsc->base + reg); +} + +static inline void rcar_gen3_thermal_write(struct rcar_gen3_thermal_tsc *tsc, + u32 reg, u32 data) +{ + iowrite32(data, tsc->base + reg); +} + +/* + * Linear approximation for temperature + * + * [reg] = [temp] * a + b => [temp] = ([reg] - b) / a + * + * The constants a and b are calculated using two triplets of int values PTAT + * and THCODE. PTAT and THCODE can either be read from hardware or use hard + * coded values from driver. The formula to calculate a and b are taken from + * BSP and sparsely documented and understood. + * + * Examining the linear formula and the formula used to calculate constants a + * and b while knowing that the span for PTAT and THCODE values are between + * 0x000 and 0xfff the largest integer possible is 0xfff * 0xfff == 0xffe001. + * Integer also needs to be signed so that leaves 7 bits for binary + * fixed point scaling. 
+ */ + +#define FIXPT_SHIFT 7 +#define FIXPT_INT(_x) ((_x) << FIXPT_SHIFT) +#define FIXPT_DIV(_a, _b) DIV_ROUND_CLOSEST(((_a) << FIXPT_SHIFT), (_b)) +#define FIXPT_TO_MCELSIUS(_x) ((_x) * 1000 >> FIXPT_SHIFT) + +#define RCAR3_THERMAL_GRAN 500 /* mili Celsius */ + +/* no idea where these constants come from */ +#define TJ_1 96 +#define TJ_3 -41 + +static void rcar_gen3_thermal_calc_coefs(struct equation_coefs *coef, + int *ptat, int *thcode) +{ + int tj_2; + + /* TODO: Find documentation and document constant calculation formula */ + + /* + * Division is not scaled in BSP and if scaled it might overflow + * the dividend (4095 * 4095 << 14 > INT_MAX) so keep it unscaled + */ + tj_2 = (FIXPT_INT((ptat[1] - ptat[2]) * 137) + / (ptat[0] - ptat[2])) - FIXPT_INT(41); + + coef->a1 = FIXPT_DIV(FIXPT_INT(thcode[1] - thcode[2]), + tj_2 - FIXPT_INT(TJ_3)); + coef->b1 = FIXPT_INT(thcode[2]) - coef->a1 * TJ_3; + + coef->a2 = FIXPT_DIV(FIXPT_INT(thcode[1] - thcode[0]), + tj_2 - FIXPT_INT(TJ_1)); + coef->b2 = FIXPT_INT(thcode[0]) - coef->a2 * TJ_1; +} + +static int rcar_gen3_thermal_round(int temp) +{ + int result, round_offs; + + round_offs = temp >= 0 ? RCAR3_THERMAL_GRAN / 2 : + -RCAR3_THERMAL_GRAN / 2; + result = (temp + round_offs) / RCAR3_THERMAL_GRAN; + return result * RCAR3_THERMAL_GRAN; +} + +static int rcar_gen3_thermal_get_temp(void *devdata, int *temp) +{ + struct rcar_gen3_thermal_tsc *tsc = devdata; + int mcelsius, val1, val2; + u32 reg; + + /* Read register and convert to mili Celsius */ + mutex_lock(&tsc->lock); + + reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK; + + val1 = FIXPT_DIV(FIXPT_INT(reg) - tsc->coef.b1, tsc->coef.a1); + val2 = FIXPT_DIV(FIXPT_INT(reg) - tsc->coef.b2, tsc->coef.a2); + mcelsius = FIXPT_TO_MCELSIUS((val1 + val2) / 2); + + mutex_unlock(&tsc->lock); + + /* Make sure we are inside specifications */ + if ((mcelsius < MCELSIUS(-40)) || (mcelsius > MCELSIUS(125))) + return -EIO; + + /* Round value to device granularity setting */ + *temp = rcar_gen3_thermal_round(mcelsius); + + return 0; +} + +static struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = { + .get_temp = rcar_gen3_thermal_get_temp, +}; + +static void r8a7795_thermal_init(struct rcar_gen3_thermal_tsc *tsc) +{ + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, CTSR_THBGR); + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, 0x0); + + usleep_range(1000, 2000); + + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, CTSR_PONM); + rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F); + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, + CTSR_PONM | CTSR_AOUT | CTSR_THBGR | CTSR_VMEN); + + usleep_range(100, 200); + + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, + CTSR_PONM | CTSR_AOUT | CTSR_THBGR | CTSR_VMEN | + CTSR_VMST | CTSR_THSST); + + usleep_range(1000, 2000); +} + +static void r8a7796_thermal_init(struct rcar_gen3_thermal_tsc *tsc) +{ + u32 reg_val; + + reg_val = rcar_gen3_thermal_read(tsc, REG_GEN3_THCTR); + reg_val &= ~THCTR_PONM; + rcar_gen3_thermal_write(tsc, REG_GEN3_THCTR, reg_val); + + usleep_range(1000, 2000); + + rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F); + reg_val = rcar_gen3_thermal_read(tsc, REG_GEN3_THCTR); + reg_val |= THCTR_THSST; + rcar_gen3_thermal_write(tsc, REG_GEN3_THCTR, reg_val); +} + +static const struct rcar_gen3_thermal_data r8a7795_data = { + .thermal_init = r8a7795_thermal_init, +}; + +static const struct rcar_gen3_thermal_data r8a7796_data = { + .thermal_init = r8a7796_thermal_init, +}; + +static const struct of_device_id rcar_gen3_thermal_dt_ids[] = { + { .compatible = 
"renesas,r8a7795-thermal", .data = &r8a7795_data}, + { .compatible = "renesas,r8a7796-thermal", .data = &r8a7796_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids); + +static int rcar_gen3_thermal_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return 0; +} + +static int rcar_gen3_thermal_probe(struct platform_device *pdev) +{ + struct rcar_gen3_thermal_priv *priv; + struct device *dev = &pdev->dev; + struct resource *res; + struct thermal_zone_device *zone; + int ret, i; + const struct rcar_gen3_thermal_data *match_data = + of_device_get_match_data(dev); + + /* default values if FUSEs are missing */ + /* TODO: Read values from hardware on supported platforms */ + int ptat[3] = { 2351, 1509, 435 }; + int thcode[TSC_MAX_NUM][3] = { + { 3248, 2800, 2221 }, + { 3245, 2795, 2216 }, + { 3250, 2805, 2237 }, + }; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + + for (i = 0; i < TSC_MAX_NUM; i++) { + struct rcar_gen3_thermal_tsc *tsc; + + tsc = devm_kzalloc(dev, sizeof(*tsc), GFP_KERNEL); + if (!tsc) { + ret = -ENOMEM; + goto error_unregister; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) + break; + + tsc->base = devm_ioremap_resource(dev, res); + if (IS_ERR(tsc->base)) { + ret = PTR_ERR(tsc->base); + goto error_unregister; + } + + priv->tscs[i] = tsc; + mutex_init(&tsc->lock); + + match_data->thermal_init(tsc); + rcar_gen3_thermal_calc_coefs(&tsc->coef, ptat, thcode[i]); + + zone = devm_thermal_zone_of_sensor_register(dev, i, tsc, + &rcar_gen3_tz_of_ops); + if (IS_ERR(zone)) { + dev_err(dev, "Can't register thermal zone\n"); + ret = PTR_ERR(zone); + goto error_unregister; + } + tsc->zone = zone; + } + + return 0; + +error_unregister: + rcar_gen3_thermal_remove(pdev); + + return ret; +} + +static struct platform_driver rcar_gen3_thermal_driver = { + .driver = { + .name = "rcar_gen3_thermal", + .of_match_table = rcar_gen3_thermal_dt_ids, + }, + .probe = rcar_gen3_thermal_probe, + .remove = rcar_gen3_thermal_remove, +}; +module_platform_driver(rcar_gen3_thermal_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("R-Car Gen3 THS thermal sensor driver"); +MODULE_AUTHOR("Wolfram Sang <wsa+renesas@sang-engineering.com>"); diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index ad1186dd6132..7b8ef09d2b3c 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1168,7 +1168,6 @@ static int exynos_of_sensor_conf(struct device_node *np, pdata->default_temp_offset = (u8)value; of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type); - of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode); of_node_put(np); return 0; diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h index 440c7140b660..5149c2a3030c 100644 --- a/drivers/thermal/samsung/exynos_tmu.h +++ b/drivers/thermal/samsung/exynos_tmu.h @@ -70,7 +70,6 @@ struct exynos_tmu_platform_data { enum soc_type type; u32 cal_type; - u32 cal_mode; }; #endif /* _EXYNOS_TMU_H */ diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 655591316a88..11f0675cb7e5 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -36,9 +36,8 @@ MODULE_AUTHOR("Zhang Rui"); MODULE_DESCRIPTION("Generic thermal management sysfs 
support"); MODULE_LICENSE("GPL v2"); -static DEFINE_IDR(thermal_tz_idr); -static DEFINE_IDR(thermal_cdev_idr); -static DEFINE_MUTEX(thermal_idr_lock); +static DEFINE_IDA(thermal_tz_ida); +static DEFINE_IDA(thermal_cdev_ida); static LIST_HEAD(thermal_tz_list); static LIST_HEAD(thermal_cdev_list); @@ -589,29 +588,6 @@ void thermal_zone_device_unbind_exception(struct thermal_zone_device *tz, * - thermal zone devices lifecycle: registration, unregistration, * binding, and unbinding. */ -static int get_idr(struct idr *idr, struct mutex *lock, int *id) -{ - int ret; - - if (lock) - mutex_lock(lock); - ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); - if (lock) - mutex_unlock(lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - return 0; -} - -static void release_idr(struct idr *idr, struct mutex *lock, int id) -{ - if (lock) - mutex_lock(lock); - idr_remove(idr, id); - if (lock) - mutex_unlock(lock); -} /** * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone @@ -685,15 +661,16 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, dev->target = THERMAL_NO_TARGET; dev->weight = weight; - result = get_idr(&tz->idr, &tz->lock, &dev->id); - if (result) + result = ida_simple_get(&tz->ida, 0, 0, GFP_KERNEL); + if (result < 0) goto free_mem; + dev->id = result; sprintf(dev->name, "cdev%d", dev->id); result = sysfs_create_link(&tz->device.kobj, &cdev->device.kobj, dev->name); if (result) - goto release_idr; + goto release_ida; sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); sysfs_attr_init(&dev->attr.attr); @@ -737,8 +714,8 @@ remove_trip_file: device_remove_file(&tz->device, &dev->attr); remove_symbol_link: sysfs_remove_link(&tz->device.kobj, dev->name); -release_idr: - release_idr(&tz->idr, &tz->lock, dev->id); +release_ida: + ida_simple_remove(&tz->ida, dev->id); free_mem: kfree(dev); return result; @@ -785,7 +762,7 @@ unbind: device_remove_file(&tz->device, &pos->weight_attr); device_remove_file(&tz->device, &pos->attr); sysfs_remove_link(&tz->device.kobj, pos->name); - release_idr(&tz->idr, &tz->lock, pos->id); + ida_simple_remove(&tz->ida, pos->id); kfree(pos); return 0; } @@ -925,12 +902,13 @@ __thermal_cooling_device_register(struct device_node *np, if (!cdev) return ERR_PTR(-ENOMEM); - result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id); - if (result) { + result = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL); + if (result < 0) { kfree(cdev); return ERR_PTR(result); } + cdev->id = result; strlcpy(cdev->type, type ? 
: "", sizeof(cdev->type)); mutex_init(&cdev->lock); INIT_LIST_HEAD(&cdev->thermal_instances); @@ -943,7 +921,7 @@ __thermal_cooling_device_register(struct device_node *np, dev_set_name(&cdev->device, "cooling_device%d", cdev->id); result = device_register(&cdev->device); if (result) { - release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); + ida_simple_remove(&thermal_cdev_ida, cdev->id); kfree(cdev); return ERR_PTR(result); } @@ -1070,7 +1048,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) mutex_unlock(&thermal_list_lock); - release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); + ida_simple_remove(&thermal_cdev_ida, cdev->id); device_unregister(&cdev->device); } EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); @@ -1172,14 +1150,15 @@ thermal_zone_device_register(const char *type, int trips, int mask, return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&tz->thermal_instances); - idr_init(&tz->idr); + ida_init(&tz->ida); mutex_init(&tz->lock); - result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id); - if (result) { + result = ida_simple_get(&thermal_tz_ida, 0, 0, GFP_KERNEL); + if (result < 0) { kfree(tz); return ERR_PTR(result); } + tz->id = result; strlcpy(tz->type, type, sizeof(tz->type)); tz->ops = ops; tz->tzp = tzp; @@ -1201,7 +1180,7 @@ thermal_zone_device_register(const char *type, int trips, int mask, dev_set_name(&tz->device, "thermal_zone%d", tz->id); result = device_register(&tz->device); if (result) { - release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); + ida_simple_remove(&thermal_tz_ida, tz->id); kfree(tz); return ERR_PTR(result); } @@ -1255,7 +1234,7 @@ thermal_zone_device_register(const char *type, int trips, int mask, return tz; unregister: - release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); + ida_simple_remove(&thermal_tz_ida, tz->id); device_unregister(&tz->device); return ERR_PTR(result); } @@ -1313,8 +1292,8 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) thermal_set_governor(tz, NULL); thermal_remove_hwmon_sysfs(tz); - release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); - idr_destroy(&tz->idr); + ida_simple_remove(&thermal_tz_ida, tz->id); + ida_destroy(&tz->ida); mutex_destroy(&tz->lock); device_unregister(&tz->device); } @@ -1514,9 +1493,8 @@ unregister_class: unregister_governors: thermal_unregister_governors(); error: - idr_destroy(&thermal_tz_idr); - idr_destroy(&thermal_cdev_idr); - mutex_destroy(&thermal_idr_lock); + ida_destroy(&thermal_tz_ida); + ida_destroy(&thermal_cdev_ida); mutex_destroy(&thermal_list_lock); mutex_destroy(&thermal_governor_lock); return result; @@ -1529,9 +1507,8 @@ static void __exit thermal_exit(void) genetlink_exit(); class_unregister(&thermal_class); thermal_unregister_governors(); - idr_destroy(&thermal_tz_idr); - idr_destroy(&thermal_cdev_idr); - mutex_destroy(&thermal_idr_lock); + ida_destroy(&thermal_tz_ida); + ida_destroy(&thermal_cdev_ida); mutex_destroy(&thermal_list_lock); mutex_destroy(&thermal_governor_lock); } diff --git a/drivers/thermal/ti-soc-thermal/Kconfig b/drivers/thermal/ti-soc-thermal/Kconfig index ea8283f08aa6..fe0e877f84d0 100644 --- a/drivers/thermal/ti-soc-thermal/Kconfig +++ b/drivers/thermal/ti-soc-thermal/Kconfig @@ -11,7 +11,6 @@ config TI_SOC_THERMAL config TI_THERMAL bool "Texas Instruments SoCs thermal framework support" depends on TI_SOC_THERMAL - depends on CPU_THERMAL help If you say yes here you want to get support for generic thermal framework for the Texas Instruments on die bandgap temperature sensor. 
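(Illustrative aside, not part of the diff.) Back in the new rcar_gen3_thermal.c added above, the raw 12-bit sensor code is mapped to millicelsius through two linear fits kept in 7-bit fixed point, so the intermediate products stay within a signed 32-bit integer. The sketch below mirrors that conversion for a single fit; the a1/b1 coefficients were precomputed here, in the manner of rcar_gen3_thermal_calc_coefs(), from the driver's fallback triplets ptat {2351, 1509, 435} and thcode {3248, 2800, 2221}, and example_reg_to_mcelsius() is a placeholder name:

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */
#include <linux/types.h>

#define SHIFT		7
#define INT2FP(x)	((x) << SHIFT)
#define FPDIV(a, b)	DIV_ROUND_CLOSEST((a) << SHIFT, (b))

static int example_reg_to_mcelsius(u32 reg)
{
	int a1 = 965, b1 = 323853;	/* first fit, precomputed from the triplets above */
	int fp;

	fp = FPDIV(INT2FP((int)reg) - b1, a1);	/* degrees Celsius, << 7 */
	return fp * 1000 >> SHIFT;		/* fixed point to millicelsius */
}

Feeding in reg = 2800 (the middle THCODE point) gives roughly 35800 millicelsius, matching the junction temperature derived from the PTAT triplet; the real driver averages this fit with a second one (a2/b2) and then rounds to its 500 millicelsius granularity.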
diff --git a/drivers/thermal/ti-soc-thermal/dra752-bandgap.h b/drivers/thermal/ti-soc-thermal/dra752-bandgap.h index 6b0f2b1160f7..a31e4b5e82cd 100644 --- a/drivers/thermal/ti-soc-thermal/dra752-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/dra752-bandgap.h @@ -54,7 +54,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_CORE_OFFSET 0x8 #define DRA752_TEMP_SENSOR_CORE_OFFSET 0x154 #define DRA752_BANDGAP_THRESHOLD_CORE_OFFSET 0x1ac -#define DRA752_BANDGAP_TSHUT_CORE_OFFSET 0x1b8 #define DRA752_BANDGAP_CUMUL_DTEMP_CORE_OFFSET 0x1c4 #define DRA752_DTEMP_CORE_0_OFFSET 0x208 #define DRA752_DTEMP_CORE_1_OFFSET 0x20c @@ -66,7 +65,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_IVA_OFFSET 0x388 #define DRA752_TEMP_SENSOR_IVA_OFFSET 0x398 #define DRA752_BANDGAP_THRESHOLD_IVA_OFFSET 0x3a4 -#define DRA752_BANDGAP_TSHUT_IVA_OFFSET 0x3ac #define DRA752_BANDGAP_CUMUL_DTEMP_IVA_OFFSET 0x3b4 #define DRA752_DTEMP_IVA_0_OFFSET 0x3d0 #define DRA752_DTEMP_IVA_1_OFFSET 0x3d4 @@ -78,7 +76,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_MPU_OFFSET 0x4 #define DRA752_TEMP_SENSOR_MPU_OFFSET 0x14c #define DRA752_BANDGAP_THRESHOLD_MPU_OFFSET 0x1a4 -#define DRA752_BANDGAP_TSHUT_MPU_OFFSET 0x1b0 #define DRA752_BANDGAP_CUMUL_DTEMP_MPU_OFFSET 0x1bc #define DRA752_DTEMP_MPU_0_OFFSET 0x1e0 #define DRA752_DTEMP_MPU_1_OFFSET 0x1e4 @@ -90,7 +87,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_DSPEVE_OFFSET 0x384 #define DRA752_TEMP_SENSOR_DSPEVE_OFFSET 0x394 #define DRA752_BANDGAP_THRESHOLD_DSPEVE_OFFSET 0x3a0 -#define DRA752_BANDGAP_TSHUT_DSPEVE_OFFSET 0x3a8 #define DRA752_BANDGAP_CUMUL_DTEMP_DSPEVE_OFFSET 0x3b0 #define DRA752_DTEMP_DSPEVE_0_OFFSET 0x3bc #define DRA752_DTEMP_DSPEVE_1_OFFSET 0x3c0 @@ -102,7 +98,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_GPU_OFFSET 0x0 #define DRA752_TEMP_SENSOR_GPU_OFFSET 0x150 #define DRA752_BANDGAP_THRESHOLD_GPU_OFFSET 0x1a8 -#define DRA752_BANDGAP_TSHUT_GPU_OFFSET 0x1b4 #define DRA752_BANDGAP_CUMUL_DTEMP_GPU_OFFSET 0x1c0 #define DRA752_DTEMP_GPU_0_OFFSET 0x1f4 #define DRA752_DTEMP_GPU_1_OFFSET 0x1f8 @@ -173,10 +168,6 @@ #define DRA752_BANDGAP_THRESHOLD_HOT_MASK (0x3ff << 16) #define DRA752_BANDGAP_THRESHOLD_COLD_MASK (0x3ff << 0) -/* DRA752.TSHUT_THRESHOLD */ -#define DRA752_TSHUT_THRESHOLD_MUXCTRL_MASK BIT(31) -#define DRA752_TSHUT_THRESHOLD_HOT_MASK (0x3ff << 16) -#define DRA752_TSHUT_THRESHOLD_COLD_MASK (0x3ff << 0) /* DRA752.BANDGAP_CUMUL_DTEMP_CORE */ #define DRA752_BANDGAP_CUMUL_DTEMP_CORE_MASK (0xffffffff << 0) @@ -216,8 +207,6 @@ #define DRA752_GPU_MAX_TEMP 125000 #define DRA752_GPU_HYST_VAL 5000 /* interrupts thresholds */ -#define DRA752_GPU_TSHUT_HOT 915 -#define DRA752_GPU_TSHUT_COLD 900 #define DRA752_GPU_T_HOT 800 #define DRA752_GPU_T_COLD 795 @@ -230,8 +219,6 @@ #define DRA752_MPU_MAX_TEMP 125000 #define DRA752_MPU_HYST_VAL 5000 /* interrupts thresholds */ -#define DRA752_MPU_TSHUT_HOT 915 -#define DRA752_MPU_TSHUT_COLD 900 #define DRA752_MPU_T_HOT 800 #define DRA752_MPU_T_COLD 795 @@ -244,8 +231,6 @@ #define DRA752_CORE_MAX_TEMP 125000 #define DRA752_CORE_HYST_VAL 5000 /* interrupts thresholds */ -#define DRA752_CORE_TSHUT_HOT 915 -#define DRA752_CORE_TSHUT_COLD 900 #define DRA752_CORE_T_HOT 800 #define DRA752_CORE_T_COLD 795 @@ -258,8 +243,6 @@ #define DRA752_DSPEVE_MAX_TEMP 125000 #define DRA752_DSPEVE_HYST_VAL 5000 /* interrupts thresholds */ -#define DRA752_DSPEVE_TSHUT_HOT 915 -#define DRA752_DSPEVE_TSHUT_COLD 900 #define DRA752_DSPEVE_T_HOT 800 #define DRA752_DSPEVE_T_COLD 795 @@ -272,8 +255,6 @@ #define DRA752_IVA_MAX_TEMP 125000 #define DRA752_IVA_HYST_VAL 5000 /* interrupts thresholds */ -#define 
DRA752_IVA_TSHUT_HOT 915 -#define DRA752_IVA_TSHUT_COLD 900 #define DRA752_IVA_T_HOT 800 #define DRA752_IVA_T_COLD 795 diff --git a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c index 58b5c6694cd4..118d7d847715 100644 --- a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c +++ b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c @@ -49,9 +49,6 @@ dra752_core_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_CORE_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_CORE_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_CORE_MASK, @@ -85,9 +82,6 @@ dra752_iva_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_IVA_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_IVA_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_IVA_MASK, @@ -121,9 +115,6 @@ dra752_mpu_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_MPU_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_MPU_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_MPU_MASK, @@ -157,9 +148,6 @@ dra752_dspeve_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_DSPEVE_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_DSPEVE_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_DSPEVE_MASK, @@ -193,9 +181,6 @@ dra752_gpu_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_GPU_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_GPU_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_GPU_MASK, @@ -211,8 +196,6 @@ dra752_gpu_temp_sensor_registers = { /* Thresholds and limits for DRA752 MPU temperature sensor */ static struct temp_sensor_data dra752_mpu_temp_sensor_data = { - .tshut_hot = DRA752_MPU_TSHUT_HOT, - .tshut_cold = DRA752_MPU_TSHUT_COLD, .t_hot = DRA752_MPU_T_HOT, .t_cold = DRA752_MPU_T_COLD, .min_freq = DRA752_MPU_MIN_FREQ, @@ 
-226,8 +209,6 @@ static struct temp_sensor_data dra752_mpu_temp_sensor_data = { /* Thresholds and limits for DRA752 GPU temperature sensor */ static struct temp_sensor_data dra752_gpu_temp_sensor_data = { - .tshut_hot = DRA752_GPU_TSHUT_HOT, - .tshut_cold = DRA752_GPU_TSHUT_COLD, .t_hot = DRA752_GPU_T_HOT, .t_cold = DRA752_GPU_T_COLD, .min_freq = DRA752_GPU_MIN_FREQ, @@ -241,8 +222,6 @@ static struct temp_sensor_data dra752_gpu_temp_sensor_data = { /* Thresholds and limits for DRA752 CORE temperature sensor */ static struct temp_sensor_data dra752_core_temp_sensor_data = { - .tshut_hot = DRA752_CORE_TSHUT_HOT, - .tshut_cold = DRA752_CORE_TSHUT_COLD, .t_hot = DRA752_CORE_T_HOT, .t_cold = DRA752_CORE_T_COLD, .min_freq = DRA752_CORE_MIN_FREQ, @@ -256,8 +235,6 @@ static struct temp_sensor_data dra752_core_temp_sensor_data = { /* Thresholds and limits for DRA752 DSPEVE temperature sensor */ static struct temp_sensor_data dra752_dspeve_temp_sensor_data = { - .tshut_hot = DRA752_DSPEVE_TSHUT_HOT, - .tshut_cold = DRA752_DSPEVE_TSHUT_COLD, .t_hot = DRA752_DSPEVE_T_HOT, .t_cold = DRA752_DSPEVE_T_COLD, .min_freq = DRA752_DSPEVE_MIN_FREQ, @@ -271,8 +248,6 @@ static struct temp_sensor_data dra752_dspeve_temp_sensor_data = { /* Thresholds and limits for DRA752 IVA temperature sensor */ static struct temp_sensor_data dra752_iva_temp_sensor_data = { - .tshut_hot = DRA752_IVA_TSHUT_HOT, - .tshut_cold = DRA752_IVA_TSHUT_COLD, .t_hot = DRA752_IVA_T_HOT, .t_cold = DRA752_IVA_T_COLD, .min_freq = DRA752_IVA_MIN_FREQ, @@ -416,8 +391,7 @@ int dra752_adc_to_temp[DRA752_ADC_END_VALUE - DRA752_ADC_START_VALUE + 1] = { /* DRA752 data */ const struct ti_bandgap_data dra752_data = { - .features = TI_BANDGAP_FEATURE_TSHUT_CONFIG | - TI_BANDGAP_FEATURE_FREEZE_BIT | + .features = TI_BANDGAP_FEATURE_FREEZE_BIT | TI_BANDGAP_FEATURE_TALERT | TI_BANDGAP_FEATURE_COUNTER_DELAY | TI_BANDGAP_FEATURE_HISTORY_BUFFER | diff --git a/drivers/thermal/zx2967_thermal.c b/drivers/thermal/zx2967_thermal.c new file mode 100644 index 000000000000..a5670ad2cfc8 --- /dev/null +++ b/drivers/thermal/zx2967_thermal.c @@ -0,0 +1,258 @@ +/* + * ZTE's zx2967 family thermal sensor driver + * + * Copyright (C) 2017 ZTE Ltd. 
+ * + * Author: Baoyou Xie <baoyou.xie@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/thermal.h> + +/* Power Mode: 0->low 1->high */ +#define ZX2967_THERMAL_POWER_MODE 0 +#define ZX2967_POWER_MODE_LOW 0 +#define ZX2967_POWER_MODE_HIGH 1 + +/* DCF Control Register */ +#define ZX2967_THERMAL_DCF 0x4 +#define ZX2967_DCF_EN BIT(1) +#define ZX2967_DCF_FREEZE BIT(0) + +/* Selection Register */ +#define ZX2967_THERMAL_SEL 0x8 + +/* Control Register */ +#define ZX2967_THERMAL_CTRL 0x10 + +#define ZX2967_THERMAL_READY BIT(12) +#define ZX2967_THERMAL_TEMP_MASK GENMASK(11, 0) +#define ZX2967_THERMAL_ID_MASK 0x18 +#define ZX2967_THERMAL_ID 0x10 + +#define ZX2967_GET_TEMP_TIMEOUT_US (100 * 1024) + +/** + * struct zx2967_thermal_priv - zx2967 thermal sensor private structure + * @tzd: struct thermal_zone_device where the sensor is registered + * @lock: prevents read sensor in parallel + * @clk_topcrm: topcrm clk structure + * @clk_apb: apb clk structure + * @regs: pointer to base address of the thermal sensor + */ + +struct zx2967_thermal_priv { + struct thermal_zone_device *tzd; + struct mutex lock; + struct clk *clk_topcrm; + struct clk *clk_apb; + void __iomem *regs; + struct device *dev; +}; + +static int zx2967_thermal_get_temp(void *data, int *temp) +{ + void __iomem *regs; + struct zx2967_thermal_priv *priv = data; + u32 val; + int ret; + + if (!priv->tzd) + return -EAGAIN; + + regs = priv->regs; + mutex_lock(&priv->lock); + writel_relaxed(ZX2967_POWER_MODE_LOW, + regs + ZX2967_THERMAL_POWER_MODE); + writel_relaxed(ZX2967_DCF_EN, regs + ZX2967_THERMAL_DCF); + + val = readl_relaxed(regs + ZX2967_THERMAL_SEL); + val &= ~ZX2967_THERMAL_ID_MASK; + val |= ZX2967_THERMAL_ID; + writel_relaxed(val, regs + ZX2967_THERMAL_SEL); + + /* + * Must wait for a while, surely it's a bit odd. + * otherwise temperature value we got has a few deviation, even if + * the THERMAL_READY bit is set. + */ + usleep_range(100, 300); + ret = readx_poll_timeout(readl, regs + ZX2967_THERMAL_CTRL, + val, val & ZX2967_THERMAL_READY, 300, + ZX2967_GET_TEMP_TIMEOUT_US); + if (ret) { + dev_err(priv->dev, "Thermal sensor data timeout\n"); + goto unlock; + } + + writel_relaxed(ZX2967_DCF_FREEZE | ZX2967_DCF_EN, + regs + ZX2967_THERMAL_DCF); + val = readl_relaxed(regs + ZX2967_THERMAL_CTRL) + & ZX2967_THERMAL_TEMP_MASK; + writel_relaxed(ZX2967_POWER_MODE_HIGH, + regs + ZX2967_THERMAL_POWER_MODE); + + /* + * Calculate temperature + * In dts, slope is multiplied by 1000. 
+ */ + *temp = DIV_ROUND_CLOSEST(((s32)val + priv->tzd->tzp->offset) * 1000, + priv->tzd->tzp->slope); + +unlock: + mutex_unlock(&priv->lock); + return ret; +} + +static struct thermal_zone_of_device_ops zx2967_of_thermal_ops = { + .get_temp = zx2967_thermal_get_temp, +}; + +static int zx2967_thermal_probe(struct platform_device *pdev) +{ + struct zx2967_thermal_priv *priv; + struct resource *res; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + + priv->clk_topcrm = devm_clk_get(&pdev->dev, "topcrm"); + if (IS_ERR(priv->clk_topcrm)) { + ret = PTR_ERR(priv->clk_topcrm); + dev_err(&pdev->dev, "failed to get topcrm clock: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(priv->clk_topcrm); + if (ret) { + dev_err(&pdev->dev, "failed to enable topcrm clock: %d\n", + ret); + return ret; + } + + priv->clk_apb = devm_clk_get(&pdev->dev, "apb"); + if (IS_ERR(priv->clk_apb)) { + ret = PTR_ERR(priv->clk_apb); + dev_err(&pdev->dev, "failed to get apb clock: %d\n", ret); + goto disable_clk_topcrm; + } + + ret = clk_prepare_enable(priv->clk_apb); + if (ret) { + dev_err(&pdev->dev, "failed to enable apb clock: %d\n", + ret); + goto disable_clk_topcrm; + } + + mutex_init(&priv->lock); + priv->tzd = thermal_zone_of_sensor_register(&pdev->dev, + 0, priv, &zx2967_of_thermal_ops); + + if (IS_ERR(priv->tzd)) { + ret = PTR_ERR(priv->tzd); + dev_err(&pdev->dev, "failed to register sensor: %d\n", ret); + goto disable_clk_all; + } + + if (priv->tzd->tzp->slope == 0) { + thermal_zone_of_sensor_unregister(&pdev->dev, priv->tzd); + dev_err(&pdev->dev, "coefficients of sensor is invalid\n"); + ret = -EINVAL; + goto disable_clk_all; + } + + priv->dev = &pdev->dev; + platform_set_drvdata(pdev, priv); + + return 0; + +disable_clk_all: + clk_disable_unprepare(priv->clk_apb); +disable_clk_topcrm: + clk_disable_unprepare(priv->clk_topcrm); + return ret; +} + +static int zx2967_thermal_exit(struct platform_device *pdev) +{ + struct zx2967_thermal_priv *priv = platform_get_drvdata(pdev); + + thermal_zone_of_sensor_unregister(&pdev->dev, priv->tzd); + clk_disable_unprepare(priv->clk_topcrm); + clk_disable_unprepare(priv->clk_apb); + + return 0; +} + +static const struct of_device_id zx2967_thermal_id_table[] = { + { .compatible = "zte,zx296718-thermal" }, + {} +}; +MODULE_DEVICE_TABLE(of, zx2967_thermal_id_table); + +#ifdef CONFIG_PM_SLEEP +static int zx2967_thermal_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct zx2967_thermal_priv *priv = platform_get_drvdata(pdev); + + if (priv && priv->clk_topcrm) + clk_disable_unprepare(priv->clk_topcrm); + + if (priv && priv->clk_apb) + clk_disable_unprepare(priv->clk_apb); + + return 0; +} + +static int zx2967_thermal_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct zx2967_thermal_priv *priv = platform_get_drvdata(pdev); + int error; + + error = clk_prepare_enable(priv->clk_topcrm); + if (error) + return error; + + error = clk_prepare_enable(priv->clk_apb); + if (error) { + clk_disable_unprepare(priv->clk_topcrm); + return error; + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(zx2967_thermal_pm_ops, + zx2967_thermal_suspend, zx2967_thermal_resume); + +static struct platform_driver zx2967_thermal_driver = { + .probe = zx2967_thermal_probe, + .remove 
= zx2967_thermal_exit, + .driver = { + .name = "zx2967_thermal", + .of_match_table = zx2967_thermal_id_table, + .pm = &zx2967_thermal_pm_ops, + }, +}; +module_platform_driver(zx2967_thermal_driver); + +MODULE_AUTHOR("Baoyou Xie <baoyou.xie@linaro.org>"); +MODULE_DESCRIPTION("ZTE zx2967 thermal driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index f3932baed07d..55577cf9b6a4 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -39,7 +39,7 @@ #include <linux/errno.h> #include <linux/signal.h> #include <linux/fcntl.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/ctype.h> diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index a23fa5ed1d67..66b59a15780d 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -12,7 +12,7 @@ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/fcntl.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/major.h> #include <linux/mm.h> diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index e92c23470e51..59a2a7e18b5a 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -12,7 +12,7 @@ static char *serial_version = "$Revision: 1.25 $"; #include <linux/types.h> #include <linux/errno.h> #include <linux/signal.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/tty.h> diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 793395451982..ca54ce074a5f 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -29,6 +29,7 @@ #include <linux/tty_flip.h> #include <linux/spi/spi.h> #include <linux/uaccess.h> +#include <uapi/linux/sched/types.h> #define SC16IS7XX_NAME "sc16is7xx" #define SC16IS7XX_MAX_DEVS 8 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 9939c3d9912b..3fe56894974a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -24,6 +24,7 @@ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/init.h> #include <linux/console.h> #include <linux/of.h> diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 71136742e606..c6fc7141d7b2 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -14,8 +14,10 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sched/rt.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/fs.h> diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index a1fd3f7d487a..e6d1a6510886 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -69,7 +69,8 @@ #include <linux/errno.h> #include <linux/signal.h> #include <linux/fcntl.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_driver.h> diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index f27fc0f14c11..a9a978731c5b 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -9,7 +9,7 @@ #include <linux/types.h> #include <linux/termios.h> #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kernel.h> #include <linux/major.h> #include 
<linux/tty.h> diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 9229de43e19d..52b7baef4f7a 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c @@ -32,6 +32,8 @@ #include <linux/atomic.h> #include <linux/tty.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 5cd3cd932293..1d21a9c1d33e 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -11,7 +11,7 @@ #include <linux/timer.h> #include <linux/string.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/bitops.h> #include <linux/delay.h> diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 397e1509fe51..c5f0fc906136 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -26,7 +26,9 @@ #include <linux/consolemap.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/sched/debug.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/mm.h> diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 9d3ce505e7ab..5c4933bb4b53 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -72,7 +72,7 @@ #include <linux/module.h> #include <linux/types.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/kernel.h> diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index a56edf2d58eb..0cbfe1ff6f6c 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -10,7 +10,7 @@ #include <linux/types.h> #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/tty.h> #include <linux/timer.h> #include <linux/kernel.h> diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 31d95dc9c202..60ce7fd54e89 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -20,7 +20,7 @@ #include <linux/slab.h> #include <linux/mm.h> #include <linux/idr.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/kobject.h> #include <linux/cdev.h> diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 5a59da0dc98a..3e80aa3b917a 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -74,7 +74,7 @@ #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/stat.h> diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 235e305f8473..d5388938bc7a 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -32,6 +32,7 @@ #undef VERBOSE_DEBUG #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 071964c7847f..cc61055fb9be 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -49,7 +49,7 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/slab.h> diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index ca425e8099ea..cfc3cff6e8d5 100644 --- a/drivers/usb/core/devio.c +++ 
b/drivers/usb/core/devio.c @@ -36,6 +36,7 @@ #include <linux/fs.h> #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/signal.h> #include <linux/poll.h> diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index a56c75e09786..f0dd08198d74 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -15,7 +15,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/completion.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/ioctl.h> diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 87fccf611b69..a5b7cd615698 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -23,6 +23,7 @@ #include <linux/export.h> #include <linux/hid.h> #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/uio.h> #include <asm/unaligned.h> diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 8f3659b65f53..4c8aacc232c0 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -207,6 +207,7 @@ #include <linux/fs.h> #include <linux/kref.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> #include <linux/limits.h> #include <linux/rwsem.h> #include <linux/slab.h> diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c index 5cf2633cdb04..e92540a21b6b 100644 --- a/drivers/usb/image/mdc800.c +++ b/drivers/usb/image/mdc800.c @@ -85,7 +85,7 @@ * (20/10/1999) */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/spinlock.h> #include <linux/errno.h> diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index c5fa584d8f0a..db9a9e6ff6be 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -21,6 +21,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index debc1fd74b0d..8b9fd7534f69 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -17,6 +17,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/slab.h> diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index fc329c98a6e8..b106ce76997b 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -31,7 +31,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/signal.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mutex.h> #include <linux/errno.h> #include <linux/random.h> diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 0a643fa74cab..e45a3a680db8 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -50,6 +50,7 @@ #include <linux/completion.h> #include <linux/kref.h> #include <linux/slab.h> +#include <linux/sched/signal.h> /* * Version Information diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 9fb8b1e6ecc2..b6d8bf475c92 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -8,6 +8,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/cdev.h> diff --git a/drivers/usb/mon/mon_text.c 
b/drivers/usb/mon/mon_text.c index db1a4abf2806..19c416d69eb9 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -8,6 +8,7 @@ #include <linux/list.h> #include <linux/usb.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/time.h> #include <linux/ktime.h> #include <linux/export.h> diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index eb433922598c..ab78111e0968 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -27,6 +27,7 @@ #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/wait.h> +#include <linux/sched/signal.h> #include <linux/usb/serial.h> /* Defines */ diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 944de657a07a..49ce2be90fa0 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -10,6 +10,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/sysrq.h> diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 1a6f78d7d027..cab2b71a80d0 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -327,13 +327,11 @@ EXPORT_SYMBOL_GPL(usbip_dump_header); int usbip_recv(struct socket *sock, void *buf, int size) { int result; - struct msghdr msg; - struct kvec iov; + struct kvec iov = {.iov_base = buf, .iov_len = size}; + struct msghdr msg = {.msg_flags = MSG_NOSIGNAL}; int total = 0; - /* for blocks of if (usbip_dbg_flag_xmit) */ - char *bp = buf; - int osize = size; + iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size); usbip_dbg_xmit("enter\n"); @@ -344,26 +342,18 @@ int usbip_recv(struct socket *sock, void *buf, int size) } do { + int sz = msg_data_left(&msg); sock->sk->sk_allocation = GFP_NOIO; - iov.iov_base = buf; - iov.iov_len = size; - msg.msg_name = NULL; - msg.msg_namelen = 0; - msg.msg_control = NULL; - msg.msg_controllen = 0; - msg.msg_flags = MSG_NOSIGNAL; - - result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); + + result = sock_recvmsg(sock, &msg, MSG_WAITALL); if (result <= 0) { pr_debug("receive sock %p buf %p size %u ret %d total %d\n", - sock, buf, size, result, total); + sock, buf + total, sz, result, total); goto err; } - size -= result; - buf += result; total += result; - } while (size > 0); + } while (msg_data_left(&msg)); if (usbip_dbg_flag_xmit) { if (!in_interrupt()) @@ -372,9 +362,9 @@ int usbip_recv(struct socket *sock, void *buf, int size) pr_debug("interrupt :"); pr_debug("receiving....\n"); - usbip_dump_buffer(bp, osize); - pr_debug("received, osize %d ret %d size %d total %d\n", - osize, result, size, total); + usbip_dump_buffer(buf, size); + pr_debug("received, osize %d ret %d size %zd total %d\n", + size, result, msg_data_left(&msg), total); } return total; diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 9f490375ac92..f8573a52e41a 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -31,6 +31,7 @@ #include <linux/types.h> #include <linux/usb.h> #include <linux/wait.h> +#include <linux/sched/task.h> #include <uapi/linux/usbip.h> #define USBIP_VERSION "1.0.0" diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 59b3f62a2d64..cf3de91fbfe7 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -20,6 +20,9 @@ #include <linux/err.h> #include <linux/vfio.h> 
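(Illustrative aside, not part of the diff.) The usbip_recv() rework a few hunks above is more than header churn: the hand-rolled loop that re-primed a kvec and adjusted buf/size after every kernel_recvmsg() call becomes a single msghdr whose iov_iter tracks progress, with msg_data_left() as the loop condition. A minimal sketch of that shape, using the same iov_iter_kvec()/sock_recvmsg() calls that appear in the hunk; example_recv_all() is a placeholder name:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int example_recv_all(struct socket *sock, void *buf, int size)
{
	struct kvec iov = { .iov_base = buf, .iov_len = size };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int total = 0, ret;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);

	while (msg_data_left(&msg)) {
		ret = sock_recvmsg(sock, &msg, MSG_WAITALL);
		if (ret <= 0)
			return ret;	/* error, or peer closed early */
		total += ret;
	}

	return total;
}

Because sock_recvmsg() advances the iterator itself, the debug path in the hunk can report msg_data_left() instead of maintaining separate bp/osize copies of the buffer pointer and size.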
#include <linux/vmalloc.h> +#include <linux/sched/mm.h> +#include <linux/sched/signal.h> + #include <asm/iommu.h> #include <asm/tce.h> #include <asm/mmu_context.h> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index bd6f293c4ebd..c26fa1f3ed86 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -31,7 +31,8 @@ #include <linux/module.h> #include <linux/mm.h> #include <linux/rbtree.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/vfio.h> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 2fe35354f20e..9b519897cc17 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -17,6 +17,8 @@ #include <linux/workqueue.h> #include <linux/file.h> #include <linux/slab.h> +#include <linux/sched/clock.h> +#include <linux/sched/signal.h> #include <linux/vmalloc.h> #include <linux/net.h> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 4269e621e254..f0ba362d4c10 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -27,6 +27,8 @@ #include <linux/cgroup.h> #include <linux/module.h> #include <linux/sort.h> +#include <linux/sched/mm.h> +#include <linux/sched/signal.h> #include <linux/interval_tree_generic.h> #include "vhost.h" @@ -282,6 +284,22 @@ void vhost_poll_queue(struct vhost_poll *poll) } EXPORT_SYMBOL_GPL(vhost_poll_queue); +static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq) +{ + int j; + + for (j = 0; j < VHOST_NUM_ADDRS; j++) + vq->meta_iotlb[j] = NULL; +} + +static void vhost_vq_meta_reset(struct vhost_dev *d) +{ + int i; + + for (i = 0; i < d->nvqs; ++i) + __vhost_vq_meta_reset(d->vqs[i]); +} + static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -312,6 +330,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; + __vhost_vq_meta_reset(vq); } static int vhost_worker(void *data) @@ -691,6 +710,18 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem, return 1; } +static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, + u64 addr, unsigned int size, + int type) +{ + const struct vhost_umem_node *node = vq->meta_iotlb[type]; + + if (!node) + return NULL; + + return (void *)(uintptr_t)(node->userspace_addr + addr - node->start); +} + /* Can we switch to this memory table? */ /* Caller should have device mutex but not vq mutex */ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem, @@ -733,8 +764,14 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, * could be access through iotlb. So -EAGAIN should * not happen in this case. */ - /* TODO: more fast path */ struct iov_iter t; + void __user *uaddr = vhost_vq_meta_fetch(vq, + (u64)(uintptr_t)to, size, + VHOST_ADDR_DESC); + + if (uaddr) + return __copy_to_user(uaddr, from, size); + ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, ARRAY_SIZE(vq->iotlb_iov), VHOST_ACCESS_WO); @@ -762,8 +799,14 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, * could be access through iotlb. So -EAGAIN should * not happen in this case. 
*/ - /* TODO: more fast path */ + void __user *uaddr = vhost_vq_meta_fetch(vq, + (u64)(uintptr_t)from, size, + VHOST_ADDR_DESC); struct iov_iter f; + + if (uaddr) + return __copy_from_user(to, uaddr, size); + ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, ARRAY_SIZE(vq->iotlb_iov), VHOST_ACCESS_RO); @@ -783,17 +826,12 @@ out: return ret; } -static void __user *__vhost_get_user(struct vhost_virtqueue *vq, - void __user *addr, unsigned size) +static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq, + void __user *addr, unsigned int size, + int type) { int ret; - /* This function should be called after iotlb - * prefetch, which means we're sure that vq - * could be access through iotlb. So -EAGAIN should - * not happen in this case. - */ - /* TODO: more fast path */ ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, ARRAY_SIZE(vq->iotlb_iov), VHOST_ACCESS_RO); @@ -814,14 +852,32 @@ static void __user *__vhost_get_user(struct vhost_virtqueue *vq, return vq->iotlb_iov[0].iov_base; } -#define vhost_put_user(vq, x, ptr) \ +/* This function should be called after iotlb + * prefetch, which means we're sure that vq + * could be access through iotlb. So -EAGAIN should + * not happen in this case. + */ +static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, + void *addr, unsigned int size, + int type) +{ + void __user *uaddr = vhost_vq_meta_fetch(vq, + (u64)(uintptr_t)addr, size, type); + if (uaddr) + return uaddr; + + return __vhost_get_user_slow(vq, addr, size, type); +} + +#define vhost_put_user(vq, x, ptr) \ ({ \ int ret = -EFAULT; \ if (!vq->iotlb) { \ ret = __put_user(x, ptr); \ } else { \ __typeof__(ptr) to = \ - (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \ + (__typeof__(ptr)) __vhost_get_user(vq, ptr, \ + sizeof(*ptr), VHOST_ADDR_USED); \ if (to != NULL) \ ret = __put_user(x, to); \ else \ @@ -830,14 +886,16 @@ static void __user *__vhost_get_user(struct vhost_virtqueue *vq, ret; \ }) -#define vhost_get_user(vq, x, ptr) \ +#define vhost_get_user(vq, x, ptr, type) \ ({ \ int ret; \ if (!vq->iotlb) { \ ret = __get_user(x, ptr); \ } else { \ __typeof__(ptr) from = \ - (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \ + (__typeof__(ptr)) __vhost_get_user(vq, ptr, \ + sizeof(*ptr), \ + type); \ if (from != NULL) \ ret = __get_user(x, from); \ else \ @@ -846,6 +904,12 @@ static void __user *__vhost_get_user(struct vhost_virtqueue *vq, ret; \ }) +#define vhost_get_avail(vq, x, ptr) \ + vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL) + +#define vhost_get_used(vq, x, ptr) \ + vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) + static void vhost_dev_lock_vqs(struct vhost_dev *d) { int i = 0; @@ -951,6 +1015,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, ret = -EFAULT; break; } + vhost_vq_meta_reset(dev); if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size, msg->iova + msg->size - 1, msg->uaddr, msg->perm)) { @@ -960,6 +1025,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, vhost_iotlb_notify_vq(dev, msg); break; case VHOST_IOTLB_INVALIDATE: + vhost_vq_meta_reset(dev); vhost_del_umem_range(dev->iotlb, msg->iova, msg->iova + msg->size - 1); break; @@ -1103,12 +1169,26 @@ static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, sizeof *used + num * sizeof *used->ring + s); } +static void vhost_vq_meta_update(struct vhost_virtqueue *vq, + const struct vhost_umem_node *node, + int type) +{ + int access = (type == VHOST_ADDR_USED) ? 
+ VHOST_ACCESS_WO : VHOST_ACCESS_RO; + + if (likely(node->perm & access)) + vq->meta_iotlb[type] = node; +} + static int iotlb_access_ok(struct vhost_virtqueue *vq, - int access, u64 addr, u64 len) + int access, u64 addr, u64 len, int type) { const struct vhost_umem_node *node; struct vhost_umem *umem = vq->iotlb; - u64 s = 0, size; + u64 s = 0, size, orig_addr = addr; + + if (vhost_vq_meta_fetch(vq, addr, len, type)) + return true; while (len > s) { node = vhost_umem_interval_tree_iter_first(&umem->umem_tree, @@ -1125,6 +1205,10 @@ static int iotlb_access_ok(struct vhost_virtqueue *vq, } size = node->size - addr + node->start; + + if (orig_addr == addr && size >= len) + vhost_vq_meta_update(vq, node, type); + s += size; addr += size; } @@ -1141,13 +1225,15 @@ int vq_iotlb_prefetch(struct vhost_virtqueue *vq) return 1; return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, - num * sizeof *vq->desc) && + num * sizeof(*vq->desc), VHOST_ADDR_DESC) && iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail, sizeof *vq->avail + - num * sizeof *vq->avail->ring + s) && + num * sizeof(*vq->avail->ring) + s, + VHOST_ADDR_AVAIL) && iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used, sizeof *vq->used + - num * sizeof *vq->used->ring + s); + num * sizeof(*vq->used->ring) + s, + VHOST_ADDR_USED); } EXPORT_SYMBOL_GPL(vq_iotlb_prefetch); @@ -1728,7 +1814,7 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq) r = -EFAULT; goto err; } - r = vhost_get_user(vq, last_used_idx, &vq->used->idx); + r = vhost_get_used(vq, last_used_idx, &vq->used->idx); if (r) { vq_err(vq, "Can't access used idx at %p\n", &vq->used->idx); @@ -1930,29 +2016,36 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, /* Check it isn't doing very strange things with descriptor numbers. */ last_avail_idx = vq->last_avail_idx; - if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) { - vq_err(vq, "Failed to access avail idx at %p\n", - &vq->avail->idx); - return -EFAULT; - } - vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { - vq_err(vq, "Guest moved used index from %u to %u", - last_avail_idx, vq->avail_idx); - return -EFAULT; - } + if (vq->avail_idx == vq->last_avail_idx) { + if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) { + vq_err(vq, "Failed to access avail idx at %p\n", + &vq->avail->idx); + return -EFAULT; + } + vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - /* If there's nothing new since last we looked, return invalid. */ - if (vq->avail_idx == last_avail_idx) - return vq->num; + if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { + vq_err(vq, "Guest moved used index from %u to %u", + last_avail_idx, vq->avail_idx); + return -EFAULT; + } + + /* If there's nothing new since last we looked, return + * invalid. + */ + if (vq->avail_idx == last_avail_idx) + return vq->num; - /* Only get avail ring entries after they have been exposed by guest. */ - smp_rmb(); + /* Only get avail ring entries after they have been + * exposed by guest. + */ + smp_rmb(); + } /* Grab the next descriptor number they're advertising, and increment * the index we've seen. 
*/ - if (unlikely(vhost_get_user(vq, ring_head, + if (unlikely(vhost_get_avail(vq, ring_head, &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) { vq_err(vq, "Failed to read head: idx %d address %p\n", last_avail_idx, @@ -2168,7 +2261,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) * with the barrier that the Guest executes when enabling * interrupts. */ smp_mb(); - if (vhost_get_user(vq, flags, &vq->avail->flags)) { + if (vhost_get_avail(vq, flags, &vq->avail->flags)) { vq_err(vq, "Failed to get flags"); return true; } @@ -2195,7 +2288,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) * interrupts. */ smp_mb(); - if (vhost_get_user(vq, event, vhost_used_event(vq))) { + if (vhost_get_avail(vq, event, vhost_used_event(vq))) { vq_err(vq, "Failed to get used event idx"); return true; } @@ -2242,7 +2335,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (vq->avail_idx != vq->last_avail_idx) return false; - r = vhost_get_user(vq, avail_idx, &vq->avail->idx); + r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); if (unlikely(r)) return false; vq->avail_idx = vhost16_to_cpu(vq, avail_idx); @@ -2278,7 +2371,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) /* They could have slipped one in as we were doing that: make * sure it's written, then check again. */ smp_mb(); - r = vhost_get_user(vq, avail_idx, &vq->avail->idx); + r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); if (r) { vq_err(vq, "Failed to check avail idx at %p: %d\n", &vq->avail->idx, r); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index a9cbbb148f46..f55671d53f28 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -76,6 +76,13 @@ struct vhost_umem { int numem; }; +enum vhost_uaddr_type { + VHOST_ADDR_DESC = 0, + VHOST_ADDR_AVAIL = 1, + VHOST_ADDR_USED = 2, + VHOST_NUM_ADDRS = 3, +}; + /* The virtqueue structure describes a queue attached to a device. 
*/ struct vhost_virtqueue { struct vhost_dev *dev; @@ -86,6 +93,7 @@ struct vhost_virtqueue { struct vring_desc __user *desc; struct vring_avail __user *avail; struct vring_used __user *used; + const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; struct file *kick; struct file *call; struct file *error; diff --git a/drivers/video/fbdev/auo_k190x.c b/drivers/video/fbdev/auo_k190x.c index 9580374667ba..0d06038324e0 100644 --- a/drivers/video/fbdev/auo_k190x.c +++ b/drivers/video/fbdev/auo_k190x.c @@ -9,6 +9,7 @@ */ #include <linux/module.h> +#include <linux/sched/mm.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/platform_device.h> diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c index 038ac6934fe9..9da90bd242f4 100644 --- a/drivers/video/fbdev/cobalt_lcdfb.c +++ b/drivers/video/fbdev/cobalt_lcdfb.c @@ -26,6 +26,7 @@ #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/module.h> +#include <linux/sched/signal.h> /* * Cursor position address diff --git a/drivers/video/fbdev/nvidia/nv_accel.c b/drivers/video/fbdev/nvidia/nv_accel.c index ad6472a894ea..7341fed63e35 100644 --- a/drivers/video/fbdev/nvidia/nv_accel.c +++ b/drivers/video/fbdev/nvidia/nv_accel.c @@ -48,6 +48,8 @@ */ #include <linux/fb.h> +#include <linux/nmi.h> + #include "nv_type.h" #include "nv_proto.h" #include "nv_dma.h" diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 8b810696a42b..fd2b372d0264 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -19,7 +19,7 @@ #include <linux/jiffies.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/of_device.h> diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 9d2738e9217f..4e1191508228 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -31,6 +31,7 @@ #include <linux/wait.h> #include <linux/mm.h> #include <linux/mount.h> +#include <linux/magic.h> /* * Balloon device works in 4K page units. So each page is pointed to by @@ -413,7 +414,8 @@ static int init_vqs(struct virtio_balloon *vb) * optionally stat. */ nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 
3 : 2; - err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names); + err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names, + NULL); if (err) return err; diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index 350a2a5a49db..79f1293cda93 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -173,7 +173,8 @@ static int virtinput_init_vqs(struct virtio_input *vi) static const char * const names[] = { "events", "status" }; int err; - err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names); + err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names, + NULL); if (err) return err; vi->evt = vqs[0]; diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index c71fde5fe835..78343b8f9034 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -70,7 +70,7 @@ #include <linux/spinlock.h> #include <linux/virtio.h> #include <linux/virtio_config.h> -#include <linux/virtio_mmio.h> +#include <uapi/linux/virtio_mmio.h> #include <linux/virtio_ring.h> @@ -446,7 +446,8 @@ error_available: static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); unsigned int irq = platform_get_irq(vm_dev->pdev, 0); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 186cbab327b8..df548a6fb844 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -33,10 +33,8 @@ void vp_synchronize_vectors(struct virtio_device *vdev) struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; - if (vp_dev->intx_enabled) - synchronize_irq(vp_dev->pci_dev->irq); - - for (i = 0; i < vp_dev->msix_vectors; ++i) + synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); + for (i = 1; i < vp_dev->msix_vectors; i++) synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); } @@ -62,16 +60,13 @@ static irqreturn_t vp_config_changed(int irq, void *opaque) static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; - struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; - unsigned long flags; + struct virtqueue *vq; - spin_lock_irqsave(&vp_dev->lock, flags); - list_for_each_entry(info, &vp_dev->virtqueues, node) { - if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) + list_for_each_entry(vq, &vp_dev->vdev.vqs, list) { + if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } - spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } @@ -102,237 +97,185 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) return vp_vring_interrupt(irq, opaque); } -static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, - bool per_vq_vectors) +static void vp_remove_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - const char *name = dev_name(&vp_dev->vdev.dev); - unsigned i, v; - int err = -ENOMEM; - - vp_dev->msix_vectors = nvectors; - - vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, - GFP_KERNEL); - if (!vp_dev->msix_names) - goto error; - vp_dev->msix_affinity_masks - = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, - GFP_KERNEL); - if (!vp_dev->msix_affinity_masks) - goto error; - for (i = 0; i < nvectors; ++i) - if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], - 
GFP_KERNEL)) - goto error; - - err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors, - PCI_IRQ_MSIX); - if (err < 0) - goto error; - vp_dev->msix_enabled = 1; - - /* Set the vector used for configuration */ - v = vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, - "%s-config", name); - err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), - vp_config_changed, 0, vp_dev->msix_names[v], - vp_dev); - if (err) - goto error; - ++vp_dev->msix_used_vectors; - - v = vp_dev->config_vector(vp_dev, v); - /* Verify we had enough resources to assign the vector */ - if (v == VIRTIO_MSI_NO_VECTOR) { - err = -EBUSY; - goto error; - } - - if (!per_vq_vectors) { - /* Shared vector for all VQs */ - v = vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, - "%s-virtqueues", name); - err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), - vp_vring_interrupt, 0, vp_dev->msix_names[v], - vp_dev); - if (err) - goto error; - ++vp_dev->msix_used_vectors; - } - return 0; -error: - return err; -} - -static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq), - const char *name, - u16 msix_vec) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); - struct virtqueue *vq; - unsigned long flags; - - /* fill out our structure that represents an active queue */ - if (!info) - return ERR_PTR(-ENOMEM); + struct virtqueue *vq, *n; - vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec); - if (IS_ERR(vq)) - goto out_info; + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + if (vp_dev->msix_vector_map) { + int v = vp_dev->msix_vector_map[vq->index]; - info->vq = vq; - if (callback) { - spin_lock_irqsave(&vp_dev->lock, flags); - list_add(&info->node, &vp_dev->virtqueues); - spin_unlock_irqrestore(&vp_dev->lock, flags); - } else { - INIT_LIST_HEAD(&info->node); + if (v != VIRTIO_MSI_NO_VECTOR) + free_irq(pci_irq_vector(vp_dev->pci_dev, v), + vq); + } + vp_dev->del_vq(vq); } - - vp_dev->vqs[index] = info; - return vq; - -out_info: - kfree(info); - return vq; -} - -static void vp_del_vq(struct virtqueue *vq) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); - struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; - unsigned long flags; - - spin_lock_irqsave(&vp_dev->lock, flags); - list_del(&info->node); - spin_unlock_irqrestore(&vp_dev->lock, flags); - - vp_dev->del_vq(info); - kfree(info); } /* the config->del_vqs() implementation */ void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtqueue *vq, *n; int i; - list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - if (vp_dev->per_vq_vectors) { - int v = vp_dev->vqs[vq->index]->msix_vector; - - if (v != VIRTIO_MSI_NO_VECTOR) - free_irq(pci_irq_vector(vp_dev->pci_dev, v), - vq); - } - vp_del_vq(vq); - } - vp_dev->per_vq_vectors = false; - - if (vp_dev->intx_enabled) { - free_irq(vp_dev->pci_dev->irq, vp_dev); - vp_dev->intx_enabled = 0; - } + if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs))) + return; - for (i = 0; i < vp_dev->msix_used_vectors; ++i) - free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev); + vp_remove_vqs(vdev); - for (i = 0; i < vp_dev->msix_vectors; i++) - if (vp_dev->msix_affinity_masks[i]) + if (vp_dev->pci_dev->msix_enabled) { + for (i = 0; i < vp_dev->msix_vectors; i++) free_cpumask_var(vp_dev->msix_affinity_masks[i]); - if 
(vp_dev->msix_enabled) { /* Disable the vector used for configuration */ vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); - pci_free_irq_vectors(vp_dev->pci_dev); - vp_dev->msix_enabled = 0; + kfree(vp_dev->msix_affinity_masks); + kfree(vp_dev->msix_names); + kfree(vp_dev->msix_vector_map); } - vp_dev->msix_vectors = 0; - vp_dev->msix_used_vectors = 0; - kfree(vp_dev->msix_names); - vp_dev->msix_names = NULL; - kfree(vp_dev->msix_affinity_masks); - vp_dev->msix_affinity_masks = NULL; - kfree(vp_dev->vqs); - vp_dev->vqs = NULL; + free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); + pci_free_irq_vectors(vp_dev->pci_dev); } static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[], - bool per_vq_vectors) + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + const char *name = dev_name(&vp_dev->vdev.dev); + int i, err = -ENOMEM, allocated_vectors, nvectors; + unsigned flags = PCI_IRQ_MSIX; + bool shared = false; u16 msix_vec; - int i, err, nvectors, allocated_vectors; - vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); - if (!vp_dev->vqs) - return -ENOMEM; + if (desc) { + flags |= PCI_IRQ_AFFINITY; + desc->pre_vectors++; /* virtio config vector */ + } - if (per_vq_vectors) { - /* Best option: one for change interrupt, one per vq. */ - nvectors = 1; - for (i = 0; i < nvqs; ++i) - if (callbacks[i]) - ++nvectors; - } else { - /* Second best: one for change, shared for all vqs. */ - nvectors = 2; + nvectors = 1; + for (i = 0; i < nvqs; i++) + if (callbacks[i]) + nvectors++; + + /* Try one vector per queue first. */ + err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, + nvectors, flags, desc); + if (err < 0) { + /* Fallback to one vector for config, one shared for queues. 
*/ + shared = true; + err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2, + PCI_IRQ_MSIX); + if (err < 0) + return err; + } + if (err < 0) + return err; + + vp_dev->msix_vectors = nvectors; + vp_dev->msix_names = kmalloc_array(nvectors, + sizeof(*vp_dev->msix_names), GFP_KERNEL); + if (!vp_dev->msix_names) + goto out_free_irq_vectors; + + vp_dev->msix_affinity_masks = kcalloc(nvectors, + sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL); + if (!vp_dev->msix_affinity_masks) + goto out_free_msix_names; + + for (i = 0; i < nvectors; ++i) { + if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], + GFP_KERNEL)) + goto out_free_msix_affinity_masks; } - err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); + /* Set the vector used for configuration */ + snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names), + "%s-config", name); + err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed, + 0, vp_dev->msix_names[0], vp_dev); if (err) - goto error_find; + goto out_free_msix_affinity_masks; + + /* Verify we had enough resources to assign the vector */ + if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto out_free_config_irq; + } + + vp_dev->msix_vector_map = kmalloc_array(nvqs, + sizeof(*vp_dev->msix_vector_map), GFP_KERNEL); + if (!vp_dev->msix_vector_map) + goto out_disable_config_irq; - vp_dev->per_vq_vectors = per_vq_vectors; - allocated_vectors = vp_dev->msix_used_vectors; + allocated_vectors = 1; /* vector 0 is the config interrupt */ for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - if (!callbacks[i]) - msix_vec = VIRTIO_MSI_NO_VECTOR; - else if (vp_dev->per_vq_vectors) - msix_vec = allocated_vectors++; + if (callbacks[i]) + msix_vec = allocated_vectors; else - msix_vec = VP_MSIX_VQ_VECTOR; - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec); + msix_vec = VIRTIO_MSI_NO_VECTOR; + + vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], + msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto error_find; + goto out_remove_vqs; } - if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; continue; + } - /* allocate per-vq irq if available and necessary */ - snprintf(vp_dev->msix_names[msix_vec], - sizeof *vp_dev->msix_names, - "%s-%s", + snprintf(vp_dev->msix_names[i + 1], + sizeof(*vp_dev->msix_names), "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), - vring_interrupt, 0, - vp_dev->msix_names[msix_vec], - vqs[i]); - if (err) - goto error_find; + vring_interrupt, IRQF_SHARED, + vp_dev->msix_names[i + 1], vqs[i]); + if (err) { + /* don't free this irq on error */ + vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; + goto out_remove_vqs; + } + vp_dev->msix_vector_map[i] = msix_vec; + + /* + * Use a different vector for each queue if they are available, + * else share the same vector for all VQs. 
+ */ + if (!shared) + allocated_vectors++; } + return 0; -error_find: - vp_del_vqs(vdev); +out_remove_vqs: + vp_remove_vqs(vdev); + kfree(vp_dev->msix_vector_map); +out_disable_config_irq: + vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); +out_free_config_irq: + free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); +out_free_msix_affinity_masks: + for (i = 0; i < nvectors; i++) { + if (vp_dev->msix_affinity_masks[i]) + free_cpumask_var(vp_dev->msix_affinity_masks[i]); + } + kfree(vp_dev->msix_affinity_masks); +out_free_msix_names: + kfree(vp_dev->msix_names); +out_free_irq_vectors: + pci_free_irq_vectors(vp_dev->pci_dev); return err; } @@ -343,53 +286,42 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i, err; - vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); - if (!vp_dev->vqs) - return -ENOMEM; - err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (err) - goto out_del_vqs; + return err; - vp_dev->intx_enabled = 1; - vp_dev->per_vq_vectors = false; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], VIRTIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto out_del_vqs; + goto out_remove_vqs; } } return 0; -out_del_vqs: - vp_del_vqs(vdev); + +out_remove_vqs: + vp_remove_vqs(vdev); + free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); return err; } /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]) + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc) { int err; - /* Try MSI-X with one vector per queue. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true); + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc); if (!err) return 0; - /* Fallback: MSI-X with one vector for config, one shared for queues. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false); - if (!err) - return 0; - /* Finally fall back to regular interrupts. 
*/ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names); } @@ -409,16 +341,15 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; - struct cpumask *mask; - unsigned int irq; if (!vq->callback) return -EINVAL; - if (vp_dev->msix_enabled) { - mask = vp_dev->msix_affinity_masks[info->msix_vector]; - irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector); + if (vp_dev->pci_dev->msix_enabled) { + int vec = vp_dev->msix_vector_map[vq->index]; + struct cpumask *mask = vp_dev->msix_affinity_masks[vec]; + unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec); + if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { @@ -430,6 +361,17 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) return 0; } +const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + unsigned int *map = vp_dev->msix_vector_map; + + if (!map || map[index] == VIRTIO_MSI_NO_VECTOR) + return NULL; + + return pci_irq_get_affinity(vp_dev->pci_dev, map[index]); +} + #ifdef CONFIG_PM_SLEEP static int virtio_pci_freeze(struct device *dev) { @@ -498,8 +440,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->pci_dev = pci_dev; - INIT_LIST_HEAD(&vp_dev->virtqueues); - spin_lock_init(&vp_dev->lock); /* enable the device */ rc = pci_enable_device(pci_dev); diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index b2f666250ae0..ac8c9d788964 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -31,17 +31,6 @@ #include <linux/highmem.h> #include <linux/spinlock.h> -struct virtio_pci_vq_info { - /* the actual virtqueue */ - struct virtqueue *vq; - - /* the list node for the virtqueues list */ - struct list_head node; - - /* MSI-X vector (or none) */ - unsigned msix_vector; -}; - /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; @@ -75,47 +64,25 @@ struct virtio_pci_device { /* the IO mapping for the PCI config space */ void __iomem *ioaddr; - /* a list of queues so we can dispatch IRQs */ - spinlock_t lock; - struct list_head virtqueues; - - /* array of all queues for house-keeping */ - struct virtio_pci_vq_info **vqs; - - /* MSI-X support */ - int msix_enabled; - int intx_enabled; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; - /* Number of available vectors */ - unsigned msix_vectors; - /* Vectors allocated, excluding per-vq vectors if any */ - unsigned msix_used_vectors; - - /* Whether we have vector per vq */ - bool per_vq_vectors; + /* Total Number of MSI-X vectors (including per-VQ ones). 
*/ + int msix_vectors; + /* Map of per-VQ MSI-X vectors, may be NULL */ + unsigned *msix_vector_map; struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, - struct virtio_pci_vq_info *info, unsigned idx, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec); - void (*del_vq)(struct virtio_pci_vq_info *info); + void (*del_vq)(struct virtqueue *vq); u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); }; -/* Constants for MSI-X */ -/* Use first vector for configuration changes, second and the rest for - * virtqueues Thus, we need at least 2 vectors for MSI. */ -enum { - VP_MSIX_CONFIG_VECTOR = 0, - VP_MSIX_VQ_VECTOR = 1, -}; - /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { @@ -130,9 +97,8 @@ bool vp_notify(struct virtqueue *vq); void vp_del_vqs(struct virtio_device *vdev); /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]); + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc); const char *vp_bus_name(struct virtio_device *vdev); /* Setup the affinity for a virtqueue: @@ -142,6 +108,8 @@ const char *vp_bus_name(struct virtio_device *vdev); */ int vp_set_vq_affinity(struct virtqueue *vq, int cpu); +const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index); + #if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) int virtio_pci_legacy_probe(struct virtio_pci_device *); void virtio_pci_legacy_remove(struct virtio_pci_device *); diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 6d9e5173d5fa..f7362c5fe18a 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -112,7 +112,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, - struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -130,8 +129,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); - info->msix_vector = msix_vec; - /* create the vring */ vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, @@ -162,14 +159,13 @@ out_deactivate: return ERR_PTR(err); } -static void del_vq(struct virtio_pci_vq_info *info) +static void del_vq(struct virtqueue *vq) { - struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - if (vp_dev->msix_enabled) { + if (vp_dev->pci_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ @@ -194,6 +190,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, + .get_vq_affinity = vp_get_vq_affinity, }; /* the PCI probing function */ diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 4bf7ab375894..7bc3004b840e 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -293,7 +293,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue 
*setup_vq(struct virtio_pci_device *vp_dev, - struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -323,8 +322,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* get offset of notification word for this vq */ off = vp_ioread16(&cfg->queue_notify_off); - info->msix_vector = msix_vec; - /* create the vring */ vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, &vp_dev->vdev, @@ -387,13 +384,12 @@ err_map_notify: } static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]) + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq; - int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names); + int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, desc); if (rc) return rc; @@ -409,14 +405,13 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, return 0; } -static void del_vq(struct virtio_pci_vq_info *info) +static void del_vq(struct virtqueue *vq) { - struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); vp_iowrite16(vq->index, &vp_dev->common->queue_select); - if (vp_dev->msix_enabled) { + if (vp_dev->pci_dev->msix_enabled) { vp_iowrite16(VIRTIO_MSI_NO_VECTOR, &vp_dev->common->queue_msix_vector); /* Flush the write out to device */ @@ -442,6 +437,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = { .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, + .get_vq_affinity = vp_get_vq_affinity, }; static const struct virtio_config_ops virtio_pci_config_ops = { @@ -457,6 +453,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, + .get_vq_affinity = vp_get_vq_affinity, }; /** diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c index df1c9bb90eb5..2096f460498f 100644 --- a/drivers/w1/w1_family.c +++ b/drivers/w1/w1_family.c @@ -14,7 +14,7 @@ #include <linux/spinlock.h> #include <linux/list.h> -#include <linux/sched.h> /* schedule_timeout() */ +#include <linux/sched/signal.h> #include <linux/delay.h> #include <linux/export.h> diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index 4ce1b66d5092..2cae7b29bb5f 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c @@ -17,6 +17,7 @@ #include <linux/delay.h> #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/export.h> #include <linux/moduleparam.h> diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c831b7967bf9..52a70ee6014f 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -142,6 +142,7 @@ config GPIO_WATCHDOG_ARCH_INITCALL config MENF21BMC_WATCHDOG tristate "MEN 14F021P00 BMC Watchdog" depends on MFD_MENF21BMC || COMPILE_TEST + depends on I2C select WATCHDOG_CORE help Say Y here to include support for the MEN 14F021P00 BMC Watchdog. @@ -176,7 +177,7 @@ config WDAT_WDT config WM831X_WATCHDOG tristate "WM831x watchdog" - depends on MFD_WM831X || COMPILE_TEST + depends on MFD_WM831X select WATCHDOG_CORE help Support for the watchdog in the WM831x AudioPlus PMICs. 
When @@ -217,7 +218,7 @@ config ZIIRAVE_WATCHDOG config ARM_SP805_WATCHDOG tristate "ARM SP805 Watchdog" - depends on (ARM || ARM64) && (ARM_AMBA || COMPILE_TEST) + depends on (ARM || ARM64 || COMPILE_TEST) && ARM_AMBA select WATCHDOG_CORE help ARM Primecell SP805 Watchdog timer. This will reboot your system when @@ -573,7 +574,7 @@ config IMX2_WDT config UX500_WATCHDOG tristate "ST-Ericsson Ux500 watchdog" - depends on MFD_DB8500_PRCMU || (ARM && COMPILE_TEST) + depends on MFD_DB8500_PRCMU select WATCHDOG_CORE default y help @@ -585,7 +586,7 @@ config UX500_WATCHDOG config RETU_WATCHDOG tristate "Retu watchdog" - depends on MFD_RETU || COMPILE_TEST + depends on MFD_RETU select WATCHDOG_CORE help Retu watchdog driver for Nokia Internet Tablets (770, N800, @@ -851,7 +852,7 @@ config SP5100_TCO config GEODE_WDT tristate "AMD Geode CS5535/CS5536 Watchdog" - depends on CS5535_MFGPT || (X86 && COMPILE_TEST) + depends on CS5535_MFGPT help This driver enables a watchdog capability built into the CS5535/CS5536 companion chips for the AMD Geode GX and LX @@ -1063,7 +1064,7 @@ config HP_WATCHDOG config KEMPLD_WDT tristate "Kontron COM Watchdog Timer" - depends on MFD_KEMPLD || COMPILE_TEST + depends on MFD_KEMPLD select WATCHDOG_CORE help Support for the PLD watchdog on some Kontron ETX and COMexpress @@ -1495,7 +1496,7 @@ config BCM63XX_WDT config BCM2835_WDT tristate "Broadcom BCM2835 hardware watchdog" - depends on ARCH_BCM2835 || COMPILE_TEST + depends on ARCH_BCM2835 || (OF && COMPILE_TEST) select WATCHDOG_CORE help Watchdog driver for the built in watchdog hardware in Broadcom diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c index 73c46b3a09ab..2f3b049ea301 100644 --- a/drivers/watchdog/kempld_wdt.c +++ b/drivers/watchdog/kempld_wdt.c @@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data, unsigned int timeout) { struct kempld_device_data *pld = wdt_data->pld; - u32 prescaler = kempld_prescaler[PRESCALER_21]; + u32 prescaler; u64 stage_timeout64; u32 stage_timeout; u32 remainder; u8 stage_cfg; +#if GCC_VERSION < 40400 + /* work around a bug compiling do_div() */ + prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]); +#else + prescaler = kempld_prescaler[PRESCALER_21]; +#endif + if (!stage) return -EINVAL; diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index 7983029852ab..060740625485 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -21,13 +21,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/hrtimer.h> #include <linux/init.h> -#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/reboot.h> -#include <linux/timer.h> #include <linux/types.h> #include <linux/watchdog.h> @@ -54,7 +53,10 @@ module_param(soft_panic, int, 0); MODULE_PARM_DESC(soft_panic, "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); -static void softdog_fire(unsigned long data) +static struct hrtimer softdog_ticktock; +static struct hrtimer softdog_preticktock; + +static enum hrtimer_restart softdog_fire(struct hrtimer *timer) { module_put(THIS_MODULE); if (soft_noboot) { @@ -67,32 +69,33 @@ static void softdog_fire(unsigned long data) emergency_restart(); pr_crit("Reboot didn't ?????\n"); } -} -static struct timer_list softdog_ticktock = - TIMER_INITIALIZER(softdog_fire, 0, 0); + return HRTIMER_NORESTART; +} static struct watchdog_device softdog_dev; -static void softdog_pretimeout(unsigned long data) +static enum 
hrtimer_restart softdog_pretimeout(struct hrtimer *timer) { watchdog_notify_pretimeout(&softdog_dev); -} -static struct timer_list softdog_preticktock = - TIMER_INITIALIZER(softdog_pretimeout, 0, 0); + return HRTIMER_NORESTART; +} static int softdog_ping(struct watchdog_device *w) { - if (!mod_timer(&softdog_ticktock, jiffies + (w->timeout * HZ))) + if (!hrtimer_active(&softdog_ticktock)) __module_get(THIS_MODULE); + hrtimer_start(&softdog_ticktock, ktime_set(w->timeout, 0), + HRTIMER_MODE_REL); if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) { if (w->pretimeout) - mod_timer(&softdog_preticktock, jiffies + - (w->timeout - w->pretimeout) * HZ); + hrtimer_start(&softdog_preticktock, + ktime_set(w->timeout - w->pretimeout, 0), + HRTIMER_MODE_REL); else - del_timer(&softdog_preticktock); + hrtimer_cancel(&softdog_preticktock); } return 0; @@ -100,11 +103,11 @@ static int softdog_ping(struct watchdog_device *w) static int softdog_stop(struct watchdog_device *w) { - if (del_timer(&softdog_ticktock)) + if (hrtimer_cancel(&softdog_ticktock)) module_put(THIS_MODULE); if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) - del_timer(&softdog_preticktock); + hrtimer_cancel(&softdog_preticktock); return 0; } @@ -136,8 +139,15 @@ static int __init softdog_init(void) watchdog_set_nowayout(&softdog_dev, nowayout); watchdog_stop_on_reboot(&softdog_dev); - if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) + hrtimer_init(&softdog_ticktock, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + softdog_ticktock.function = softdog_fire; + + if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) { softdog_info.options |= WDIOF_PRETIMEOUT; + hrtimer_init(&softdog_preticktock, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + softdog_preticktock.function = softdog_pretimeout; + } ret = watchdog_register_device(&softdog_dev); if (ret) diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index db107fa50ca1..a6d4378eb8d9 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -41,6 +41,7 @@ #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/bootmem.h> diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2ef2b61b69df..c77a0751a311 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -32,6 +32,7 @@ #include <linux/types.h> #include <linux/uaccess.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/highmem.h>
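
Note on the vhost changes earlier in this diff: the patch adds a small per-virtqueue metadata cache (meta_iotlb[VHOST_NUM_ADDRS]) remembering the IOTLB node last used to translate the descriptor, avail and used rings, so vhost_copy_to_user()/vhost_copy_from_user() and __vhost_get_user() can take a fast path instead of calling translate_desc() on every access; the cache is filled from iotlb_access_ok() during prefetch and dropped in vhost_vq_meta_reset() whenever an IOTLB update or invalidation message arrives. The sketch below is a minimal, standalone userspace model of that caching idea only; the names (umem_node, vq_cache, iotlb_lookup, ring_translate) are illustrative assumptions, not the kernel's API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum uaddr_type { ADDR_DESC, ADDR_AVAIL, ADDR_USED, NUM_ADDRS };

struct umem_node {
    uint64_t start;           /* first guest IOVA covered by this mapping */
    uint64_t size;            /* length of the mapping in bytes */
    uint64_t userspace_addr;  /* user virtual address backing 'start' */
};

struct vq_cache {
    const struct umem_node *meta[NUM_ADDRS];  /* last node used per ring */
};

static const struct umem_node *iotlb_lookup(const struct umem_node *tbl,
                                            size_t n, uint64_t iova,
                                            uint64_t len)
{
    /* Slow-path stand-in: the kernel walks an interval tree here. */
    for (size_t i = 0; i < n; i++)
        if (iova >= tbl[i].start &&
            iova + len <= tbl[i].start + tbl[i].size)
            return &tbl[i];
    return NULL;
}

static uint64_t ring_translate(struct vq_cache *c, const struct umem_node *tbl,
                               size_t n, enum uaddr_type type,
                               uint64_t iova, uint64_t len)
{
    const struct umem_node *node = c->meta[type];

    /* Fast path: reuse the cached node if it still covers the access. */
    if (!node || iova < node->start ||
        iova + len > node->start + node->size) {
        node = iotlb_lookup(tbl, n, iova, len);  /* slow path */
        c->meta[type] = node;                    /* refresh the cache */
    }
    return node ? node->userspace_addr + (iova - node->start) : 0;
}

static void cache_reset(struct vq_cache *c)
{
    /* Mirrors vhost_vq_meta_reset(): any IOTLB change drops the cache. */
    for (int i = 0; i < NUM_ADDRS; i++)
        c->meta[i] = NULL;
}

int main(void)
{
    struct umem_node tbl[] = {
        { .start = 0x1000, .size = 0x1000,
          .userspace_addr = 0x7f0000001000ULL },
    };
    struct vq_cache c = { { NULL } };

    /* First access misses the cache, second one hits it. */
    printf("%#llx\n", (unsigned long long)
           ring_translate(&c, tbl, 1, ADDR_USED, 0x1010, 8));
    printf("%#llx\n", (unsigned long long)
           ring_translate(&c, tbl, 1, ADDR_USED, 0x1020, 8));

    cache_reset(&c);  /* e.g. after a VHOST_IOTLB_INVALIDATE message */
    return 0;
}

Keying the cache by ring type works because the patch only caches a node (in vhost_vq_meta_update()) when a single mapping covers the whole ring, so one cached entry per ring handles the common case, and resetting the cache on every IOTLB message keeps the fast path coherent with the slow interval-tree path.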