Diffstat (limited to 'drivers')
36 files changed, 323 insertions, 272 deletions
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 321cd7b4d817..227bac5f1191 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -377,7 +377,8 @@ int register_cpu(struct cpu *cpu, int num) per_cpu(cpu_sys_devices, num) = &cpu->dev; register_cpu_under_node(num, cpu_to_node(num)); - dev_pm_qos_expose_latency_limit(&cpu->dev, 0); + dev_pm_qos_expose_latency_limit(&cpu->dev, + PM_QOS_RESUME_LATENCY_NO_CONSTRAINT); return 0; } diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 281f949c5ffe..51751cc8c9e6 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -14,23 +14,20 @@ static int dev_update_qos_constraint(struct device *dev, void *data) { s64 *constraint_ns_p = data; - s32 constraint_ns = -1; + s64 constraint_ns = -1; if (dev->power.subsys_data && dev->power.subsys_data->domain_data) constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns; - if (constraint_ns < 0) { + if (constraint_ns < 0) constraint_ns = dev_pm_qos_read_value(dev); - constraint_ns *= NSEC_PER_USEC; - } - if (constraint_ns == 0) + + if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) return 0; - /* - * constraint_ns cannot be negative here, because the device has been - * suspended. - */ - if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0) + constraint_ns *= NSEC_PER_USEC; + + if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0) *constraint_ns_p = constraint_ns; return 0; @@ -63,10 +60,14 @@ static bool default_suspend_ok(struct device *dev) spin_unlock_irqrestore(&dev->power.lock, flags); - if (constraint_ns < 0) + if (constraint_ns == 0) return false; - constraint_ns *= NSEC_PER_USEC; + if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) + constraint_ns = -1; + else + constraint_ns *= NSEC_PER_USEC; + /* * We can walk the children without any additional locking, because * they all have been suspended at this point and their @@ -76,14 +77,19 @@ static bool default_suspend_ok(struct device *dev) device_for_each_child(dev, &constraint_ns, dev_update_qos_constraint); - if (constraint_ns > 0) { - constraint_ns -= td->suspend_latency_ns + - td->resume_latency_ns; - if (constraint_ns == 0) - return false; + if (constraint_ns < 0) { + /* The children have no constraints. */ + td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; + td->cached_suspend_ok = true; + } else { + constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns; + if (constraint_ns > 0) { + td->effective_constraint_ns = constraint_ns; + td->cached_suspend_ok = true; + } else { + td->effective_constraint_ns = 0; + } } - td->effective_constraint_ns = constraint_ns; - td->cached_suspend_ok = constraint_ns >= 0; /* * The children have been suspended already, so we don't need to take @@ -145,13 +151,14 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, td = &to_gpd_data(pdd)->td; constraint_ns = td->effective_constraint_ns; /* default_suspend_ok() need not be called before us. */ - if (constraint_ns < 0) { + if (constraint_ns < 0) constraint_ns = dev_pm_qos_read_value(pdd->dev); - constraint_ns *= NSEC_PER_USEC; - } - if (constraint_ns == 0) + + if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) continue; + constraint_ns *= NSEC_PER_USEC; + /* * constraint_ns cannot be negative here, because the device has * been suspended. 
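The domain-governor and cpu.c hunks above change the meaning of the resume-latency QoS value: 0 now means "suspend not allowed" (the sysfs hunk further down shows it to userspace as "n/a"), while the new PM_QOS_RESUME_LATENCY_NO_CONSTRAINT sentinel means "no limit at all". As a rough condensation of the default_suspend_ok() logic — not part of the patch, the helper name is hypothetical, and the usual kernel headers (<linux/device.h>, <linux/pm_qos.h>) are assumed — a caller would now interpret dev_pm_qos_read_value() roughly like this:

/* Hypothetical helper, sketched from the hunks above; not in the patch. */
static bool resume_latency_allows_suspend(struct device *dev, s64 *limit_ns)
{
	s32 qos_us = dev_pm_qos_read_value(dev);	/* value is in microseconds */

	if (qos_us == 0)
		return false;			/* "n/a": device must stay resumed */

	if (qos_us == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
		*limit_ns = -1;			/* no constraint at all */
	else
		*limit_ns = (s64)qos_us * NSEC_PER_USEC;

	return true;
}
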
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 277d43a83f53..7d29286d9313 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -189,7 +189,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) plist_head_init(&c->list); c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; - c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; c->type = PM_QOS_MIN; c->notifiers = n; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7bcf80fa9ada..13e015905543 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev) || (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) retval = -EAGAIN; - else if (__dev_pm_qos_read_value(dev) < 0) + else if (__dev_pm_qos_read_value(dev) == 0) retval = -EPERM; else if (dev->power.runtime_status == RPM_SUSPENDED) retval = 1; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 156ab57bca77..632077f05c57 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev)); + s32 value = dev_pm_qos_requested_resume_latency(dev); + + if (value == 0) + return sprintf(buf, "n/a\n"); + else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) + value = 0; + + return sprintf(buf, "%d\n", value); } static ssize_t pm_qos_resume_latency_store(struct device *dev, @@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev, s32 value; int ret; - if (kstrtos32(buf, 0, &value)) - return -EINVAL; + if (!kstrtos32(buf, 0, &value)) { + /* + * Prevent users from writing negative or "no constraint" values + * directly. + */ + if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) + return -EINVAL; - if (value < 0) + if (value == 0) + value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; + } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) { + value = 0; + } else { return -EINVAL; + } ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, value); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index baebbdfd74d5..9adfb5445f8d 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -386,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, return result; } +/* + * Different settings for sk->sk_sndtimeo can result in different return values + * if there is a signal pending when we enter sendmsg, because reasons? + */ +static inline int was_interrupted(int result) +{ + return result == -ERESTARTSYS || result == -EINTR; +} + /* always call with the tx_lock held */ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) { @@ -458,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) result = sock_xmit(nbd, index, 1, &from, (type == NBD_CMD_WRITE) ? 
MSG_MORE : 0, &sent); if (result <= 0) { - if (result == -ERESTARTSYS) { + if (was_interrupted(result)) { /* If we havne't sent anything we can just return BUSY, * however if we have sent something we need to make * sure we only allow this req to be sent until we are @@ -502,7 +511,7 @@ send_pages: } result = sock_xmit(nbd, index, 1, &from, flags, &sent); if (result <= 0) { - if (result == -ERESTARTSYS) { + if (was_interrupted(result)) { /* We've already sent the header, we * have no choice but to set pending and * return BUSY. diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 48eaf2879228..aa390404e85f 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -298,8 +298,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) data->needs_update = 0; } - /* resume_latency is 0 means no restriction */ - if (resume_latency && resume_latency < latency_req) + if (resume_latency < latency_req && + resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) latency_req = resume_latency; /* Special case when user has set very strict latency requirement */ diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 1cb2d1c070c3..a94601d5939e 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -238,7 +238,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, efi_random_get_seed(sys_table); - if (!nokaslr()) { + /* hibernation expects the runtime regions to stay in the same place */ + if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) { /* * Randomize the base of the UEFI runtime services region. * Preserve the 2 MB alignment of the region by taking a diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c index 08129b7b80ab..41c48a1e8baa 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c @@ -593,6 +593,9 @@ static long efi_runtime_query_capsulecaps(unsigned long arg) if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps))) return -EFAULT; + if (qcaps.capsule_count == ULONG_MAX) + return -EINVAL; + capsules = kcalloc(qcaps.capsule_count + 1, sizeof(efi_capsule_header_t), GFP_KERNEL); if (!capsules) diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c index 97a62f5b9ea4..a973eb6a2890 100644 --- a/drivers/hwmon/da9052-hwmon.c +++ b/drivers/hwmon/da9052-hwmon.c @@ -477,6 +477,11 @@ static int da9052_hwmon_probe(struct platform_device *pdev) /* disable touchscreen features */ da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00); + /* Sample every 1ms */ + da9052_reg_update(hwmon->da9052, DA9052_ADC_CONT_REG, + DA9052_ADCCONT_ADCMODE, + DA9052_ADCCONT_ADCMODE); + err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY, "tsiready-irq", da9052_tsi_datardy_irq, hwmon); diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c index 5eafbaada795..dfc40c740d07 100644 --- a/drivers/hwmon/tmp102.c +++ b/drivers/hwmon/tmp102.c @@ -268,14 +268,11 @@ static int tmp102_probe(struct i2c_client *client, return err; } - tmp102->ready_time = jiffies; - if (tmp102->config_orig & TMP102_CONF_SD) { - /* - * Mark that we are not ready with data until the first - * conversion is complete - */ - tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS); - } + /* + * Mark that we are not ready with data until the first + * conversion is complete + */ + tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS); 
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, tmp102, diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index b12e58787c3d..1fb72c356e36 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -175,13 +175,24 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; + /* + * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't + * mistakenly call the .dump() function. + */ + if (index == RDMA_NL_LS) { + if (cb_table[op].doit) + return cb_table[op].doit(skb, nlh, extack); + return -EINVAL; + } /* FIXME: Convert IWCM to properly handle doit callbacks */ if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM || index == RDMA_NL_IWCM) { struct netlink_dump_control c = { .dump = cb_table[op].dump, }; - return netlink_dump_start(nls, skb, nlh, &c); + if (c.dump) + return netlink_dump_start(nls, skb, nlh, &c); + return -EINVAL; } if (cb_table[op].doit) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index af075e998944..be49d0f79381 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2545,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) nvme_fc_abort_aen_ops(ctrl); /* wait for all io that had to be aborted */ - spin_lock_irqsave(&ctrl->lock, flags); + spin_lock_irq(&ctrl->lock); wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); ctrl->flags &= ~FCCTRL_TERMIO; - spin_unlock_irqrestore(&ctrl->lock, flags); + spin_unlock_irq(&ctrl->lock); nvme_fc_term_aen_ops(ctrl); @@ -2734,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, { struct nvme_fc_ctrl *ctrl; unsigned long flags; - int ret, idx; + int ret, idx, retry; if (!(rport->remoteport.port_role & (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { @@ -2760,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, ctrl->rport = rport; ctrl->dev = lport->dev; ctrl->cnum = idx; + init_waitqueue_head(&ctrl->ioabort_wait); get_device(ctrl->dev); kref_init(&ctrl->ref); @@ -2825,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); spin_unlock_irqrestore(&rport->lock, flags); - ret = nvme_fc_create_association(ctrl); + /* + * It's possible that transactions used to create the association + * may fail. Examples: CreateAssociation LS or CreateIOConnection + * LS gets dropped/corrupted/fails; or a frame gets dropped or a + * command times out for one of the actions to init the controller + * (Connect, Get/Set_Property, Set_Features, etc). Many of these + * transport errors (frame drop, LS failure) inherently must kill + * the association. The transport is coded so that any command used + * to create the association (prior to a LIVE state transition + * while NEW or RECONNECTING) will fail if it completes in error or + * times out. + * + * As such: as the connect request was mostly likely due to a + * udev event that discovered the remote port, meaning there is + * not an admin or script there to restart if the connect + * request fails, retry the initial connection creation up to + * three times before giving up and declaring failure. 
+ */ + for (retry = 0; retry < 3; retry++) { + ret = nvme_fc_create_association(ctrl); + if (!ret) + break; + } + if (ret) { + /* couldn't schedule retry - fail out */ + dev_err(ctrl->ctrl.device, + "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum); + ctrl->ctrl.opts = NULL; + /* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 92a03ff5fb4d..87bac27ec64b 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) return; + if (nvme_rdma_queue_idx(queue) == 0) { + nvme_rdma_free_qe(queue->device->dev, + &queue->ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + } + nvme_rdma_destroy_queue_ib(queue); rdma_destroy_id(queue->cm_id); } @@ -739,8 +745,6 @@ out: static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { - nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe, - sizeof(struct nvme_command), DMA_TO_DEVICE); nvme_rdma_stop_queue(&ctrl->queues[0]); if (remove) { blk_cleanup_queue(ctrl->ctrl.admin_q); @@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, if (new) { ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); - if (IS_ERR(ctrl->ctrl.admin_tagset)) + if (IS_ERR(ctrl->ctrl.admin_tagset)) { + error = PTR_ERR(ctrl->ctrl.admin_tagset); goto out_free_queue; + } ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); if (IS_ERR(ctrl->ctrl.admin_q)) { @@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) if (new) { ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); - if (IS_ERR(ctrl->ctrl.tagset)) + if (IS_ERR(ctrl->ctrl.tagset)) { + ret = PTR_ERR(ctrl->ctrl.tagset); goto out_free_io_queues; + } ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); if (IS_ERR(ctrl->ctrl.connect_q)) { diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 1b208beeef50..645ba7eee35d 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -387,12 +387,21 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) static void __nvmet_req_complete(struct nvmet_req *req, u16 status) { + u32 old_sqhd, new_sqhd; + u16 sqhd; + if (status) nvmet_set_status(req, status); - if (req->sq->size) - req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size; - req->rsp->sq_head = cpu_to_le16(req->sq->sqhd); + if (req->sq->size) { + do { + old_sqhd = req->sq->sqhd; + new_sqhd = (old_sqhd + 1) % req->sq->size; + } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) != + old_sqhd); + } + sqhd = req->sq->sqhd & 0x0000FFFF; + req->rsp->sq_head = cpu_to_le16(sqhd); req->rsp->sq_id = cpu_to_le16(req->sq->qid); req->rsp->command_id = req->cmd->common.command_id; diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 7b8e20adf760..87e429bfcd8a 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -74,7 +74,7 @@ struct nvmet_sq { struct percpu_ref ref; u16 qid; u16 size; - u16 sqhd; + u32 sqhd; struct completion free_done; struct completion confirm_done; }; diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 3f6b34febbf1..433af328d981 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -534,8 +534,16 @@ static 
irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) continue; irq = irq_find_mapping(gc->irqdomain, irqnr + i); generic_handle_irq(irq); - /* Clear interrupt */ + + /* Clear interrupt. + * We must read the pin register again, in case the + * value was changed while executing + * generic_handle_irq() above. + */ + raw_spin_lock_irqsave(&gpio_dev->lock, flags); + regval = readl(regs + i); writel(regval, regs + i); + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); ret = IRQ_HANDLED; } } diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 3e40d4245512..9c950bbf07ba 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -407,10 +407,10 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset) ret = mcp_read(mcp, MCP_GPIO, &status); if (ret < 0) status = 0; - else + else { + mcp->cached_gpio = status; status = !!(status & (1 << offset)); - - mcp->cached_gpio = status; + } mutex_unlock(&mcp->lock); return status; diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index bb792a52248b..e03fa31446ca 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -33,6 +33,7 @@ #include <linux/suspend.h> #include <linux/acpi.h> #include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/spinlock.h> #include <asm/intel_pmc_ipc.h> @@ -131,6 +132,7 @@ static struct intel_pmc_ipc_dev { /* gcr */ void __iomem *gcr_mem_base; bool has_gcr_regs; + spinlock_t gcr_lock; /* punit */ struct platform_device *punit_dev; @@ -225,17 +227,17 @@ int intel_pmc_gcr_read(u32 offset, u32 *data) { int ret; - mutex_lock(&ipclock); + spin_lock(&ipcdev.gcr_lock); ret = is_gcr_valid(offset); if (ret < 0) { - mutex_unlock(&ipclock); + spin_unlock(&ipcdev.gcr_lock); return ret; } *data = readl(ipcdev.gcr_mem_base + offset); - mutex_unlock(&ipclock); + spin_unlock(&ipcdev.gcr_lock); return 0; } @@ -255,17 +257,17 @@ int intel_pmc_gcr_write(u32 offset, u32 data) { int ret; - mutex_lock(&ipclock); + spin_lock(&ipcdev.gcr_lock); ret = is_gcr_valid(offset); if (ret < 0) { - mutex_unlock(&ipclock); + spin_unlock(&ipcdev.gcr_lock); return ret; } writel(data, ipcdev.gcr_mem_base + offset); - mutex_unlock(&ipclock); + spin_unlock(&ipcdev.gcr_lock); return 0; } @@ -287,7 +289,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) u32 new_val; int ret = 0; - mutex_lock(&ipclock); + spin_lock(&ipcdev.gcr_lock); ret = is_gcr_valid(offset); if (ret < 0) @@ -309,7 +311,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) } gcr_ipc_unlock: - mutex_unlock(&ipclock); + spin_unlock(&ipcdev.gcr_lock); return ret; } EXPORT_SYMBOL_GPL(intel_pmc_gcr_update); @@ -480,52 +482,41 @@ static irqreturn_t ioc(int irq, void *dev_id) static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - resource_size_t pci_resource; + struct intel_pmc_ipc_dev *pmc = &ipcdev; int ret; - int len; - ipcdev.dev = &pci_dev_get(pdev)->dev; - ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ; + /* Only one PMC is supported */ + if (pmc->dev) + return -EBUSY; - ret = pci_enable_device(pdev); + pmc->irq_mode = IPC_TRIGGER_MODE_IRQ; + + spin_lock_init(&ipcdev.gcr_lock); + + ret = pcim_enable_device(pdev); if (ret) return ret; - ret = pci_request_regions(pdev, "intel_pmc_ipc"); + ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); if (ret) return ret; - pci_resource = pci_resource_start(pdev, 0); - len = pci_resource_len(pdev, 0); - if (!pci_resource || !len) { - dev_err(&pdev->dev, "Failed to 
get resource\n"); - return -ENOMEM; - } + init_completion(&pmc->cmd_complete); - init_completion(&ipcdev.cmd_complete); + pmc->ipc_base = pcim_iomap_table(pdev)[0]; - if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) { + ret = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_pmc_ipc", + pmc); + if (ret) { dev_err(&pdev->dev, "Failed to request irq\n"); - return -EBUSY; + return ret; } - ipcdev.ipc_base = ioremap_nocache(pci_resource, len); - if (!ipcdev.ipc_base) { - dev_err(&pdev->dev, "Failed to ioremap ipc base\n"); - free_irq(pdev->irq, &ipcdev); - ret = -ENOMEM; - } + pmc->dev = &pdev->dev; - return ret; -} + pci_set_drvdata(pdev, pmc); -static void ipc_pci_remove(struct pci_dev *pdev) -{ - free_irq(pdev->irq, &ipcdev); - pci_release_regions(pdev); - pci_dev_put(pdev); - iounmap(ipcdev.ipc_base); - ipcdev.dev = NULL; + return 0; } static const struct pci_device_id ipc_pci_ids[] = { @@ -540,7 +531,6 @@ static struct pci_driver ipc_pci_driver = { .name = "intel_pmc_ipc", .id_table = ipc_pci_ids, .probe = ipc_pci_probe, - .remove = ipc_pci_remove, }; static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev, @@ -850,17 +840,12 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE; + res->end = res->start + size - 1; + + addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(addr)) + return PTR_ERR(addr); - if (!request_mem_region(res->start, size, pdev->name)) { - dev_err(&pdev->dev, "Failed to request ipc resource\n"); - return -EBUSY; - } - addr = ioremap_nocache(res->start, size); - if (!addr) { - dev_err(&pdev->dev, "I/O memory remapping failed\n"); - release_mem_region(res->start, size); - return -ENOMEM; - } ipcdev.ipc_base = addr; ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET; @@ -917,12 +902,12 @@ MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids); static int ipc_plat_probe(struct platform_device *pdev) { - struct resource *res; int ret; ipcdev.dev = &pdev->dev; ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ; init_completion(&ipcdev.cmd_complete); + spin_lock_init(&ipcdev.gcr_lock); ipcdev.irq = platform_get_irq(pdev, 0); if (ipcdev.irq < 0) { @@ -939,11 +924,11 @@ static int ipc_plat_probe(struct platform_device *pdev) ret = ipc_create_pmc_devices(); if (ret) { dev_err(&pdev->dev, "Failed to create pmc devices\n"); - goto err_device; + return ret; } - if (request_irq(ipcdev.irq, ioc, IRQF_NO_SUSPEND, - "intel_pmc_ipc", &ipcdev)) { + if (devm_request_irq(&pdev->dev, ipcdev.irq, ioc, IRQF_NO_SUSPEND, + "intel_pmc_ipc", &ipcdev)) { dev_err(&pdev->dev, "Failed to request irq\n"); ret = -EBUSY; goto err_irq; @@ -960,40 +945,22 @@ static int ipc_plat_probe(struct platform_device *pdev) return 0; err_sys: - free_irq(ipcdev.irq, &ipcdev); + devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); err_irq: platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); platform_device_unregister(ipcdev.telemetry_dev); -err_device: - iounmap(ipcdev.ipc_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, - PLAT_RESOURCE_IPC_INDEX); - if (res) { - release_mem_region(res->start, - PLAT_RESOURCE_IPC_SIZE + - PLAT_RESOURCE_GCR_SIZE); - } + return ret; } static int ipc_plat_remove(struct platform_device *pdev) { - struct resource *res; - sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group); - free_irq(ipcdev.irq, &ipcdev); + devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); 
platform_device_unregister(ipcdev.telemetry_dev); - iounmap(ipcdev.ipc_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, - PLAT_RESOURCE_IPC_INDEX); - if (res) { - release_mem_region(res->start, - PLAT_RESOURCE_IPC_SIZE + - PLAT_RESOURCE_GCR_SIZE); - } ipcdev.dev = NULL; return 0; } diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index f18b36dd57dd..376a99b7cf5d 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -590,7 +590,7 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id) case AXP803_DCDC3: return !!(reg & BIT(6)); case AXP803_DCDC6: - return !!(reg & BIT(7)); + return !!(reg & BIT(5)); } break; diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c index ef2be56460fe..790a4a73ea2c 100644 --- a/drivers/regulator/rn5t618-regulator.c +++ b/drivers/regulator/rn5t618-regulator.c @@ -29,7 +29,7 @@ static const struct regulator_ops rn5t618_reg_ops = { }; #define REG(rid, ereg, emask, vreg, vmask, min, max, step) \ - [RN5T618_##rid] = { \ + { \ .name = #rid, \ .of_match = of_match_ptr(#rid), \ .regulators_node = of_match_ptr("regulators"), \ diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 82ac331d9125..84752152d41f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -357,6 +357,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) adapter->next_port_scan = jiffies; + adapter->erp_action.adapter = adapter; + if (zfcp_qdio_setup(adapter)) goto failed; @@ -513,6 +515,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, port->dev.groups = zfcp_port_attr_groups; port->dev.release = zfcp_port_release; + port->erp_action.adapter = adapter; + port->erp_action.port = port; + if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { kfree(port); goto err_out; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 37408f5f81ce..ec2532ee1822 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &zfcp_sdev->status); erp_action = &zfcp_sdev->erp_action; - memset(erp_action, 0, sizeof(struct zfcp_erp_action)); - erp_action->port = port; - erp_action->sdev = sdev; + WARN_ON_ONCE(erp_action->port != port); + WARN_ON_ONCE(erp_action->sdev != sdev); if (!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; @@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, zfcp_erp_action_dismiss_port(port); atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); erp_action = &port->erp_action; - memset(erp_action, 0, sizeof(struct zfcp_erp_action)); - erp_action->port = port; + WARN_ON_ONCE(erp_action->port != port); + WARN_ON_ONCE(erp_action->sdev != NULL); if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; @@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, zfcp_erp_action_dismiss_adapter(adapter); atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); erp_action = &adapter->erp_action; - memset(erp_action, 0, sizeof(struct zfcp_erp_action)); + WARN_ON_ONCE(erp_action->port != NULL); + WARN_ON_ONCE(erp_action->sdev != NULL); if (!(atomic_read(&adapter->status) & 
ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; @@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, return NULL; } - erp_action->adapter = adapter; + WARN_ON_ONCE(erp_action->adapter != adapter); + memset(&erp_action->list, 0, sizeof(erp_action->list)); + memset(&erp_action->timer, 0, sizeof(erp_action->timer)); + erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED; + erp_action->fsf_req_id = 0; erp_action->action = need; erp_action->status = act_status; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index ec3ddd1d31d5..6cf8732627e0 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) struct zfcp_unit *unit; int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE; + zfcp_sdev->erp_action.adapter = adapter; + zfcp_sdev->erp_action.sdev = sdev; + port = zfcp_get_port_by_wwpn(adapter, rport->port_name); if (!port) return -ENXIO; + zfcp_sdev->erp_action.port = port; + unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); if (unit) put_device(&unit->dev); diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 97d269f16888..1bc623ad3faf 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev) return -ENOMEM; aac_fib_init(fibctx); - mutex_lock(&dev->ioctl_mutex); - dev->adapter_shutdown = 1; - mutex_unlock(&dev->ioctl_mutex); + if (!dev->adapter_shutdown) { + mutex_lock(&dev->ioctl_mutex); + dev->adapter_shutdown = 1; + mutex_unlock(&dev->ioctl_mutex); + } cmd = (struct aac_close *) fib_data(fibctx); cmd->command = cpu_to_le32(VM_CloseAll); diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 62beb2596466..c9252b138c1f 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1551,8 +1551,9 @@ static void __aac_shutdown(struct aac_dev * aac) { int i; + mutex_lock(&aac->ioctl_mutex); aac->adapter_shutdown = 1; - aac_send_shutdown(aac); + mutex_unlock(&aac->ioctl_mutex); if (aac->aif_thread) { int i; @@ -1565,7 +1566,11 @@ static void __aac_shutdown(struct aac_dev * aac) } kthread_stop(aac->thread); } + + aac_send_shutdown(aac); + aac_adapter_disable_int(aac); + if (aac_is_src(aac)) { if (aac->max_msix > 1) { for (i = 0; i < aac->max_msix; i++) { diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 9abe81021484..4ed3d26ffdde 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -4091,7 +4091,7 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h, memset(id_ctlr, 0, sizeof(*id_ctlr)); rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); if (!rc) - if (id_ctlr->configured_logical_drive_count < 256) + if (id_ctlr->configured_logical_drive_count < 255) *nlocals = id_ctlr->configured_logical_drive_count; else *nlocals = le16_to_cpu( diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 937209805baf..3bd956d3bc5d 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3061,6 +3061,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->max_cmd_len, host->max_channel, host->max_lun, host->transportt, sht->vendor_id); + INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); + /* Set up the irqs */ ret = qla2x00_request_irqs(ha, rsp); if (ret) @@ -3175,8 +3177,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct 
pci_device_id *id) host->can_queue, base_vha->req, base_vha->mgmt_svr_loop_id, host->sg_tablesize); - INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); - if (ha->mqenable) { bool mq = false; bool startit = false; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9cf6a80fe297..ad3ea24f0885 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1379,8 +1379,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) ret = scsi_setup_cmnd(sdev, req); out: - if (ret != BLKPREP_OK) - cmd->flags &= ~SCMD_INITIALIZED; return scsi_prep_return(q, req, ret); } @@ -1900,7 +1898,6 @@ static int scsi_mq_prep_fn(struct request *req) struct scsi_device *sdev = req->q->queuedata; struct Scsi_Host *shost = sdev->host; struct scatterlist *sg; - int ret; scsi_init_command(sdev, cmd); @@ -1934,10 +1931,7 @@ static int scsi_mq_prep_fn(struct request *req) blk_mq_start_request(req); - ret = scsi_setup_cmnd(sdev, req); - if (ret != BLK_STS_OK) - cmd->flags &= ~SCMD_INITIALIZED; - return ret; + return scsi_setup_cmnd(sdev, req); } static void scsi_mq_done(struct scsi_cmnd *cmd) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 0419c2298eab..aa28874e8fb9 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) val = 0; list_for_each_entry(srp, &sfp->rq_list, entry) { - if (val > SG_MAX_QUEUE) + if (val >= SG_MAX_QUEUE) break; rinfo[val].req_state = srp->done + 1; rinfo[val].problem = diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index 6c7d7a460689..568e1c65aa82 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -99,11 +99,6 @@ /* A3700_SPI_IF_TIME_REG */ #define A3700_SPI_CLK_CAPT_EDGE BIT(7) -/* Flags and macros for struct a3700_spi */ -#define A3700_INSTR_CNT 1 -#define A3700_ADDR_CNT 3 -#define A3700_DUMMY_CNT 1 - struct a3700_spi { struct spi_master *master; void __iomem *base; @@ -117,9 +112,6 @@ struct a3700_spi { u8 byte_len; u32 wait_mask; struct completion done; - u32 addr_cnt; - u32 instr_cnt; - size_t hdr_cnt; }; static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset) @@ -161,7 +153,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi, } static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi, - unsigned int pin_mode) + unsigned int pin_mode, bool receiving) { u32 val; @@ -177,6 +169,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi, break; case SPI_NBITS_QUAD: val |= A3700_SPI_DATA_PIN1; + /* RX during address reception uses 4-pin */ + if (receiving) + val |= A3700_SPI_ADDR_PIN; break; default: dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode); @@ -392,7 +387,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi) spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); - return true; + /* Timeout was reached */ + return false; } static bool a3700_spi_transfer_wait(struct spi_device *spi, @@ -446,59 +442,43 @@ static void a3700_spi_set_cs(struct spi_device *spi, bool enable) static void a3700_spi_header_set(struct a3700_spi *a3700_spi) { - u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0; + unsigned int addr_cnt; u32 val = 0; /* Clear the header registers */ spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0); spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0); spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0); + spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0); /* Set header counters */ if (a3700_spi->tx_buf) { - if (a3700_spi->buf_len <= 
a3700_spi->instr_cnt) { - instr_cnt = a3700_spi->buf_len; - } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt + - a3700_spi->addr_cnt)) { - instr_cnt = a3700_spi->instr_cnt; - addr_cnt = a3700_spi->buf_len - instr_cnt; - } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) { - instr_cnt = a3700_spi->instr_cnt; - addr_cnt = a3700_spi->addr_cnt; - /* Need to handle the normal write case with 1 byte - * data - */ - if (!a3700_spi->tx_buf[instr_cnt + addr_cnt]) - dummy_cnt = a3700_spi->buf_len - instr_cnt - - addr_cnt; + /* + * when tx data is not 4 bytes aligned, there will be unexpected + * bytes out of SPI output register, since it always shifts out + * as whole 4 bytes. This might cause incorrect transaction with + * some devices. To avoid that, use SPI header count feature to + * transfer up to 3 bytes of data first, and then make the rest + * of data 4-byte aligned. + */ + addr_cnt = a3700_spi->buf_len % 4; + if (addr_cnt) { + val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK) + << A3700_SPI_ADDR_CNT_BIT; + spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val); + + /* Update the buffer length to be transferred */ + a3700_spi->buf_len -= addr_cnt; + + /* transfer 1~3 bytes through address count */ + val = 0; + while (addr_cnt--) { + val = (val << 8) | a3700_spi->tx_buf[0]; + a3700_spi->tx_buf++; + } + spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val); } - val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK) - << A3700_SPI_INSTR_CNT_BIT); - val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK) - << A3700_SPI_ADDR_CNT_BIT); - val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK) - << A3700_SPI_DUMMY_CNT_BIT); } - spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val); - - /* Update the buffer length to be transferred */ - a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt); - - /* Set Instruction */ - val = 0; - while (instr_cnt--) { - val = (val << 8) | a3700_spi->tx_buf[0]; - a3700_spi->tx_buf++; - } - spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val); - - /* Set Address */ - val = 0; - while (addr_cnt--) { - val = (val << 8) | a3700_spi->tx_buf[0]; - a3700_spi->tx_buf++; - } - spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val); } static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi) @@ -512,35 +492,12 @@ static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi) static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi) { u32 val; - int i = 0; while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) { - val = 0; - if (a3700_spi->buf_len >= 4) { - val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf); - spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); - - a3700_spi->buf_len -= 4; - a3700_spi->tx_buf += 4; - } else { - /* - * If the remained buffer length is less than 4-bytes, - * we should pad the write buffer with all ones. So that - * it avoids overwrite the unexpected bytes following - * the last one. 
- */ - val = GENMASK(31, 0); - while (a3700_spi->buf_len) { - val &= ~(0xff << (8 * i)); - val |= *a3700_spi->tx_buf++ << (8 * i); - i++; - a3700_spi->buf_len--; - - spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, - val); - } - break; - } + val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf); + spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); + a3700_spi->buf_len -= 4; + a3700_spi->tx_buf += 4; } return 0; @@ -645,15 +602,18 @@ static int a3700_spi_transfer_one(struct spi_master *master, a3700_spi->rx_buf = xfer->rx_buf; a3700_spi->buf_len = xfer->len; - /* SPI transfer headers */ - a3700_spi_header_set(a3700_spi); - if (xfer->tx_buf) nbits = xfer->tx_nbits; else if (xfer->rx_buf) nbits = xfer->rx_nbits; - a3700_spi_pin_mode_set(a3700_spi, nbits); + a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false); + + /* Flush the FIFOs */ + a3700_spi_fifo_flush(a3700_spi); + + /* Transfer first bytes of data when buffer is not 4-byte aligned */ + a3700_spi_header_set(a3700_spi); if (xfer->rx_buf) { /* Set read data length */ @@ -733,16 +693,11 @@ static int a3700_spi_transfer_one(struct spi_master *master, dev_err(&spi->dev, "wait wfifo empty timed out\n"); return -ETIMEDOUT; } - } else { - /* - * If the instruction in SPI_INSTR does not require data - * to be written to the SPI device, wait until SPI_RDY - * is 1 for the SPI interface to be in idle. - */ - if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) { - dev_err(&spi->dev, "wait xfer ready timed out\n"); - return -ETIMEDOUT; - } + } + + if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) { + dev_err(&spi->dev, "wait xfer ready timed out\n"); + return -ETIMEDOUT; } val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); @@ -834,10 +789,6 @@ static int a3700_spi_probe(struct platform_device *pdev) memset(spi, 0, sizeof(struct a3700_spi)); spi->master = master; - spi->instr_cnt = A3700_INSTR_CNT; - spi->addr_cnt = A3700_ADDR_CNT; - spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT + - A3700_DUMMY_CNT; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); spi->base = devm_ioremap_resource(dev, res); diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 6ef6c44f39f5..a172ab299e80 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1250,7 +1250,7 @@ int bcm_qspi_probe(struct platform_device *pdev, goto qspi_probe_err; } } else { - goto qspi_probe_err; + goto qspi_resource_err; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi"); @@ -1272,7 +1272,7 @@ int bcm_qspi_probe(struct platform_device *pdev, qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res); if (IS_ERR(qspi->base[CHIP_SELECT])) { ret = PTR_ERR(qspi->base[CHIP_SELECT]); - goto qspi_probe_err; + goto qspi_resource_err; } } @@ -1280,7 +1280,7 @@ int bcm_qspi_probe(struct platform_device *pdev, GFP_KERNEL); if (!qspi->dev_ids) { ret = -ENOMEM; - goto qspi_probe_err; + goto qspi_resource_err; } for (val = 0; val < num_irqs; val++) { @@ -1369,8 +1369,9 @@ qspi_reg_err: bcm_qspi_hw_uninit(qspi); clk_disable_unprepare(qspi->clk); qspi_probe_err: - spi_master_put(master); kfree(qspi->dev_ids); +qspi_resource_err: + spi_master_put(master); return ret; } /* probe function to be called by SoC specific platform driver probe */ diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 680cdf549506..ba9743fa2326 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -263,8 +263,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) * no need to check it there. 
* However, we need to ensure the following calculations. */ - if ((div < SPI_MBR_DIV_MIN) && - (div > SPI_MBR_DIV_MAX)) + if (div < SPI_MBR_DIV_MIN || + div > SPI_MBR_DIV_MAX) return -EINVAL; /* Determine the first power of 2 greater than or equal to div */ diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 6e65524cbfd9..e8b5a5e21b2e 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -45,7 +45,6 @@ #define CREATE_TRACE_POINTS #include <trace/events/spi.h> -#define SPI_DYN_FIRST_BUS_NUM 0 static DEFINE_IDR(spi_master_idr); @@ -2086,7 +2085,7 @@ int spi_register_controller(struct spi_controller *ctlr) struct device *dev = ctlr->dev.parent; struct boardinfo *bi; int status = -ENODEV; - int id; + int id, first_dynamic; if (!dev) return -ENODEV; @@ -2116,9 +2115,15 @@ int spi_register_controller(struct spi_controller *ctlr) } } if (ctlr->bus_num < 0) { + first_dynamic = of_alias_get_highest_id("spi"); + if (first_dynamic < 0) + first_dynamic = 0; + else + first_dynamic++; + mutex_lock(&board_lock); - id = idr_alloc(&spi_master_idr, ctlr, SPI_DYN_FIRST_BUS_NUM, 0, - GFP_KERNEL); + id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, + 0, GFP_KERNEL); mutex_unlock(&board_lock); if (WARN(id < 0, "couldn't get idr")) return id; diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 82360594fa8e..57efbd3b053b 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -1024,6 +1024,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) mutex_unlock(&priv->lock); if (use_ptemod) { + map->pages_vm_start = vma->vm_start; err = apply_to_page_range(vma->vm_mm, vma->vm_start, vma->vm_end - vma->vm_start, find_grant_ptes, map); @@ -1061,7 +1062,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) set_grant_ptes_as_special, NULL); } #endif - map->pages_vm_start = vma->vm_start; } return 0; diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index e89136ab851e..b437fccd4e62 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c @@ -57,7 +57,7 @@ static int register_balloon(struct device *dev); static void watch_target(struct xenbus_watch *watch, const char *path, const char *token) { - unsigned long long new_target; + unsigned long long new_target, static_max; int err; static bool watch_fired; static long target_diff; @@ -72,13 +72,20 @@ static void watch_target(struct xenbus_watch *watch, * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. */ new_target >>= PAGE_SHIFT - 10; - if (watch_fired) { - balloon_set_new_target(new_target - target_diff); - return; + + if (!watch_fired) { + watch_fired = true; + err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu", + &static_max); + if (err != 1) + static_max = new_target; + else + static_max >>= PAGE_SHIFT - 10; + target_diff = xen_pv_domain() ? 0 + : static_max - balloon_stats.target_pages; } - watch_fired = true; - target_diff = new_target - balloon_stats.target_pages; + balloon_set_new_target(new_target - target_diff); } static struct xenbus_watch target_watch = { .node = "memory/target", |