Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c         | 11
-rw-r--r--  drivers/base/firmware_loader/main.c  | 17
-rw-r--r--  drivers/base/power/runtime.c         | 53
-rw-r--r--  drivers/base/topology.c              | 10
4 files changed, 79 insertions, 12 deletions
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1d6636ebaac5..f73b836047cf 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -667,6 +667,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 			core_mask = &cpu_topology[cpu].llc_sibling;
 	}
 
+	/*
+	 * For systems with no shared cpu-side LLC but with clusters defined,
+	 * extend core_mask to cluster_siblings. The sched domain builder will
+	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
+	 */
+	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
+	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
+		core_mask = &cpu_topology[cpu].cluster_sibling;
+
 	return core_mask;
 }
 
@@ -684,7 +693,7 @@ void update_siblings_masks(unsigned int cpuid)
 	for_each_online_cpu(cpu) {
 		cpu_topo = &cpu_topology[cpu];
 
-		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+		if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
 			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
 			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
 		}
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 94d1789a233e..406a907a4cae 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -735,6 +735,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 		  size_t offset, u32 opt_flags)
 {
 	struct firmware *fw = NULL;
+	struct cred *kern_cred = NULL;
+	const struct cred *old_cred;
 	bool nondirect = false;
 	int ret;
 
@@ -751,6 +753,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 	if (ret <= 0) /* error or already assigned */
 		goto out;
 
+	/*
+	 * We are about to try to access the firmware file. Because we may have been
+	 * called by a driver when serving an unrelated request from userland, we use
+	 * the kernel credentials to read the file.
+	 */
+	kern_cred = prepare_kernel_cred(NULL);
+	if (!kern_cred) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	old_cred = override_creds(kern_cred);
+
 	ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
 
 	/* Only full reads can support decompression, platform, and sysfs. */
@@ -776,6 +790,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 	} else
 		ret = assign_fw(fw, device);
 
+	revert_creds(old_cred);
+	put_cred(kern_cred);
+
 out:
 	if (ret < 0) {
 		fw_abort_batch_reqs(fw);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index d4059e6ffeae..676dc72d912d 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -263,7 +263,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
 		retval = -EINVAL;
 	else if (dev->power.disable_depth > 0)
 		retval = -EACCES;
-	else if (atomic_read(&dev->power.usage_count) > 0)
+	else if (atomic_read(&dev->power.usage_count))
 		retval = -EAGAIN;
 	else if (!dev->power.ignore_children &&
 		 atomic_read(&dev->power.child_count))
@@ -1039,13 +1039,33 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 }
 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 
+static int rpm_drop_usage_count(struct device *dev)
+{
+	int ret;
+
+	ret = atomic_sub_return(1, &dev->power.usage_count);
+	if (ret >= 0)
+		return ret;
+
+	/*
+	 * Because rpm_resume() does not check the usage counter, it will resume
+	 * the device even if the usage counter is 0 or negative, so it is
+	 * sufficient to increment the usage counter here to reverse the change
+	 * made above.
+	 */
+	atomic_inc(&dev->power.usage_count);
+	dev_warn(dev, "Runtime PM usage count underflow!\n");
+	return -EINVAL;
+}
+
 /**
  * __pm_runtime_idle - Entry point for runtime idle operations.
  * @dev: Device to send idle notification for.
  * @rpmflags: Flag bits.
  *
  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
- * return immediately if it is larger than zero. Then carry out an idle
+ * return immediately if it is larger than zero (if it becomes negative, log a
+ * warning, increment it, and return an error). Then carry out an idle
  * notification, either synchronous or asynchronous.
  *
  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
@@ -1057,7 +1077,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
 	int retval;
 
 	if (rpmflags & RPM_GET_PUT) {
-		if (!atomic_dec_and_test(&dev->power.usage_count)) {
+		retval = rpm_drop_usage_count(dev);
+		if (retval < 0) {
+			return retval;
+		} else if (retval > 0) {
 			trace_rpm_usage_rcuidle(dev, rpmflags);
 			return 0;
 		}
@@ -1079,7 +1102,8 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
  * @rpmflags: Flag bits.
  *
  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
- * return immediately if it is larger than zero. Then carry out a suspend,
+ * return immediately if it is larger than zero (if it becomes negative, log a
+ * warning, increment it, and return an error). Then carry out a suspend,
  * either synchronous or asynchronous.
  *
  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
@@ -1091,7 +1115,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	int retval;
 
 	if (rpmflags & RPM_GET_PUT) {
-		if (!atomic_dec_and_test(&dev->power.usage_count)) {
+		retval = rpm_drop_usage_count(dev);
+		if (retval < 0) {
+			return retval;
+		} else if (retval > 0) {
 			trace_rpm_usage_rcuidle(dev, rpmflags);
 			return 0;
 		}
@@ -1210,12 +1237,13 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 {
 	struct device *parent = dev->parent;
 	bool notify_parent = false;
+	unsigned long flags;
 	int error = 0;
 
 	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
 		return -EINVAL;
 
-	spin_lock_irq(&dev->power.lock);
+	spin_lock_irqsave(&dev->power.lock, flags);
 
 	/*
 	 * Prevent PM-runtime from being enabled for the device or return an
@@ -1226,7 +1254,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 	else
 		error = -EAGAIN;
 
-	spin_unlock_irq(&dev->power.lock);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	if (error)
 		return error;
@@ -1247,7 +1275,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 		device_links_read_unlock(idx);
 	}
 
-	spin_lock_irq(&dev->power.lock);
+	spin_lock_irqsave(&dev->power.lock, flags);
 
 	if (dev->power.runtime_status == status || !parent)
 		goto out_set;
@@ -1288,7 +1316,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 		dev->power.runtime_error = 0;
 
  out:
-	spin_unlock_irq(&dev->power.lock);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	if (notify_parent)
 		pm_request_idle(parent);
@@ -1527,14 +1555,17 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
  */
 void pm_runtime_allow(struct device *dev)
 {
+	int ret;
+
 	spin_lock_irq(&dev->power.lock);
 	if (dev->power.runtime_auto)
 		goto out;
 
 	dev->power.runtime_auto = true;
-	if (atomic_dec_and_test(&dev->power.usage_count))
+	ret = rpm_drop_usage_count(dev);
+	if (ret == 0)
 		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
-	else
+	else if (ret > 0)
 		trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
 
  out:
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index e9d1efcda89b..ac6ad9ab67f9 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -152,9 +152,19 @@ static struct attribute *default_attrs[] = {
 	NULL
 };
 
+static umode_t topology_is_visible(struct kobject *kobj,
+				   struct attribute *attr, int unused)
+{
+	if (attr == &dev_attr_ppin.attr && !topology_ppin(kobj_to_dev(kobj)->id))
+		return 0;
+
+	return attr->mode;
+}
+
 static const struct attribute_group topology_attr_group = {
 	.attrs = default_attrs,
 	.bin_attrs = bin_attrs,
+	.is_visible = topology_is_visible,
 	.name = "topology"
 };
 