author     Linus Torvalds <torvalds@linux-foundation.org>  2017-07-03 18:08:06 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-03 18:08:06 -0700
commit     9a9594efe54324e9124add7e7b1e7bdb6d0b08a3 (patch)
tree       4544014bf1c214c9c004fe0a64ac78eaa6232cfe /drivers
parent     3ad918e65d6926490c8f18a157cea25bf29ecd3a (diff)
parent     993647a293814dd47ae41d38657fda6e4ab04e33 (diff)
download   linux-9a9594efe54324e9124add7e7b1e7bdb6d0b08a3.tar.bz2
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull SMP hotplug updates from Thomas Gleixner:
"This update is primarily a cleanup of the CPU hotplug locking code.
The hotplug locking mechanism is an open coded RWSEM, which allows
recursive locking. The main problem with that is the recursive nature
as it evades the full lockdep coverage and hides potential deadlocks.
The rework replaces the open coded RWSEM with a percpu RWSEM and
establishes full lockdep coverage that way.
The bulk of the changes fix up recursive locking issues and address
the now fully reported potential deadlocks all over the place. Some of
these deadlocks have been observed in the RT tree, but on mainline the
probability was low enough to hide them away."
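For readers who want the shape of the conversion in one place, below is a minimal sketch of the read-side pattern the series converges on. It is illustrative only: example_walk_online_cpus() is a hypothetical function, not code from this merge; cpus_read_lock()/cpus_read_unlock() are the percpu-rwsem backed replacements for the get_online_cpus()/put_online_cpus() pair.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

/*
 * Hypothetical illustration only, not code from this merge: a hotplug
 * read-side critical section. Before this series the same section would
 * have been bracketed by get_online_cpus()/put_online_cpus(); the percpu
 * rwsem based lock below is equivalent for readers, but it is fully
 * visible to lockdep and must not be taken recursively.
 */
static void example_walk_online_cpus(void)
{
        unsigned int cpu;

        cpus_read_lock();
        for_each_online_cpu(cpu)
                pr_info("cpu%u is online\n", cpu);
        cpus_read_unlock();
}

Code that registers dynamic hotplug states while already holding the lock has to use the *_cpuslocked() setup variants instead of cpuhp_setup_state_nocalls(), which is exactly the conversion the cpufreq and coresight hunks in the diff below perform.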
* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
cpu/hotplug: Constify attribute_group structures
powerpc: Only obtain cpu_hotplug_lock if called by rtasd
ARM/hw_breakpoint: Fix possible recursive locking for arch_hw_breakpoint_init
cpu/hotplug: Remove unused check_for_tasks() function
perf/core: Don't release cred_guard_mutex if not taken
cpuhotplug: Link lock stacks for hotplug callbacks
acpi/processor: Prevent cpu hotplug deadlock
sched: Provide is_percpu_thread() helper
cpu/hotplug: Convert hotplug locking to percpu rwsem
s390: Prevent hotplug rwsem recursion
arm: Prevent hotplug rwsem recursion
arm64: Prevent cpu hotplug rwsem recursion
kprobes: Cure hotplug lock ordering issues
jump_label: Reorder hotplug lock and jump_label_lock
perf/tracing/cpuhotplug: Fix locking order
ACPI/processor: Use cpu_hotplug_disable() instead of get_online_cpus()
PCI: Replace the racy recursion prevention
PCI: Use cpu_hotplug_disable() instead of get_online_cpus()
perf/x86/intel: Drop get_online_cpus() in intel_snb_check_microcode()
x86/perf: Drop EXPORT of perf_check_microcode
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_driver.c                |  4
-rw-r--r--  drivers/acpi/processor_throttling.c            | 16
-rw-r--r--  drivers/cpufreq/cpufreq.c                      | 21
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c  | 20
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c  | 20
-rw-r--r--  drivers/pci/pci-driver.c                       | 47
6 files changed, 66 insertions(+), 62 deletions(-)
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 8697a82bd465..591d1dd3f04e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -268,9 +268,9 @@ static int acpi_processor_start(struct device *dev)
                 return -ENODEV;
 
         /* Protect against concurrent CPU hotplug operations */
-        get_online_cpus();
+        cpu_hotplug_disable();
         ret = __acpi_processor_start(device);
-        put_online_cpus();
+        cpu_hotplug_enable();
         return ret;
 }
 
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 3de34633f7f9..7f9aff4b8d62 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -909,6 +909,13 @@ static long __acpi_processor_get_throttling(void *data)
         return pr->throttling.acpi_processor_get_throttling(pr);
 }
 
+static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
+{
+        if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
+                return fn(arg);
+        return work_on_cpu(cpu, fn, arg);
+}
+
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
         if (!pr)
@@ -926,7 +933,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
         if (!cpu_online(pr->id))
                 return -ENODEV;
 
-        return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
+        return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
 }
 
 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1076,13 +1083,6 @@ static long acpi_processor_throttling_fn(void *data)
                         arg->target_state, arg->force);
 }
 
-static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
-{
-        if (direct)
-                return fn(arg);
-        return work_on_cpu(cpu, fn, arg);
-}
-
 static int __acpi_processor_set_throttling(struct acpi_processor *pr,
                                            int state, bool force, bool direct)
 {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 26b643d57847..29c5b0cbad96 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -887,7 +887,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
         struct freq_attr *fattr = to_attr(attr);
         ssize_t ret = -EINVAL;
 
-        get_online_cpus();
+        cpus_read_lock();
 
         if (cpu_online(policy->cpu)) {
                 down_write(&policy->rwsem);
@@ -895,7 +895,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
                 up_write(&policy->rwsem);
         }
 
-        put_online_cpus();
+        cpus_read_unlock();
 
         return ret;
 }
@@ -2441,7 +2441,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
         pr_debug("trying to register driver %s\n", driver_data->name);
 
         /* Protect against concurrent CPU online/offline. */
-        get_online_cpus();
+        cpus_read_lock();
 
         write_lock_irqsave(&cpufreq_driver_lock, flags);
         if (cpufreq_driver) {
@@ -2474,9 +2474,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                 goto err_if_unreg;
         }
 
-        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
-                                        cpuhp_cpufreq_online,
-                                        cpuhp_cpufreq_offline);
+        ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+                                                   "cpufreq:online",
+                                                   cpuhp_cpufreq_online,
+                                                   cpuhp_cpufreq_offline);
         if (ret < 0)
                 goto err_if_unreg;
         hp_online = ret;
@@ -2494,7 +2495,7 @@ err_null_driver:
         cpufreq_driver = NULL;
         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 out:
-        put_online_cpus();
+        cpus_read_unlock();
         return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
@@ -2517,17 +2518,17 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
         pr_debug("unregistering driver %s\n", driver->name);
 
         /* Protect against concurrent cpu hotplug */
-        get_online_cpus();
+        cpus_read_lock();
         subsys_interface_unregister(&cpufreq_interface);
         remove_boost_sysfs_file();
-        cpuhp_remove_state_nocalls(hp_online);
+        cpuhp_remove_state_nocalls_cpuslocked(hp_online);
 
         write_lock_irqsave(&cpufreq_driver_lock, flags);
 
         cpufreq_driver = NULL;
 
         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-        put_online_cpus();
+        cpus_read_unlock();
 
         return 0;
 }
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index a51b6b64ecdf..93ee8fc539be 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -587,7 +587,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
          * after cpu online mask indicates the cpu is offline but before the
          * DYING hotplug callback is serviced by the ETM driver.
          */
-        get_online_cpus();
+        cpus_read_lock();
         spin_lock(&drvdata->spinlock);
 
         /*
@@ -597,7 +597,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
         smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
 
         spin_unlock(&drvdata->spinlock);
-        put_online_cpus();
+        cpus_read_unlock();
 
         dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
@@ -795,7 +795,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 
         drvdata->cpu = pdata ? pdata->cpu : 0;
 
-        get_online_cpus();
+        cpus_read_lock();
         etmdrvdata[drvdata->cpu] = drvdata;
 
         if (smp_call_function_single(drvdata->cpu,
@@ -803,17 +803,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
                 dev_err(dev, "ETM arch init failed\n");
 
         if (!etm_count++) {
-                cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
-                                          "arm/coresight:starting",
-                                          etm_starting_cpu, etm_dying_cpu);
-                ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-                                                "arm/coresight:online",
-                                                etm_online_cpu, NULL);
+                cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
+                                                     "arm/coresight:starting",
+                                                     etm_starting_cpu, etm_dying_cpu);
+                ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+                                                           "arm/coresight:online",
+                                                           etm_online_cpu, NULL);
                 if (ret < 0)
                         goto err_arch_supported;
                 hp_online = ret;
         }
-        put_online_cpus();
+        cpus_read_unlock();
 
         if (etm_arch_supported(drvdata->arch) == false) {
                 ret = -EINVAL;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index d1340fb4e457..532adc9dd32a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -371,7 +371,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
          * after cpu online mask indicates the cpu is offline but before the
          * DYING hotplug callback is serviced by the ETM driver.
          */
-        get_online_cpus();
+        cpus_read_lock();
         spin_lock(&drvdata->spinlock);
 
         /*
@@ -381,7 +381,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
         smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
 
         spin_unlock(&drvdata->spinlock);
-        put_online_cpus();
+        cpus_read_unlock();
 
         dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
@@ -982,7 +982,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 
         drvdata->cpu = pdata ? pdata->cpu : 0;
 
-        get_online_cpus();
+        cpus_read_lock();
         etmdrvdata[drvdata->cpu] = drvdata;
 
         if (smp_call_function_single(drvdata->cpu,
@@ -990,18 +990,18 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
                 dev_err(dev, "ETM arch init failed\n");
 
         if (!etm4_count++) {
-                cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
-                                          "arm/coresight4:starting",
-                                          etm4_starting_cpu, etm4_dying_cpu);
-                ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-                                                "arm/coresight4:online",
-                                                etm4_online_cpu, NULL);
+                cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
+                                                     "arm/coresight4:starting",
+                                                     etm4_starting_cpu, etm4_dying_cpu);
+                ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+                                                           "arm/coresight4:online",
+                                                           etm4_online_cpu, NULL);
                 if (ret < 0)
                         goto err_arch_supported;
                 hp_online = ret;
         }
 
-        put_online_cpus();
+        cpus_read_unlock();
 
         if (etm4_arch_supported(drvdata->arch) == false) {
                 ret = -EINVAL;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 192e7b681b96..fe6be6382505 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -320,10 +320,19 @@ static long local_pci_probe(void *_ddi)
         return 0;
 }
 
+static bool pci_physfn_is_probed(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_IOV
+        return dev->is_virtfn && dev->physfn->is_probed;
+#else
+        return false;
+#endif
+}
+
 static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
                           const struct pci_device_id *id)
 {
-        int error, node;
+        int error, node, cpu;
         struct drv_dev_and_id ddi = { drv, dev, id };
 
         /*
@@ -332,33 +341,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
          * on the right node.
          */
         node = dev_to_node(&dev->dev);
+        dev->is_probed = 1;
+
+        cpu_hotplug_disable();
 
         /*
-         * On NUMA systems, we are likely to call a PF probe function using
-         * work_on_cpu(). If that probe calls pci_enable_sriov() (which
-         * adds the VF devices via pci_bus_add_device()), we may re-enter
-         * this function to call the VF probe function. Calling
-         * work_on_cpu() again will cause a lockdep warning. Since VFs are
-         * always on the same node as the PF, we can work around this by
-         * avoiding work_on_cpu() when we're already on the correct node.
-         *
-         * Preemption is enabled, so it's theoretically unsafe to use
-         * numa_node_id(), but even if we run the probe function on the
-         * wrong node, it should be functionally correct.
+         * Prevent nesting work_on_cpu() for the case where a Virtual Function
+         * device is probed from work_on_cpu() of the Physical device.
          */
-        if (node >= 0 && node != numa_node_id()) {
-                int cpu;
-
-                get_online_cpus();
+        if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
+            pci_physfn_is_probed(dev))
+                cpu = nr_cpu_ids;
+        else
                 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
-                if (cpu < nr_cpu_ids)
-                        error = work_on_cpu(cpu, local_pci_probe, &ddi);
-                else
-                        error = local_pci_probe(&ddi);
-                put_online_cpus();
-        } else
+
+        if (cpu < nr_cpu_ids)
+                error = work_on_cpu(cpu, local_pci_probe, &ddi);
+        else
                 error = local_pci_probe(&ddi);
 
+        dev->is_probed = 0;
+        cpu_hotplug_enable();
         return error;
 }