author    Linus Torvalds <torvalds@linux-foundation.org>  2016-10-03 19:43:08 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-10-03 19:43:08 -0700
commit    597f03f9d133e9837d00965016170271d4f87dcf (patch)
tree      33bdb5c1104d5b466387f4ae98748c5f4ddd29bb /drivers
parent    999dcbe2414e15e19cdc1f91497d01f262c6e1cf (diff)
parent    0bf71e4d02ffec8ab9a6adecca61d3eed74fc99d (diff)
download  linux-597f03f9d133e9837d00965016170271d4f87dcf.tar.bz2
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull CPU hotplug updates from Thomas Gleixner:
 "Yet another batch of cpu hotplug core updates and conversions:

   - Provide core infrastructure for multi instance drivers so the
     drivers do not have to keep custom lists.

   - Convert custom lists to the new infrastructure. The block-mq
     custom list conversion comes through the block tree and makes the
     diffstat tip over to more lines removed than added.

   - Handle unbalanced hotplug enable/disable calls more gracefully.

   - Remove the obsolete CPU_STARTING/DYING notifier support.

   - Convert another batch of notifier users.

   The relayfs changes which conflicted with the conversion have been
   shipped to me by Andrew.

   The remaining lot is targeted for 4.10 so that we finally can remove
   the rest of the notifiers"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
  cpufreq: Fix up conversion to hotplug state machine
  blk/mq: Reserve hotplug states for block multiqueue
  x86/apic/uv: Convert to hotplug state machine
  s390/mm/pfault: Convert to hotplug state machine
  mips/loongson/smp: Convert to hotplug state machine
  mips/octeon/smp: Convert to hotplug state machine
  fault-injection/cpu: Convert to hotplug state machine
  padata: Convert to hotplug state machine
  cpufreq: Convert to hotplug state machine
  ACPI/processor: Convert to hotplug state machine
  virtio scsi: Convert to hotplug state machine
  oprofile/timer: Convert to hotplug state machine
  block/softirq: Convert to hotplug state machine
  lib/irq_poll: Convert to hotplug state machine
  x86/microcode: Convert to hotplug state machine
  sh/SH-X3 SMP: Convert to hotplug state machine
  ia64/mca: Convert to hotplug state machine
  ARM/OMAP/wakeupgen: Convert to hotplug state machine
  ARM/shmobile: Convert to hotplug state machine
  arm64/FP/SIMD: Convert to hotplug state machine
  ...
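All of the driver conversions in this pull follow the multi-instance pattern that the new core infrastructure provides. As a rough, hedged sketch of that pattern (every demo_* name below is invented for illustration, not code from the patch):

#include <linux/cpuhotplug.h>
#include <linux/list.h>

struct demo_dev {
	struct hlist_node node;		/* replaces the custom list entry */
	/* ... per-device state ... */
};

static enum cpuhp_state demo_online_state;

/* The cpuhp core walks the registered instances, so the driver no
 * longer needs its own mutex-protected list in the callback. */
static int demo_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct demo_dev *dev = hlist_entry_safe(node, struct demo_dev, node);

	/* per-instance bring-up work for @cpu goes here */
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	/* Register the callbacks once at init time... */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo:online",
				      demo_cpu_online, NULL);
	if (ret < 0)
		return ret;
	demo_online_state = ret;	/* dynamic slots return their number */
	return 0;
}

/* ...and hook each device in at probe time: */
static int demo_probe(struct demo_dev *dev)
{
	return cpuhp_state_add_instance_nocalls(demo_online_state,
						&dev->node);
}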
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_driver.c        |  91
-rw-r--r--  drivers/acpi/processor_throttling.c    |   4
-rw-r--r--  drivers/bus/arm-cci.c                  |  45
-rw-r--r--  drivers/bus/arm-ccn.c                  |  54
-rw-r--r--  drivers/bus/mips_cdmm.c                |  70
-rw-r--r--  drivers/cpufreq/cpufreq.c              |  41
-rw-r--r--  drivers/cpuidle/coupled.c              |  75
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c      |  51
-rw-r--r--  drivers/cpuidle/cpuidle-pseries.c      |  51
-rw-r--r--  drivers/md/raid5.c                     |  84
-rw-r--r--  drivers/md/raid5.h                     |   4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  | 232
-rw-r--r--  drivers/net/virtio_net.c               | 110
-rw-r--r--  drivers/oprofile/timer_int.c           |  44
-rw-r--r--  drivers/perf/arm_pmu.c                 |  44
-rw-r--r--  drivers/scsi/virtio_scsi.c             |  76
16 files changed, 542 insertions(+), 534 deletions(-)
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 8f8552a19e63..9d5f0c7ed3f7 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -110,55 +110,46 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
static int __acpi_processor_start(struct acpi_device *device);
-static int acpi_cpu_soft_notify(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int acpi_soft_cpu_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
struct acpi_processor *pr = per_cpu(processors, cpu);
struct acpi_device *device;
- action &= ~CPU_TASKS_FROZEN;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DEAD:
- break;
- default:
- return NOTIFY_DONE;
- }
if (!pr || acpi_bus_get_device(pr->handle, &device))
- return NOTIFY_DONE;
-
- if (action == CPU_ONLINE) {
- /*
- * CPU got physically hotplugged and onlined for the first time:
- * Initialize missing things.
- */
- if (pr->flags.need_hotplug_init) {
- int ret;
-
- pr_info("Will online and init hotplugged CPU: %d\n",
- pr->id);
- pr->flags.need_hotplug_init = 0;
- ret = __acpi_processor_start(device);
- WARN(ret, "Failed to start CPU: %d\n", pr->id);
- } else {
- /* Normal CPU soft online event. */
- acpi_processor_ppc_has_changed(pr, 0);
- acpi_processor_hotplug(pr);
- acpi_processor_reevaluate_tstate(pr, action);
- acpi_processor_tstate_has_changed(pr);
- }
- } else if (action == CPU_DEAD) {
- /* Invalidate flag.throttling after the CPU is offline. */
- acpi_processor_reevaluate_tstate(pr, action);
+ return 0;
+ /*
+ * CPU got physically hotplugged and onlined for the first time:
+ * Initialize missing things.
+ */
+ if (pr->flags.need_hotplug_init) {
+ int ret;
+
+ pr_info("Will online and init hotplugged CPU: %d\n",
+ pr->id);
+ pr->flags.need_hotplug_init = 0;
+ ret = __acpi_processor_start(device);
+ WARN(ret, "Failed to start CPU: %d\n", pr->id);
+ } else {
+ /* Normal CPU soft online event. */
+ acpi_processor_ppc_has_changed(pr, 0);
+ acpi_processor_hotplug(pr);
+ acpi_processor_reevaluate_tstate(pr, false);
+ acpi_processor_tstate_has_changed(pr);
}
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block acpi_cpu_notifier = {
- .notifier_call = acpi_cpu_soft_notify,
-};
+static int acpi_soft_cpu_dead(unsigned int cpu)
+{
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+ struct acpi_device *device;
+
+ if (!pr || acpi_bus_get_device(pr->handle, &device))
+ return 0;
+
+ acpi_processor_reevaluate_tstate(pr, true);
+ return 0;
+}
#ifdef CONFIG_ACPI_CPU_FREQ_PSS
static int acpi_pss_perf_init(struct acpi_processor *pr,
@@ -303,7 +294,7 @@ static int acpi_processor_stop(struct device *dev)
* This is needed for the powernow-k8 driver, that works even without
* ACPI, but needs symbols from this driver
*/
-
+static enum cpuhp_state hp_online;
static int __init acpi_processor_driver_init(void)
{
int result = 0;
@@ -315,11 +306,22 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
- register_hotcpu_notifier(&acpi_cpu_notifier);
+ result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "acpi/cpu-drv:online",
+ acpi_soft_cpu_online, NULL);
+ if (result < 0)
+ goto err;
+ hp_online = result;
+ cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
+ NULL, acpi_soft_cpu_dead);
+
acpi_thermal_cpufreq_init();
acpi_processor_ppc_init();
acpi_processor_throttling_init();
return 0;
+err:
+ driver_unregister(&acpi_processor_driver);
+ return result;
}
static void __exit acpi_processor_driver_exit(void)
@@ -329,7 +331,8 @@ static void __exit acpi_processor_driver_exit(void)
acpi_processor_ppc_exit();
acpi_thermal_cpufreq_exit();
- unregister_hotcpu_notifier(&acpi_cpu_notifier);
+ cpuhp_remove_state_nocalls(hp_online);
+ cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
driver_unregister(&acpi_processor_driver);
}
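The ACPI conversion above mixes the two registration flavours: a dynamically allocated online state, whose returned slot number has to be stashed in hp_online for later removal, and the reserved CPUHP_ACPI_CPUDRV_DEAD constant. A minimal sketch of that pairing, assuming invented demo_* callbacks and a hypothetical CPUHP_DEMO_DEAD constant:

#include <linux/cpuhotplug.h>

static enum cpuhp_state demo_hp_online;		/* dynamic slot number */

static int demo_cpu_online(unsigned int cpu) { return 0; }
static int demo_cpu_dead(unsigned int cpu)   { return 0; }

static int __init demo_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN allocates a free slot; the slot number is
	 * returned on success and must be remembered for teardown. The
	 * _nocalls variants install the callbacks without invoking them
	 * on CPUs that are already online.
	 */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "demo:online",
					demo_cpu_online, NULL);
	if (ret < 0)
		return ret;
	demo_hp_online = ret;

	/* Reserved states are registered and removed by their constant
	 * (CPUHP_DEMO_DEAD is hypothetical). */
	cpuhp_setup_state_nocalls(CPUHP_DEMO_DEAD, "demo:dead",
				  NULL, demo_cpu_dead);
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state_nocalls(demo_hp_online);
	cpuhp_remove_state_nocalls(CPUHP_DEMO_DEAD);
}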
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index c72e64893d03..d51ca1c05619 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -375,11 +375,11 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
* 3. TSD domain
*/
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
- unsigned long action)
+ bool is_dead)
{
int result = 0;
- if (action == CPU_DEAD) {
+ if (is_dead) {
/* When one CPU is offline, the T-state throttling
* will be invalidated.
*/
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index ffa7c9dcbd7a..890082315054 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -144,15 +144,12 @@ struct cci_pmu {
int num_cntrs;
atomic_t active_events;
struct mutex reserve_mutex;
- struct list_head entry;
+ struct hlist_node node;
cpumask_t cpus;
};
#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
-static DEFINE_MUTEX(cci_pmu_mutex);
-static LIST_HEAD(cci_pmu_list);
-
enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
CCI400_R0,
@@ -1506,25 +1503,21 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
-static int cci_pmu_offline_cpu(unsigned int cpu)
+static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
- struct cci_pmu *cci_pmu;
+ struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node);
unsigned int target;
- mutex_lock(&cci_pmu_mutex);
- list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
- if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
- continue;
- target = cpumask_any_but(cpu_online_mask, cpu);
- if (target >= nr_cpu_ids)
- continue;
- /*
- * TODO: migrate context once core races on event->ctx have
- * been fixed.
- */
- cpumask_set_cpu(target, &cci_pmu->cpus);
- }
- mutex_unlock(&cci_pmu_mutex);
+ if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
+ return 0;
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+ /*
+ * TODO: migrate context once core races on event->ctx have
+ * been fixed.
+ */
+ cpumask_set_cpu(target, &cci_pmu->cpus);
return 0;
}
@@ -1768,10 +1761,8 @@ static int cci_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- mutex_lock(&cci_pmu_mutex);
- list_add(&cci_pmu->entry, &cci_pmu_list);
- mutex_unlock(&cci_pmu_mutex);
-
+ cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+ &cci_pmu->node);
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
return 0;
}
@@ -1804,9 +1795,9 @@ static int __init cci_platform_init(void)
{
int ret;
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
- "AP_PERF_ARM_CCI_ONLINE", NULL,
- cci_pmu_offline_cpu);
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+ "AP_PERF_ARM_CCI_ONLINE", NULL,
+ cci_pmu_offline_cpu);
if (ret)
return ret;
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 884c0305e290..d1074d9b38ba 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
struct hrtimer hrtimer;
cpumask_t cpu;
- struct list_head entry;
+ struct hlist_node node;
struct pmu pmu;
};
@@ -190,9 +190,6 @@ struct arm_ccn {
int mn_id;
};
-static DEFINE_MUTEX(arm_ccn_mutex);
-static LIST_HEAD(arm_ccn_list);
-
static int arm_ccn_node_to_xp(int node)
{
return node / CCN_NUM_XP_PORTS;
@@ -1214,30 +1211,24 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
}
-static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
+static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
- struct arm_ccn_dt *dt;
+ struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node);
+ struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
unsigned int target;
- mutex_lock(&arm_ccn_mutex);
- list_for_each_entry(dt, &arm_ccn_list, entry) {
- struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
-
- if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
- continue;
- target = cpumask_any_but(cpu_online_mask, cpu);
- if (target >= nr_cpu_ids)
- continue;
- perf_pmu_migrate_context(&dt->pmu, cpu, target);
- cpumask_set_cpu(target, &dt->cpu);
- if (ccn->irq)
- WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
- }
- mutex_unlock(&arm_ccn_mutex);
+ if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
+ return 0;
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+ perf_pmu_migrate_context(&dt->pmu, cpu, target);
+ cpumask_set_cpu(target, &dt->cpu);
+ if (ccn->irq)
+ WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
return 0;
}
-
static DEFINE_IDA(arm_ccn_pmu_ida);
static int arm_ccn_pmu_init(struct arm_ccn *ccn)
@@ -1321,9 +1312,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
if (err)
goto error_pmu_register;
- mutex_lock(&arm_ccn_mutex);
- list_add(&ccn->dt.entry, &arm_ccn_list);
- mutex_unlock(&arm_ccn_mutex);
+ cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ &ccn->dt.node);
return 0;
error_pmu_register:
@@ -1339,10 +1329,8 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
int i;
- mutex_lock(&arm_ccn_mutex);
- list_del(&ccn->dt.entry);
- mutex_unlock(&arm_ccn_mutex);
-
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ &ccn->dt.node);
if (ccn->irq)
irq_set_affinity_hint(ccn->irq, NULL);
for (i = 0; i < ccn->num_xps; i++)
@@ -1573,9 +1561,9 @@ static int __init arm_ccn_init(void)
{
int i, ret;
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
- "AP_PERF_ARM_CCN_ONLINE", NULL,
- arm_ccn_pmu_offline_cpu);
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ "AP_PERF_ARM_CCN_ONLINE", NULL,
+ arm_ccn_pmu_offline_cpu);
if (ret)
return ret;
@@ -1587,7 +1575,7 @@ static int __init arm_ccn_init(void)
static void __exit arm_ccn_exit(void)
{
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
platform_driver_unregister(&arm_ccn_driver);
}
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index cad49bc38b3e..1b14256376d2 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -596,19 +596,20 @@ BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */
BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */
/**
- * mips_cdmm_bus_down() - Tear down the CDMM bus.
- * @data: Pointer to unsigned int CPU number.
+ * mips_cdmm_cpu_down_prep() - Callback for CPUHP DOWN_PREP:
+ * Tear down the CDMM bus.
+ * @cpu: unsigned int CPU number.
*
* This function is executed on the hotplugged CPU and calls the CDMM
* driver cpu_down callback for all devices on that CPU.
*/
-static long mips_cdmm_bus_down(void *data)
+static int mips_cdmm_cpu_down_prep(unsigned int cpu)
{
struct mips_cdmm_bus *bus;
long ret;
/* Inform all the devices on the bus */
- ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
mips_cdmm_cpu_down_helper);
/*
@@ -623,8 +624,8 @@ static long mips_cdmm_bus_down(void *data)
}
/**
- * mips_cdmm_bus_up() - Bring up the CDMM bus.
- * @data: Pointer to unsigned int CPU number.
+ * mips_cdmm_cpu_online() - Callback for CPUHP ONLINE: Bring up the CDMM bus.
+ * @cpu: unsigned int CPU number.
*
* This work_on_cpu callback function is executed on a given CPU to discover
* CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
@@ -634,7 +635,7 @@ static long mips_cdmm_bus_down(void *data)
* initialisation. When CPUs are brought online the function is
* invoked directly on the hotplugged CPU.
*/
-static long mips_cdmm_bus_up(void *data)
+static int mips_cdmm_cpu_online(unsigned int cpu)
{
struct mips_cdmm_bus *bus;
long ret;
@@ -651,51 +652,13 @@ static long mips_cdmm_bus_up(void *data)
mips_cdmm_bus_discover(bus);
else
/* Inform all the devices on the bus */
- ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
mips_cdmm_cpu_up_helper);
return ret;
}
/**
- * mips_cdmm_cpu_notify() - Take action when a CPU is going online or offline.
- * @nb: CPU notifier block .
- * @action: Event that has taken place (CPU_*).
- * @data: CPU number.
- *
- * This notifier is used to keep the CDMM buses updated as CPUs are offlined and
- * onlined. When CPUs go offline or come back online, so does their CDMM bus, so
- * devices must be informed. Also when CPUs come online for the first time the
- * devices on the CDMM bus need discovering.
- *
- * Returns: NOTIFY_OK if event was used.
- * NOTIFY_DONE if we didn't care.
- */
-static int mips_cdmm_cpu_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- unsigned int cpu = (unsigned int)data;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- mips_cdmm_bus_up(&cpu);
- break;
- case CPU_DOWN_PREPARE:
- mips_cdmm_bus_down(&cpu);
- break;
- default:
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block mips_cdmm_cpu_nb = {
- .notifier_call = mips_cdmm_cpu_notify,
-};
-
-/**
* mips_cdmm_init() - Initialise CDMM bus.
*
* Initialise CDMM bus, discover CDMM devices for online CPUs, and arrange for
@@ -703,7 +666,6 @@ static struct notifier_block mips_cdmm_cpu_nb = {
*/
static int __init mips_cdmm_init(void)
{
- unsigned int cpu;
int ret;
/* Register the bus */
@@ -712,19 +674,11 @@ static int __init mips_cdmm_init(void)
return ret;
/* We want to be notified about new CPUs */
- ret = register_cpu_notifier(&mips_cdmm_cpu_nb);
- if (ret) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bus/cdmm:online",
+ mips_cdmm_cpu_online, mips_cdmm_cpu_down_prep);
+ if (ret < 0)
pr_warn("cdmm: Failed to register CPU notifier\n");
- goto out;
- }
-
- /* Discover devices on CDMM of online CPUs */
- for_each_online_cpu(cpu)
- work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
- return 0;
-out:
- bus_unregister(&mips_cdmm_bustype);
return ret;
}
subsys_initcall(mips_cdmm_init);
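Note that mips_cdmm uses plain cpuhp_setup_state() rather than the _nocalls variant, which is what lets the old for_each_online_cpu()/work_on_cpu() discovery loop disappear: the setup call itself invokes the online callback for every CPU that is already up. A hedged sketch of that behaviour, with invented demo_* names:

#include <linux/cpuhotplug.h>

static enum cpuhp_state demo_state;

static int demo_online(unsigned int cpu)
{
	/* Runs for each already-online CPU during setup, and for any
	 * CPU that comes online afterwards. */
	return 0;
}

static int demo_down_prep(unsigned int cpu)
{
	/* Runs before a CPU goes down (and when its bring-up fails). */
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_online, demo_down_prep);
	if (ret < 0)
		return ret;
	demo_state = ret;
	return 0;
}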
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3a64136bf21b..6e6c1fb60fbc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1286,7 +1286,7 @@ out_free_policy:
return ret;
}
-static void cpufreq_offline(unsigned int cpu);
+static int cpufreq_offline(unsigned int cpu);
/**
* cpufreq_add_dev - the cpufreq interface for a CPU device.
@@ -1321,7 +1321,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return ret;
}
-static void cpufreq_offline(unsigned int cpu)
+static int cpufreq_offline(unsigned int cpu)
{
struct cpufreq_policy *policy;
int ret;
@@ -1331,7 +1331,7 @@ static void cpufreq_offline(unsigned int cpu)
policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
- return;
+ return 0;
}
down_write(&policy->rwsem);
@@ -1380,6 +1380,7 @@ static void cpufreq_offline(unsigned int cpu)
unlock:
up_write(&policy->rwsem);
+ return 0;
}
/**
@@ -2295,28 +2296,6 @@ unlock:
}
EXPORT_SYMBOL(cpufreq_update_policy);
-static int cpufreq_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- cpufreq_online(cpu);
- break;
-
- case CPU_DOWN_PREPARE:
- cpufreq_offline(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __refdata cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
-};
-
/*********************************************************************
* BOOST *
*********************************************************************/
@@ -2418,6 +2397,7 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
*********************************************************************/
+static enum cpuhp_state hp_online;
/**
* cpufreq_register_driver - register a CPU Frequency driver
@@ -2480,7 +2460,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_if_unreg;
}
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
+ cpufreq_online,
+ cpufreq_offline);
+ if (ret < 0)
+ goto err_if_unreg;
+ hp_online = ret;
+ ret = 0;
+
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
@@ -2519,7 +2506,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
get_online_cpus();
subsys_interface_unregister(&cpufreq_interface);
remove_boost_sysfs_file();
- unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
+ cpuhp_remove_state_nocalls(hp_online);
write_lock_irqsave(&cpufreq_driver_lock, flags);
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index d5657d50ac40..71e586d7df71 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -749,65 +749,52 @@ static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
put_cpu();
}
-/**
- * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
- * @nb: notifier block
- * @action: hotplug transition
- * @hcpu: target cpu number
- *
- * Called when a cpu is brought on or offline using hotplug. Updates the
- * coupled cpu set appropriately
- */
-static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
- unsigned long action, void *hcpu)
+static int coupled_cpu_online(unsigned int cpu)
{
- int cpu = (unsigned long)hcpu;
struct cpuidle_device *dev;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- case CPU_DOWN_PREPARE:
- case CPU_ONLINE:
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- case CPU_DOWN_FAILED:
- break;
- default:
- return NOTIFY_OK;
- }
-
mutex_lock(&cpuidle_lock);
dev = per_cpu(cpuidle_devices, cpu);
- if (!dev || !dev->coupled)
- goto out;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- case CPU_DOWN_PREPARE:
- cpuidle_coupled_prevent_idle(dev->coupled);
- break;
- case CPU_ONLINE:
- case CPU_DEAD:
+ if (dev && dev->coupled) {
cpuidle_coupled_update_online_cpus(dev->coupled);
- /* Fall through */
- case CPU_UP_CANCELED:
- case CPU_DOWN_FAILED:
cpuidle_coupled_allow_idle(dev->coupled);
- break;
}
-out:
mutex_unlock(&cpuidle_lock);
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block cpuidle_coupled_cpu_notifier = {
- .notifier_call = cpuidle_coupled_cpu_notify,
-};
+static int coupled_cpu_up_prepare(unsigned int cpu)
+{
+ struct cpuidle_device *dev;
+
+ mutex_lock(&cpuidle_lock);
+
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (dev && dev->coupled)
+ cpuidle_coupled_prevent_idle(dev->coupled);
+
+ mutex_unlock(&cpuidle_lock);
+ return 0;
+}
static int __init cpuidle_coupled_init(void)
{
- return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
+ int ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE,
+ "cpuidle/coupled:prepare",
+ coupled_cpu_up_prepare,
+ coupled_cpu_online);
+ if (ret)
+ return ret;
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "cpuidle/coupled:online",
+ coupled_cpu_online,
+ coupled_cpu_up_prepare);
+ if (ret < 0)
+ cpuhp_remove_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE);
+ return ret;
}
core_initcall(cpuidle_coupled_init);
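The coupled-idle conversion pairs its two callbacks crosswise: each state's teardown is the other state's startup. On the way up, the PREPARE startup blocks idle and the ONLINE startup re-allows it; on the way down, the ONLINE teardown blocks idle and the PREPARE teardown re-allows it, so idle stays disabled across the whole transition in both directions, matching the old notifier semantics. A sketch of that registration (demo_* names and CPUHP_DEMO_PREPARE are invented):

#include <linux/cpuhotplug.h>

static int demo_prevent_idle(unsigned int cpu) { return 0; }
static int demo_allow_idle(unsigned int cpu)   { return 0; }

static int __init demo_init(void)
{
	int ret;

	/* PREPARE: startup blocks idle, teardown re-allows it. */
	ret = cpuhp_setup_state_nocalls(CPUHP_DEMO_PREPARE, "demo:prepare",
					demo_prevent_idle, demo_allow_idle);
	if (ret)
		return ret;
	/* ONLINE: startup re-allows idle, teardown blocks it. */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "demo:online",
					demo_allow_idle, demo_prevent_idle);
	if (ret < 0) {
		cpuhp_remove_state_nocalls(CPUHP_DEMO_PREPARE);
		return ret;
	}
	return 0;
}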
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index f7ca891b5b59..7fe442ca38f4 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -119,40 +119,30 @@ static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
.enter = snooze_loop },
};
-static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
- unsigned long action, void *hcpu)
+static int powernv_cpuidle_cpu_online(unsigned int cpu)
{
- int hotcpu = (unsigned long)hcpu;
- struct cpuidle_device *dev =
- per_cpu(cpuidle_devices, hotcpu);
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
if (dev && cpuidle_get_driver()) {
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_enable_device(dev);
- cpuidle_resume_and_unlock();
- break;
+ cpuidle_pause_and_lock();
+ cpuidle_enable_device(dev);
+ cpuidle_resume_and_unlock();
+ }
+ return 0;
+}
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_disable_device(dev);
- cpuidle_resume_and_unlock();
- break;
+static int powernv_cpuidle_cpu_dead(unsigned int cpu)
+{
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
- default:
- return NOTIFY_DONE;
- }
+ if (dev && cpuidle_get_driver()) {
+ cpuidle_pause_and_lock();
+ cpuidle_disable_device(dev);
+ cpuidle_resume_and_unlock();
}
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block setup_hotplug_notifier = {
- .notifier_call = powernv_cpuidle_add_cpu_notifier,
-};
-
/*
* powernv_cpuidle_driver_init()
*/
@@ -355,7 +345,14 @@ static int __init powernv_processor_idle_init(void)
return retval;
}
- register_cpu_notifier(&setup_hotplug_notifier);
+ retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "cpuidle/powernv:online",
+ powernv_cpuidle_cpu_online, NULL);
+ WARN_ON(retval < 0);
+ retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
+ "cpuidle/powernv:dead", NULL,
+ powernv_cpuidle_cpu_dead);
+ WARN_ON(retval < 0);
printk(KERN_DEBUG "powernv_idle_driver registered\n");
return 0;
}
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 07135e009d8b..166ccd711ec9 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -171,40 +171,30 @@ static struct cpuidle_state shared_states[] = {
.enter = &shared_cede_loop },
};
-static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
- unsigned long action, void *hcpu)
+static int pseries_cpuidle_cpu_online(unsigned int cpu)
{
- int hotcpu = (unsigned long)hcpu;
- struct cpuidle_device *dev =
- per_cpu(cpuidle_devices, hotcpu);
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
if (dev && cpuidle_get_driver()) {
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_enable_device(dev);
- cpuidle_resume_and_unlock();
- break;
+ cpuidle_pause_and_lock();
+ cpuidle_enable_device(dev);
+ cpuidle_resume_and_unlock();
+ }
+ return 0;
+}
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_disable_device(dev);
- cpuidle_resume_and_unlock();
- break;
+static int pseries_cpuidle_cpu_dead(unsigned int cpu)
+{
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
- default:
- return NOTIFY_DONE;
- }
+ if (dev && cpuidle_get_driver()) {
+ cpuidle_pause_and_lock();
+ cpuidle_disable_device(dev);
+ cpuidle_resume_and_unlock();
}
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block setup_hotplug_notifier = {
- .notifier_call = pseries_cpuidle_add_cpu_notifier,
-};
-
/*
* pseries_cpuidle_driver_init()
*/
@@ -273,7 +263,14 @@ static int __init pseries_processor_idle_init(void)
return retval;
}
- register_cpu_notifier(&setup_hotplug_notifier);
+ retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "cpuidle/pseries:online",
+ pseries_cpuidle_cpu_online, NULL);
+ WARN_ON(retval < 0);
+ retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
+ "cpuidle/pseries:DEAD", NULL,
+ pseries_cpuidle_cpu_dead);
+ WARN_ON(retval < 0);
printk(KERN_DEBUG "pseries_idle_driver registered\n");
return 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ee7fc3701700..5287e79e0b78 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6349,22 +6349,20 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
return 0;
}
-static void raid5_free_percpu(struct r5conf *conf)
+static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
- unsigned long cpu;
+ struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
+
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ return 0;
+}
+static void raid5_free_percpu(struct r5conf *conf)
+{
if (!conf->percpu)
return;
-#ifdef CONFIG_HOTPLUG_CPU
- unregister_cpu_notifier(&conf->cpu_notify);
-#endif
-
- get_online_cpus();
- for_each_possible_cpu(cpu)
- free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
- put_online_cpus();
-
+ cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
free_percpu(conf->percpu);
}
@@ -6383,64 +6381,28 @@ static void free_conf(struct r5conf *conf)
kfree(conf);
}
-#ifdef CONFIG_HOTPLUG_CPU
-static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
- struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
- long cpu = (long)hcpu;
+ struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- if (alloc_scratch_buffer(conf, percpu)) {
- pr_err("%s: failed memory allocation for cpu%ld\n",
- __func__, cpu);
- return notifier_from_errno(-ENOMEM);
- }
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
- break;
- default:
- break;
+ if (alloc_scratch_buffer(conf, percpu)) {
+ pr_err("%s: failed memory allocation for cpu%u\n",
+ __func__, cpu);
+ return -ENOMEM;
}
- return NOTIFY_OK;
+ return 0;
}
-#endif
static int raid5_alloc_percpu(struct r5conf *conf)
{
- unsigned long cpu;
int err = 0;
conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu)
return -ENOMEM;
-#ifdef CONFIG_HOTPLUG_CPU
- conf->cpu_notify.notifier_call = raid456_cpu_notify;
- conf->cpu_notify.priority = 0;
- err = register_cpu_notifier(&conf->cpu_notify);
- if (err)
- return err;
-#endif
-
- get_online_cpus();
- for_each_present_cpu(cpu) {
- err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
- if (err) {
- pr_err("%s: failed memory allocation for cpu%ld\n",
- __func__, cpu);
- break;
- }
- }
- put_online_cpus();
-
+ err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
if (!err) {
conf->scribble_disks = max(conf->raid_disks,
conf->previous_raid_disks);
@@ -7985,10 +7947,21 @@ static struct md_personality raid4_personality =
static int __init raid5_init(void)
{
+ int ret;
+
raid5_wq = alloc_workqueue("raid5wq",
WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
if (!raid5_wq)
return -ENOMEM;
+
+ ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE,
+ "md/raid5:prepare",
+ raid456_cpu_up_prepare,
+ raid456_cpu_dead);
+ if (ret) {
+ destroy_workqueue(raid5_wq);
+ return ret;
+ }
register_md_personality(&raid6_personality);
register_md_personality(&raid5_personality);
register_md_personality(&raid4_personality);
@@ -8000,6 +7973,7 @@ static void raid5_exit(void)
unregister_md_personality(&raid6_personality);
unregister_md_personality(&raid5_personality);
unregister_md_personality(&raid4_personality);
+ cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
destroy_workqueue(raid5_wq);
}
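raid5 uses the plain cpuhp_state_add_instance() here, not the _nocalls form: adding the instance immediately runs raid456_cpu_up_prepare() for each CPU that has already passed that hotplug state (effectively the online ones), replacing the old explicit allocation loop, and the core unwinds the completed calls if one of them fails. Sketched with invented demo_* names and a hypothetical CPUHP_DEMO_PREPARE slot:

#include <linux/cpuhotplug.h>
#include <linux/list.h>

struct demo_conf {
	struct hlist_node node;
	/* per-CPU scratch buffers hang off this, as in struct r5conf */
};

static int demo_prepare(unsigned int cpu, struct hlist_node *node)
{
	/* Allocate the per-CPU scratch buffer for this instance;
	 * returning -ENOMEM makes the core roll back the CPUs that
	 * were already prepared and fail the add_instance call. */
	return 0;
}

static int demo_dead(unsigned int cpu, struct hlist_node *node)
{
	/* Free the per-CPU scratch buffer again. */
	return 0;
}

static int __init demo_init(void)
{
	return cpuhp_setup_state_multi(CPUHP_DEMO_PREPARE, "demo:prepare",
				       demo_prepare, demo_dead);
}

static int demo_alloc_percpu(struct demo_conf *conf)
{
	/* Invokes demo_prepare() on the already-up CPUs right away. */
	return cpuhp_state_add_instance(CPUHP_DEMO_PREPARE, &conf->node);
}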
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 517d4b68a1be..57ec49f0839e 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -512,9 +512,7 @@ struct r5conf {
} __percpu *percpu;
int scribble_disks;
int scribble_sectors;
-#ifdef CONFIG_HOTPLUG_CPU
- struct notifier_block cpu_notify;
-#endif
+ struct hlist_node node;
/*
* Free stripes pool
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d41c28d00b57..b74548728fb5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -382,7 +382,8 @@ struct mvneta_port {
struct mvneta_rx_queue *rxqs;
struct mvneta_tx_queue *txqs;
struct net_device *dev;
- struct notifier_block cpu_notifier;
+ struct hlist_node node_online;
+ struct hlist_node node_dead;
int rxq_def;
/* Protect the access to the percpu interrupt registers,
* ensuring that the configuration remains coherent.
@@ -574,6 +575,7 @@ struct mvneta_rx_queue {
int next_desc_to_proc;
};
+static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues, but we are only allowing
* the first one to be used. Therefore, let's just allocate one queue.
*/
@@ -3311,101 +3313,104 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
}
};
-static int mvneta_percpu_notifier(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
{
- struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
- cpu_notifier);
- int cpu = (unsigned long)hcpu, other_cpu;
+ int other_cpu;
+ struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+ node_online);
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- spin_lock(&pp->lock);
- /* Configuring the driver for a new CPU while the
- * driver is stopping is racy, so just avoid it.
- */
- if (pp->is_stopped) {
- spin_unlock(&pp->lock);
- break;
- }
- netif_tx_stop_all_queues(pp->dev);
- /* We have to synchronise on tha napi of each CPU
- * except the one just being waked up
- */
- for_each_online_cpu(other_cpu) {
- if (other_cpu != cpu) {
- struct mvneta_pcpu_port *other_port =
- per_cpu_ptr(pp->ports, other_cpu);
+ spin_lock(&pp->lock);
+ /*
+ * Configuring the driver for a new CPU while the driver is
+ * stopping is racy, so just avoid it.
+ */
+ if (pp->is_stopped) {
+ spin_unlock(&pp->lock);
+ return 0;
+ }
+ netif_tx_stop_all_queues(pp->dev);
- napi_synchronize(&other_port->napi);
- }
+ /*
+	 * We have to synchronise on the napi of each CPU except the one
+ * just being woken up
+ */
+ for_each_online_cpu(other_cpu) {
+ if (other_cpu != cpu) {
+ struct mvneta_pcpu_port *other_port =
+ per_cpu_ptr(pp->ports, other_cpu);
+
+ napi_synchronize(&other_port->napi);
}
+ }
- /* Mask all ethernet port interrupts */
- on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- napi_enable(&port->napi);
+ /* Mask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+ napi_enable(&port->napi);
+ /*
+ * Enable per-CPU interrupts on the CPU that is
+ * brought up.
+ */
+ mvneta_percpu_enable(pp);
- /* Enable per-CPU interrupts on the CPU that is
- * brought up.
- */
- mvneta_percpu_enable(pp);
+ /*
+ * Enable per-CPU interrupt on the one CPU we care
+ * about.
+ */
+ mvneta_percpu_elect(pp);
- /* Enable per-CPU interrupt on the one CPU we care
- * about.
- */
- mvneta_percpu_elect(pp);
-
- /* Unmask all ethernet port interrupts */
- on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
- mvreg_write(pp, MVNETA_INTR_MISC_MASK,
- MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
- netif_tx_start_all_queues(pp->dev);
- spin_unlock(&pp->lock);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- netif_tx_stop_all_queues(pp->dev);
- /* Thanks to this lock we are sure that any pending
- * cpu election is done
- */
- spin_lock(&pp->lock);
- /* Mask all ethernet port interrupts */
- on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- spin_unlock(&pp->lock);
+ /* Unmask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE |
+ MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ netif_tx_start_all_queues(pp->dev);
+ spin_unlock(&pp->lock);
+ return 0;
+}
- napi_synchronize(&port->napi);
- napi_disable(&port->napi);
- /* Disable per-CPU interrupts on the CPU that is
- * brought down.
- */
- mvneta_percpu_disable(pp);
+static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
+{
+ struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+ node_online);
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- /* Check if a new CPU must be elected now this on is down */
- spin_lock(&pp->lock);
- mvneta_percpu_elect(pp);
- spin_unlock(&pp->lock);
- /* Unmask all ethernet port interrupts */
- on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
- mvreg_write(pp, MVNETA_INTR_MISC_MASK,
- MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
- netif_tx_start_all_queues(pp->dev);
- break;
- }
+ /*
+ * Thanks to this lock we are sure that any pending cpu election is
+ * done.
+ */
+ spin_lock(&pp->lock);
+ /* Mask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+ spin_unlock(&pp->lock);
- return NOTIFY_OK;
+ napi_synchronize(&port->napi);
+ napi_disable(&port->napi);
+ /* Disable per-CPU interrupts on the CPU that is brought down. */
+ mvneta_percpu_disable(pp);
+ return 0;
+}
+
+static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+ node_dead);
+
+	/* Check if a new CPU must be elected now this one is down */
+ spin_lock(&pp->lock);
+ mvneta_percpu_elect(pp);
+ spin_unlock(&pp->lock);
+ /* Unmask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE |
+ MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ netif_tx_start_all_queues(pp->dev);
+ return 0;
}
static int mvneta_open(struct net_device *dev)
@@ -3442,7 +3447,15 @@ static int mvneta_open(struct net_device *dev)
/* Register a CPU notifier to handle the case where our CPU
* might be taken offline.
*/
- register_cpu_notifier(&pp->cpu_notifier);
+ ret = cpuhp_state_add_instance_nocalls(online_hpstate,
+ &pp->node_online);
+ if (ret)
+ goto err_free_irq;
+
+ ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
+ if (ret)
+ goto err_free_online_hp;
/* In default link is down */
netif_carrier_off(pp->dev);
@@ -3450,15 +3463,19 @@ static int mvneta_open(struct net_device *dev)
ret = mvneta_mdio_probe(pp);
if (ret < 0) {
netdev_err(dev, "cannot probe MDIO bus\n");
- goto err_free_irq;
+ goto err_free_dead_hp;
}
mvneta_start_dev(pp);
return 0;
+err_free_dead_hp:
+ cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
+err_free_online_hp:
+ cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
err_free_irq:
- unregister_cpu_notifier(&pp->cpu_notifier);
on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
@@ -3484,7 +3501,10 @@ static int mvneta_stop(struct net_device *dev)
mvneta_stop_dev(pp);
mvneta_mdio_remove(pp);
- unregister_cpu_notifier(&pp->cpu_notifier);
+
+ cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
+ cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp);
@@ -4024,7 +4044,6 @@ static int mvneta_probe(struct platform_device *pdev)
err = of_property_read_string(dn, "managed", &managed);
pp->use_inband_status = (err == 0 &&
strcmp(managed, "in-band-status") == 0);
- pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
pp->rxq_def = rxq_def;
@@ -4227,7 +4246,42 @@ static struct platform_driver mvneta_driver = {
},
};
-module_platform_driver(mvneta_driver);
+static int __init mvneta_driver_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
+ mvneta_cpu_online,
+ mvneta_cpu_down_prepare);
+ if (ret < 0)
+ goto out;
+ online_hpstate = ret;
+ ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
+ NULL, mvneta_cpu_dead);
+ if (ret)
+ goto err_dead;
+
+ ret = platform_driver_register(&mvneta_driver);
+ if (ret)
+ goto err;
+ return 0;
+
+err:
+ cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+err_dead:
+ cpuhp_remove_multi_state(online_hpstate);
+out:
+ return ret;
+}
+module_init(mvneta_driver_init);
+
+static void __exit mvneta_driver_exit(void)
+{
+ platform_driver_unregister(&mvneta_driver);
+ cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+ cpuhp_remove_multi_state(online_hpstate);
+}
+module_exit(mvneta_driver_exit);
MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
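The ordering in mvneta_driver_init() above is deliberate: the hotplug states must exist before platform_driver_register(), because probing a device immediately adds instances to them, and the exit path tears things down in the reverse order. A condensed sketch of that init/exit shape (demo_* names are invented):

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver demo_driver = {
	.driver = { .name = "demo" },
};

static enum cpuhp_state demo_online_state;

static int demo_online(unsigned int cpu, struct hlist_node *node)
{
	return 0;
}

static int __init demo_driver_init(void)
{
	int ret;

	/* States first: probe will add per-device instances to them. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo:online",
				      demo_online, NULL);
	if (ret < 0)
		goto out;
	demo_online_state = ret;

	ret = platform_driver_register(&demo_driver);
	if (ret)
		goto err_driver;
	return 0;

err_driver:
	cpuhp_remove_multi_state(demo_online_state);
out:
	return ret;
}
module_init(demo_driver_init);

static void __exit demo_driver_exit(void)
{
	/* Reverse order: no driver first, then no states. */
	platform_driver_unregister(&demo_driver);
	cpuhp_remove_multi_state(demo_online_state);
}
module_exit(demo_driver_exit);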
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1b5f531eeb25..fad84f3f4109 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -138,8 +138,9 @@ struct virtnet_info {
/* Does the affinity hint is set for virtqueues? */
bool affinity_hint_set;
- /* CPU hot plug notifier */
- struct notifier_block nb;
+ /* CPU hotplug instances for online & dead */
+ struct hlist_node node;
+ struct hlist_node node_dead;
/* Control VQ buffers: protected by the rtnl lock */
struct virtio_net_ctrl_hdr ctrl_hdr;
@@ -1237,25 +1238,53 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
vi->affinity_hint_set = true;
}
-static int virtnet_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{
- struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+ struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
+ node);
+ virtnet_set_affinity(vi);
+ return 0;
+}
- switch(action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- case CPU_DEAD:
- virtnet_set_affinity(vi);
- break;
- case CPU_DOWN_PREPARE:
- virtnet_clean_affinity(vi, (long)hcpu);
- break;
- default:
- break;
- }
+static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
+ node_dead);
+ virtnet_set_affinity(vi);
+ return 0;
+}
- return NOTIFY_OK;
+static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
+{
+ struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
+ node);
+
+ virtnet_clean_affinity(vi, cpu);
+ return 0;
+}
+
+static enum cpuhp_state virtionet_online;
+
+static int virtnet_cpu_notif_add(struct virtnet_info *vi)
+{
+ int ret;
+
+ ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
+ if (ret)
+ return ret;
+ ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
+ &vi->node_dead);
+ if (!ret)
+ return ret;
+ cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
+ return ret;
+}
+
+static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
+{
+ cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
+ &vi->node_dead);
}
static void virtnet_get_ringparam(struct net_device *dev,
@@ -1879,8 +1908,7 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- vi->nb.notifier_call = &virtnet_cpu_callback;
- err = register_hotcpu_notifier(&vi->nb);
+ err = virtnet_cpu_notif_add(vi);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
goto free_unregister_netdev;
@@ -1934,7 +1962,7 @@ static void virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- unregister_hotcpu_notifier(&vi->nb);
+ virtnet_cpu_notif_remove(vi);
/* Make sure no work handler is accessing the device. */
flush_work(&vi->config_work);
@@ -1953,7 +1981,7 @@ static int virtnet_freeze(struct virtio_device *vdev)
struct virtnet_info *vi = vdev->priv;
int i;
- unregister_hotcpu_notifier(&vi->nb);
+ virtnet_cpu_notif_remove(vi);
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
@@ -1997,7 +2025,7 @@ static int virtnet_restore(struct virtio_device *vdev)
virtnet_set_queues(vi, vi->curr_queue_pairs);
rtnl_unlock();
- err = register_hotcpu_notifier(&vi->nb);
+ err = virtnet_cpu_notif_add(vi);
if (err)
return err;
@@ -2039,7 +2067,41 @@ static struct virtio_driver virtio_net_driver = {
#endif
};
-module_virtio_driver(virtio_net_driver);
+static __init int virtio_net_driver_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE",
+ virtnet_cpu_online,
+ virtnet_cpu_down_prep);
+ if (ret < 0)
+ goto out;
+ virtionet_online = ret;
+ ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD",
+ NULL, virtnet_cpu_dead);
+ if (ret)
+ goto err_dead;
+
+ ret = register_virtio_driver(&virtio_net_driver);
+ if (ret)
+ goto err_virtio;
+ return 0;
+err_virtio:
+ cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
+err_dead:
+ cpuhp_remove_multi_state(virtionet_online);
+out:
+ return ret;
+}
+module_init(virtio_net_driver_init);
+
+static __exit void virtio_net_driver_exit(void)
+{
+ cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
+ cpuhp_remove_multi_state(virtionet_online);
+ unregister_virtio_driver(&virtio_net_driver);
+}
+module_exit(virtio_net_driver_exit);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index bdef916e5dda..2498a6cd7c24 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -74,37 +74,39 @@ static void oprofile_hrtimer_stop(void)
put_online_cpus();
}
-static int oprofile_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int oprofile_timer_online(unsigned int cpu)
{
- long cpu = (long) hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- smp_call_function_single(cpu, __oprofile_hrtimer_start,
- NULL, 1);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- __oprofile_hrtimer_stop(cpu);
- break;
- }
- return NOTIFY_OK;
+ local_irq_disable();
+ __oprofile_hrtimer_start(NULL);
+ local_irq_enable();
+ return 0;
}
-static struct notifier_block __refdata oprofile_cpu_notifier = {
- .notifier_call = oprofile_cpu_notify,
-};
+static int oprofile_timer_prep_down(unsigned int cpu)
+{
+ __oprofile_hrtimer_stop(cpu);
+ return 0;
+}
+
+static enum cpuhp_state hp_online;
static int oprofile_hrtimer_setup(void)
{
- return register_hotcpu_notifier(&oprofile_cpu_notifier);
+ int ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "oprofile/timer:online",
+ oprofile_timer_online,
+ oprofile_timer_prep_down);
+ if (ret < 0)
+ return ret;
+ hp_online = ret;
+ return 0;
}
static void oprofile_hrtimer_shutdown(void)
{
- unregister_hotcpu_notifier(&oprofile_cpu_notifier);
+ cpuhp_remove_state_nocalls(hp_online);
}
int oprofile_timer_init(struct oprofile_operations *ops)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 30370817bf13..b37b57294566 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -709,28 +709,20 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return 0;
}
-static DEFINE_SPINLOCK(arm_pmu_lock);
-static LIST_HEAD(arm_pmu_list);
-
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
* UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
* junk values out of them.
*/
-static int arm_perf_starting_cpu(unsigned int cpu)
+static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
- struct arm_pmu *pmu;
-
- spin_lock(&arm_pmu_lock);
- list_for_each_entry(pmu, &arm_pmu_list, entry) {
+ struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
- if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
- continue;
- if (pmu->reset)
- pmu->reset(pmu);
- }
- spin_unlock(&arm_pmu_lock);
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return 0;
+ if (pmu->reset)
+ pmu->reset(pmu);
return 0;
}
@@ -842,9 +834,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
if (!cpu_hw_events)
return -ENOMEM;
- spin_lock(&arm_pmu_lock);
- list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
- spin_unlock(&arm_pmu_lock);
+ err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ &cpu_pmu->node);
+ if (err)
+ goto out_free;
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
@@ -880,9 +873,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
out_unregister:
- spin_lock(&arm_pmu_lock);
- list_del(&cpu_pmu->entry);
- spin_unlock(&arm_pmu_lock);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ &cpu_pmu->node);
+out_free:
free_percpu(cpu_hw_events);
return err;
}
@@ -890,9 +883,8 @@ out_unregister:
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
cpu_pm_pmu_unregister(cpu_pmu);
- spin_lock(&arm_pmu_lock);
- list_del(&cpu_pmu->entry);
- spin_unlock(&arm_pmu_lock);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ &cpu_pmu->node);
free_percpu(cpu_pmu->hw_events);
}
@@ -1091,9 +1083,9 @@ static int arm_pmu_hp_init(void)
{
int ret;
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
- "AP_PERF_ARM_STARTING",
- arm_perf_starting_cpu, NULL);
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
+ "AP_PERF_ARM_STARTING",
+ arm_perf_starting_cpu, NULL);
if (ret)
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
ret);
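CPUHP_AP_PERF_ARM_STARTING sits in the STARTING section, whose callbacks run on the hotplugged CPU itself, early during bring-up with interrupts disabled; nothing in them may sleep, and the multi-instance core walk also lets the driver drop its own spinlock-protected list. A hedged sketch (demo_* names and CPUHP_DEMO_STARTING are invented):

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/list.h>

struct demo_pmu {
	struct hlist_node node;
	cpumask_t supported_cpus;
	void (*reset)(struct demo_pmu *pmu);
};

/* Runs on the incoming CPU with interrupts off: must not sleep. */
static int demo_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct demo_pmu *pmu = hlist_entry_safe(node, struct demo_pmu, node);

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);	/* registers are UNKNOWN after reset */
	return 0;
}

static int __init demo_hp_init(void)
{
	return cpuhp_setup_state_multi(CPUHP_DEMO_STARTING, "demo:starting",
				       demo_starting_cpu, NULL);
}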
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7dbbb29d24c6..deefab3a94d0 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -107,8 +107,8 @@ struct virtio_scsi {
/* If the affinity hint is set for virtqueues */
bool affinity_hint_set;
- /* CPU hotplug notifier */
- struct notifier_block nb;
+ struct hlist_node node;
+ struct hlist_node node_dead;
/* Protected by event_vq lock */
bool stop_events;
@@ -118,6 +118,7 @@ struct virtio_scsi {
struct virtio_scsi_vq req_vqs[];
};
+static enum cpuhp_state virtioscsi_online;
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
@@ -852,21 +853,33 @@ static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
put_online_cpus();
}
-static int virtscsi_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int virtscsi_cpu_online(unsigned int cpu, struct hlist_node *node)
{
- struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
- switch(action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- __virtscsi_set_affinity(vscsi, true);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+ struct virtio_scsi *vscsi = hlist_entry_safe(node, struct virtio_scsi,
+ node);
+ __virtscsi_set_affinity(vscsi, true);
+ return 0;
+}
+
+static int virtscsi_cpu_notif_add(struct virtio_scsi *vi)
+{
+ int ret;
+
+ ret = cpuhp_state_add_instance(virtioscsi_online, &vi->node);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(CPUHP_VIRT_SCSI_DEAD, &vi->node_dead);
+ if (ret)
+ cpuhp_state_remove_instance(virtioscsi_online, &vi->node);
+ return ret;
+}
+
+static void virtscsi_cpu_notif_remove(struct virtio_scsi *vi)
+{
+ cpuhp_state_remove_instance_nocalls(virtioscsi_online, &vi->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_SCSI_DEAD,
+ &vi->node_dead);
}
static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
@@ -929,8 +942,6 @@ static int virtscsi_init(struct virtio_device *vdev,
virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
vqs[i]);
- virtscsi_set_affinity(vscsi, true);
-
virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
@@ -987,12 +998,9 @@ static int virtscsi_probe(struct virtio_device *vdev)
if (err)
goto virtscsi_init_failed;
- vscsi->nb.notifier_call = &virtscsi_cpu_callback;
- err = register_hotcpu_notifier(&vscsi->nb);
- if (err) {
- pr_err("registering cpu notifier failed\n");
+ err = virtscsi_cpu_notif_add(vscsi);
+ if (err)
goto scsi_add_host_failed;
- }
cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
@@ -1049,7 +1057,7 @@ static void virtscsi_remove(struct virtio_device *vdev)
scsi_remove_host(shost);
- unregister_hotcpu_notifier(&vscsi->nb);
+ virtscsi_cpu_notif_remove(vscsi);
virtscsi_remove_vqs(vdev);
scsi_host_put(shost);
@@ -1061,7 +1069,7 @@ static int virtscsi_freeze(struct virtio_device *vdev)
struct Scsi_Host *sh = virtio_scsi_host(vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
- unregister_hotcpu_notifier(&vscsi->nb);
+ virtscsi_cpu_notif_remove(vscsi);
virtscsi_remove_vqs(vdev);
return 0;
}
@@ -1076,12 +1084,11 @@ static int virtscsi_restore(struct virtio_device *vdev)
if (err)
return err;
- err = register_hotcpu_notifier(&vscsi->nb);
+ err = virtscsi_cpu_notif_add(vscsi);
if (err) {
vdev->config->del_vqs(vdev);
return err;
}
-
virtio_device_ready(vdev);
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
@@ -1136,6 +1143,16 @@ static int __init init(void)
pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
goto error;
}
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "scsi/virtio:online",
+ virtscsi_cpu_online, NULL);
+ if (ret < 0)
+ goto error;
+ virtioscsi_online = ret;
+ ret = cpuhp_setup_state_multi(CPUHP_VIRT_SCSI_DEAD, "scsi/virtio:dead",
+ NULL, virtscsi_cpu_online);
+ if (ret)
+ goto error;
ret = register_virtio_driver(&virtio_scsi_driver);
if (ret < 0)
goto error;
@@ -1151,12 +1168,17 @@ error:
kmem_cache_destroy(virtscsi_cmd_cache);
virtscsi_cmd_cache = NULL;
}
+ if (virtioscsi_online)
+ cpuhp_remove_multi_state(virtioscsi_online);
+ cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD);
return ret;
}
static void __exit fini(void)
{
unregister_virtio_driver(&virtio_scsi_driver);
+ cpuhp_remove_multi_state(virtioscsi_online);
+ cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD);
mempool_destroy(virtscsi_cmd_pool);
kmem_cache_destroy(virtscsi_cmd_cache);
}