Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c     4
-rw-r--r--  kernel/profile.c    3
-rw-r--r--  kernel/sched.c      3
-rw-r--r--  kernel/workqueue.c  2
4 files changed, 1 insertion, 11 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index bd1e89c4c96a..9b62b4c03ad0 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2044,7 +2044,6 @@ out:
return err;
}
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG)
/*
* If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
* or memory nodes, we need to walk over the cpuset hierarchy,
@@ -2108,9 +2107,7 @@ static void common_cpu_mem_hotplug_unplug(void)
mutex_unlock(&callback_mutex);
mutex_unlock(&manage_mutex);
}
-#endif
-#ifdef CONFIG_HOTPLUG_CPU
/*
* The top_cpuset tracks what CPUs and Memory Nodes are online,
* period. This is necessary in order to make cpusets transparent
@@ -2127,7 +2124,6 @@ static int cpuset_handle_cpuhp(struct notifier_block *nb,
common_cpu_mem_hotplug_unplug();
return 0;
}
-#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/*
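
The cpuset hunks above drop the CONFIG_HOTPLUG_CPU guards, so cpuset_handle_cpuhp() and common_cpu_mem_hotplug_unplug() are now compiled unconditionally and any hotplug event simply funnels into the one common handler. A rough, hedged sketch of that shape in plain userspace C (hypothetical names, not code from this commit):

#include <stdio.h>

/* One common handler, always compiled, shared by CPU and memory events. */
static void common_hotplug_unplug(void)
{
	printf("resync tracking of online CPUs and memory nodes\n");
}

/* Notifier-style callback: every action funnels into the common handler. */
static int handle_cpuhp(unsigned long action, void *hcpu)
{
	(void)action;
	(void)hcpu;
	common_hotplug_unplug();
	return 0;
}

int main(void)
{
	return handle_cpuhp(0, NULL);
}
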
diff --git a/kernel/profile.c b/kernel/profile.c
index 04fd84e8cdbe..0961d93e1d91 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -319,7 +319,6 @@ out:
put_cpu();
}
-#ifdef CONFIG_HOTPLUG_CPU
static int __devinit profile_cpu_callback(struct notifier_block *info,
unsigned long action, void *__cpu)
{
@@ -372,10 +371,10 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
}
return NOTIFY_OK;
}
-#endif /* CONFIG_HOTPLUG_CPU */
#else /* !CONFIG_SMP */
#define profile_flip_buffers() do { } while (0)
#define profile_discard_flip_buffers() do { } while (0)
+#define profile_cpu_callback NULL
void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
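
The one added line, #define profile_cpu_callback NULL in the !CONFIG_SMP branch, keeps the symbol defined so code that refers to it still compiles when the real callback is not built. A minimal hedged sketch of that fallback pattern (hypothetical names, plain C, not from profile.c):

#include <stdio.h>
#include <stddef.h>

#define DEMO_SMP 0	/* flip to 1 to compile the real callback */

#if DEMO_SMP
static int demo_cpu_callback(unsigned long action, void *cpu)
{
	printf("handling action %lu for cpu %ld\n", action, (long)cpu);
	return 0;
}
#else
/* No callback in this configuration; keep the name valid for callers. */
#define demo_cpu_callback NULL
#endif

typedef int (*demo_callback_t)(unsigned long, void *);

static void demo_register(demo_callback_t cb)
{
	if (cb)
		cb(1, (void *)0);
	else
		printf("no callback registered in this configuration\n");
}

int main(void)
{
	demo_register(demo_cpu_callback);
	return 0;
}
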
diff --git a/kernel/sched.c b/kernel/sched.c
index 75a005ed4eda..c83f531c2886 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6740,8 +6740,6 @@ SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
sched_smt_power_savings_store);
#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
/*
* Force a reinitialization of the sched domains hierarchy. The domains
* and groups cannot be updated in place without racing with the balancing
@@ -6774,7 +6772,6 @@ static int update_sched_domains(struct notifier_block *nfb,
return NOTIFY_OK;
}
-#endif
void __init sched_init_smp(void)
{
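
The sched.c comment kept above notes that the domain hierarchy cannot be updated in place without racing with the balancing code, so update_sched_domains() tears it down and rebuilds it. A loose userspace sketch of that detach-then-rebuild idea (hypothetical names, a pthread mutex standing in for the kernel's locking):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int domain_map[4];	/* toy stand-in for the domain hierarchy */

/* Tear everything down, then build a fresh copy under the lock, so no
 * partially updated state is ever visible to other lock holders. */
static void rebuild_domains(const int *next, size_t n)
{
	pthread_mutex_lock(&lock);
	memset(domain_map, 0, sizeof(domain_map));	/* detach */
	memcpy(domain_map, next, n * sizeof(*next));	/* attach anew */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int next[4] = { 1, 1, 0, 1 };	/* e.g. CPU 2 just went offline */

	rebuild_domains(next, 4);
	printf("%d %d %d %d\n", domain_map[0], domain_map[1],
	       domain_map[2], domain_map[3]);
	return 0;
}
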
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5484d6e045c2..c5257316f4b9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -655,7 +655,6 @@ int current_is_keventd(void)
}
-#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
@@ -738,7 +737,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-#endif
void init_workqueues(void)
{
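
workqueue.c's take_over_work(), left unguarded by this patch, takes the pending work from a downed CPU; roughly, every queued item is moved over to a queue that is still being serviced. A small hedged sketch of that splice with a toy singly linked list (none of these names come from workqueue.c):

#include <stdio.h>
#include <stdlib.h>

struct work {
	int id;
	struct work *next;
};

struct queue {
	struct work *head;
};

/* Move every pending item from 'dead' onto 'live', leaving the dead
 * CPU's queue empty. */
static void take_over(struct queue *live, struct queue *dead)
{
	while (dead->head) {
		struct work *w = dead->head;

		dead->head = w->next;
		w->next = live->head;
		live->head = w;
	}
}

static void push(struct queue *q, int id)
{
	struct work *w = malloc(sizeof(*w));

	w->id = id;
	w->next = q->head;
	q->head = w;
}

int main(void)
{
	struct queue live = { 0 }, dead = { 0 };

	push(&dead, 1);
	push(&dead, 2);
	take_over(&live, &dead);
	for (struct work *w = live.head; w; w = w->next)
		printf("work %d now on live queue\n", w->id);
	return 0;
}
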