author    Linus Torvalds <torvalds@linux-foundation.org>  2009-01-02 11:44:09 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-01-02 11:44:09 -0800
commit    b840d79631c882786925303c2b0f4fefc31845ed (patch)
tree      cda60a95d4507fe1321fc285af38982d7eb9693b /include
parent    597b0d21626da4e6f09f132442caf0cc2b0eb47c (diff)
parent    c3d80000e3a812fe5a200d6bde755fbd7fa65481 (diff)
Merge branch 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (66 commits)
  x86: export vector_used_by_percpu_irq
  x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()
  sched: nominate preferred wakeup cpu, fix
  x86: fix lguest used_vectors breakage, -v2
  x86: fix warning in arch/x86/kernel/io_apic.c
  sched: fix warning in kernel/sched.c
  sched: move test_sd_parent() to an SMP section of sched.h
  sched: add SD_BALANCE_NEWIDLE at MC and CPU level for sched_mc>0
  sched: activate active load balancing in new idle cpus
  sched: bias task wakeups to preferred semi-idle packages
  sched: nominate preferred wakeup cpu
  sched: favour lower logical cpu number for sched_mc balance
  sched: framework for sched_mc/smt_power_savings=N
  sched: convert BALANCE_FOR_xx_POWER to inline functions
  x86: use possible_cpus=NUM to extend the possible cpus allowed
  x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
  x86: update io_apic.c to the new cpumask code
  x86: Introduce topology_core_cpumask()/topology_thread_cpumask()
  x86: xen: use smp_call_function_many()
  x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c
  ...

Fixed up trivial conflict in kernel/time/tick-sched.c manually
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/topology.h | 14
-rw-r--r--  include/asm-m32r/smp.h         |  2
-rw-r--r--  include/linux/clockchips.h     |  4
-rw-r--r--  include/linux/cpumask.h        | 98
-rw-r--r--  include/linux/interrupt.h      |  4
-rw-r--r--  include/linux/irq.h            |  3
-rw-r--r--  include/linux/sched.h          | 92
-rw-r--r--  include/linux/topology.h       |  6
8 files changed, 159 insertions, 64 deletions
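
The common thread across the header changes below is the switch from passing cpumask_t by value to passing const struct cpumask * pointers, so that configurations with thousands of possible CPUs no longer copy large masks on the stack. A minimal before/after sketch of the calling convention (illustrative only, not part of the patch; do_something_per_cpu() is an invented helper):

    #include <linux/cpumask.h>

    extern void do_something_per_cpu(int cpu);  /* invented for illustration */

    /* Old convention: the whole mask is copied by value at each call site. */
    static void walk_mask_old(cpumask_t mask)
    {
            int cpu;

            for_each_cpu_mask(cpu, mask)
                    do_something_per_cpu(cpu);
    }

    /* New convention: only a pointer travels, which also works when the
     * mask is allocated off-stack (CONFIG_CPUMASK_OFFSTACK). */
    static void walk_mask_new(const struct cpumask *mask)
    {
            int cpu;

            for_each_cpu(cpu, mask)
                    do_something_per_cpu(cpu);
    }
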
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 54bbf6e04ee8..0e9e2bc0ee96 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -40,6 +40,9 @@
#ifndef node_to_cpumask
#define node_to_cpumask(node) ((void)node, cpu_online_map)
#endif
+#ifndef cpumask_of_node
+#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+#endif
#ifndef node_to_first_cpu
#define node_to_first_cpu(node) ((void)(node),0)
#endif
@@ -54,9 +57,18 @@
)
#endif
+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+#endif
+
#endif /* CONFIG_NUMA */
-/* returns pointer to cpumask for specified node */
+/*
+ * returns pointer to cpumask for specified node
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
#ifndef node_to_cpumask_ptr
#define node_to_cpumask_ptr(v, node) \
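
For context, a minimal sketch of how the new generic fallbacks above get used; count_node_cpus() is a hypothetical helper, and on a non-NUMA build it simply counts the online CPUs:

    #include <linux/topology.h>
    #include <linux/cpumask.h>

    static int count_node_cpus(int node)
    {
            /* With the generic fallback, this is cpu_online_mask. */
            const struct cpumask *mask = cpumask_of_node(node);

            return cpumask_weight(mask);
    }
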
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index c5dd66916692..b96a6d2ffbc3 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -63,8 +63,6 @@ extern volatile int cpu_2_physid[NR_CPUS];
#define raw_smp_processor_id() (current_thread_info()->cpu)
extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_present_map;
static __inline__ int hard_smp_processor_id(void)
{
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index ed3a5d473e52..cea153697ec7 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -82,13 +82,13 @@ struct clock_event_device {
int shift;
int rating;
int irq;
- cpumask_t cpumask;
+ const struct cpumask *cpumask;
int (*set_next_event)(unsigned long evt,
struct clock_event_device *);
void (*set_mode)(enum clock_event_mode mode,
struct clock_event_device *);
void (*event_handler)(struct clock_event_device *);
- void (*broadcast)(cpumask_t mask);
+ void (*broadcast)(const struct cpumask *mask);
struct list_head list;
enum clock_event_mode mode;
ktime_t next_event;
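
With cpumask now a pointer, a per-CPU clock event driver points the field at a shared constant mask instead of copying one into the structure. A sketch, where my_clockevent is a hypothetical driver-owned device:

    #include <linux/clockchips.h>
    #include <linux/smp.h>

    static struct clock_event_device my_clockevent;      /* hypothetical */

    static void my_clockevent_register(void)
    {
            my_clockevent.cpumask = cpumask_of(smp_processor_id());
            clockevents_register_device(&my_clockevent);
    }
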
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 21e1dd43e52a..d4bf52603e6b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -339,36 +339,6 @@ extern cpumask_t cpu_mask_all;
#endif
#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v)
-#define cpumask_scnprintf(buf, len, src) \
- __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpumask_scnprintf(char *buf, int len,
- const cpumask_t *srcp, int nbits)
-{
- return bitmap_scnprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpumask_parse_user(ubuf, ulen, dst) \
- __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
-static inline int __cpumask_parse_user(const char __user *buf, int len,
- cpumask_t *dstp, int nbits)
-{
- return bitmap_parse_user(buf, len, dstp->bits, nbits);
-}
-
-#define cpulist_scnprintf(buf, len, src) \
- __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpulist_scnprintf(char *buf, int len,
- const cpumask_t *srcp, int nbits)
-{
- return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
-static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
-{
- return bitmap_parselist(buf, dstp->bits, nbits);
-}
-
#define cpu_remap(oldbit, old, new) \
__cpu_remap((oldbit), &(old), &(new), NR_CPUS)
static inline int __cpu_remap(int oldbit,
@@ -540,9 +510,6 @@ extern cpumask_t cpu_active_map;
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
}
-/* This produces more efficient code. */
-#define nr_cpumask_bits NR_CPUS
-
#else /* NR_CPUS > BITS_PER_LONG */
#define CPU_BITS_ALL \
@@ -550,9 +517,15 @@ extern cpumask_t cpu_active_map;
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
}
+#endif /* NR_CPUS > BITS_PER_LONG */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
+ * not all bits may be allocated. */
#define nr_cpumask_bits nr_cpu_ids
-#endif /* NR_CPUS > BITS_PER_LONG */
+#else
+#define nr_cpumask_bits NR_CPUS
+#endif
/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
@@ -946,6 +919,63 @@ static inline void cpumask_copy(struct cpumask *dstp,
#define cpumask_of(cpu) (get_cpu_mask(cpu))
/**
+ * cpumask_scnprintf - print a cpumask into a string as comma-separated hex
+ * @buf: the buffer to sprintf into
+ * @len: the length of the buffer
+ * @srcp: the cpumask to print
+ *
+ * If len is zero, returns zero. Otherwise returns the length of the
+ * (nul-terminated) @buf string.
+ */
+static inline int cpumask_scnprintf(char *buf, int len,
+ const struct cpumask *srcp)
+{
+ return bitmap_scnprintf(buf, len, srcp->bits, nr_cpumask_bits);
+}
+
+/**
+ * cpumask_parse_user - extract a cpumask from a user string
+ * @buf: the buffer to extract from
+ * @len: the length of the buffer
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpumask_parse_user(const char __user *buf, int len,
+ struct cpumask *dstp)
+{
+ return bitmap_parse_user(buf, len, dstp->bits, nr_cpumask_bits);
+}
+
+/**
+ * cpulist_scnprintf - print a cpumask into a string as comma-separated list
+ * @buf: the buffer to sprintf into
+ * @len: the length of the buffer
+ * @srcp: the cpumask to print
+ *
+ * If len is zero, returns zero. Otherwise returns the length of the
+ * (nul-terminated) @buf string.
+ */
+static inline int cpulist_scnprintf(char *buf, int len,
+ const struct cpumask *srcp)
+{
+ return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpumask_bits);
+}
+
+/**
+ * cpulist_parse - extract a cpumask from a user string of ranges
+ * @buf: the buffer to extract from
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+{
+ return bitmap_parselist(buf, dstp->bits, nr_cpumask_bits);
+}
+
+/**
* to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
*
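
The printing and parsing helpers moved above now take struct cpumask pointers and honour nr_cpumask_bits rather than NR_CPUS. A small round-trip sketch (parse_and_print() is invented; the caller is assumed to own mask and buf):

    #include <linux/cpumask.h>

    static int parse_and_print(const char *list, struct cpumask *mask,
                               char *buf, int buflen)
    {
            int err = cpulist_parse(list, mask);

            if (err)
                    return err;

            /* e.g. "0-3,8" comes back out as the same canonical list */
            return cpulist_scnprintf(buf, buflen, mask);
    }
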
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 8cc8ef47f5b6..990355fbc54e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -111,13 +111,13 @@ extern void enable_irq(unsigned int irq);
extern cpumask_t irq_default_affinity;
-extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
#else /* CONFIG_SMP */
-static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
return -EINVAL;
}
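
A sketch of a caller using the pointer-based affinity API declared above; pin_irq_to_cpu() is a hypothetical helper:

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static int pin_irq_to_cpu(unsigned int irq, int cpu)
    {
            if (!irq_can_set_affinity(irq))
                    return -ENOSYS;

            return irq_set_affinity(irq, cpumask_of(cpu));
    }
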
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d64a6d49bdef..f899b502f186 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -113,7 +113,8 @@ struct irq_chip {
void (*eoi)(unsigned int irq);
void (*end)(unsigned int irq);
- void (*set_affinity)(unsigned int irq, cpumask_t dest);
+ void (*set_affinity)(unsigned int irq,
+ const struct cpumask *dest);
int (*retrigger)(unsigned int irq);
int (*set_type)(unsigned int irq, unsigned int flow_type);
int (*set_wake)(unsigned int irq, unsigned int on);
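
Chip drivers implementing ->set_affinity now receive a mask pointer as well. A sketch of such a callback, where my_chip_write_dest() is invented to stand in for the hardware routing write:

    #include <linux/irq.h>
    #include <linux/cpumask.h>

    static void my_chip_write_dest(unsigned int irq, unsigned int cpu);  /* invented */

    static void my_chip_set_affinity(unsigned int irq,
                                     const struct cpumask *dest)
    {
            /* Route to the first requested CPU; real drivers may first
             * intersect the request with cpu_online_mask. */
            my_chip_write_dest(irq, cpumask_first(dest));
    }
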
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8395e715809d..158d53d07765 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -250,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
extern int runqueue_is_locked(void);
extern void task_rq_unlock_wait(struct task_struct *p);
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
#else
@@ -758,20 +758,51 @@ enum cpu_idle_type {
#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
-#define BALANCE_FOR_MC_POWER \
- (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+ POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
+ POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package
+ * first for long running threads
+ */
+ POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle
+ * cpu package for power savings
+ */
+ MAX_POWERSAVINGS_BALANCE_LEVELS
+};
-#define BALANCE_FOR_PKG_POWER \
- ((sched_mc_power_savings || sched_smt_power_savings) ? \
- SD_POWERSAVINGS_BALANCE : 0)
+extern int sched_mc_power_savings, sched_smt_power_savings;
-#define test_sd_parent(sd, flag) ((sd->parent && \
- (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_balance_for_mc_power(void)
+{
+ if (sched_smt_power_savings)
+ return SD_POWERSAVINGS_BALANCE;
+ return 0;
+}
+
+static inline int sd_balance_for_package_power(void)
+{
+ if (sched_mc_power_savings | sched_smt_power_savings)
+ return SD_POWERSAVINGS_BALANCE;
+
+ return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+ if (sched_mc_power_savings | sched_smt_power_savings)
+ return SD_BALANCE_NEWIDLE;
+
+ return 0;
+}
struct sched_group {
struct sched_group *next; /* Must be a circular list */
- cpumask_t cpumask;
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -784,8 +815,15 @@ struct sched_group {
* (see include/linux/reciprocal_div.h)
*/
u32 reciprocal_cpu_power;
+
+ unsigned long cpumask[];
};
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+ return to_cpumask(sg->cpumask);
+}
+
enum sched_domain_level {
SD_LV_NONE = 0,
SD_LV_SIBLING,
@@ -809,7 +847,6 @@ struct sched_domain {
struct sched_domain *parent; /* top domain must be null terminated */
struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
- cpumask_t span; /* span of all CPUs in this domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
@@ -864,18 +901,35 @@ struct sched_domain {
#ifdef CONFIG_SCHED_DEBUG
char *name;
#endif
+
+ /* span of all CPUs in this domain */
+ unsigned long span[];
};
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+ return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new);
extern int arch_reinit_sched_domains(void);
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+ if (sd->parent && (sd->parent->flags & flag))
+ return 1;
+
+ return 0;
+}
+
#else /* CONFIG_SMP */
struct sched_domain_attr;
static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new)
{
}
@@ -926,7 +980,7 @@ struct sched_class {
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
- const cpumask_t *newmask);
+ const struct cpumask *newmask);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
@@ -1579,12 +1633,12 @@ extern cputime_t task_gtime(struct task_struct *p);
#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
- const cpumask_t *new_mask);
+ const struct cpumask *new_mask);
#else
static inline int set_cpus_allowed_ptr(struct task_struct *p,
- const cpumask_t *new_mask)
+ const struct cpumask *new_mask)
{
- if (!cpu_isset(0, *new_mask))
+ if (!cpumask_test_cpu(0, new_mask))
return -EINVAL;
return 0;
}
@@ -2195,10 +2249,8 @@ __trace_special(void *__tr, void *__data,
}
#endif
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
extern void normalize_rt_tasks(void);
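
Callers of the scheduler interfaces above likewise pass pointers, and the new sched_group_cpus() accessor replaces direct access to the old cpumask_t member. A sketch (bind_task_to_cpu() and group_has_cpu() are hypothetical helpers; p is assumed to be a valid task):

    #include <linux/sched.h>
    #include <linux/cpumask.h>

    static int bind_task_to_cpu(struct task_struct *p, int cpu)
    {
            return set_cpus_allowed_ptr(p, cpumask_of(cpu));
    }

    static int group_has_cpu(struct sched_group *sg, int cpu)
    {
            return cpumask_test_cpu(cpu, sched_group_cpus(sg));
    }
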
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 0c5b5ac36d8e..e632d29f0544 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -125,7 +125,8 @@ int arch_update_cpu_topology(void);
| SD_WAKE_AFFINE \
| SD_WAKE_BALANCE \
| SD_SHARE_PKG_RESOURCES\
- | BALANCE_FOR_MC_POWER, \
+ | sd_balance_for_mc_power()\
+ | sd_power_saving_flags(),\
.last_balance = jiffies, \
.balance_interval = 1, \
}
@@ -150,7 +151,8 @@ int arch_update_cpu_topology(void);
| SD_BALANCE_FORK \
| SD_WAKE_AFFINE \
| SD_WAKE_BALANCE \
- | BALANCE_FOR_PKG_POWER,\
+ | sd_balance_for_package_power()\
+ | sd_power_saving_flags(),\
.last_balance = jiffies, \
.balance_interval = 1, \
}
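
The initializers above now fold the power-saving policy in through the inline helpers instead of the old BALANCE_FOR_* macros. A sketch of the flags an MC-level domain picks up when sched_smt_power_savings is non-zero, per the sched.h hunk earlier in this diff (assumes an SMP build; example_mc_flags() is invented for illustration):

    #include <linux/sched.h>

    /* With sched_smt_power_savings != 0:
     *   sd_balance_for_mc_power() -> SD_POWERSAVINGS_BALANCE
     *   sd_power_saving_flags()   -> SD_BALANCE_NEWIDLE
     */
    static int example_mc_flags(void)
    {
            return SD_WAKE_AFFINE | SD_SHARE_PKG_RESOURCES
                   | sd_balance_for_mc_power()
                   | sd_power_saving_flags();
    }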