authorLinus Torvalds <torvalds@linux-foundation.org>2014-01-20 10:42:08 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-20 10:42:08 -0800
commita0fa1dd3cdbccec9597fe53b6177a9aa6e20f2f8 (patch)
treeb249854573815eedf377e554f0ea516f86411841 /include
parent9326657abe1a83ed4b4f396b923ca1217fd50cba (diff)
parenteaad45132c564ce377e6dce05e78e08e456d5315 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:

 - Add the initial implementation of SCHED_DEADLINE support: a
   real-time scheduling policy where tasks that meet their deadlines
   and periodically execute their instances in less than their runtime
   quota see real-time scheduling and won't miss any of their
   deadlines.  Tasks that go over their quota get delayed (available
   to privileged users for now).

 - Clean up and fix preempt_enable_no_resched() abuse all around the
   tree.

 - Do sched_clock() performance optimizations on x86 and elsewhere.

 - Fix and improve auto-NUMA balancing.

 - Fix and clean up the idle loop.

 - Apply various cleanups and fixes.

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (60 commits)
  sched: Fix __sched_setscheduler() nice test
  sched: Move SCHED_RESET_ON_FORK into attr::sched_flags
  sched: Fix up attr::sched_priority warning
  sched: Fix up scheduler syscall LTP fails
  sched: Preserve the nice level over sched_setscheduler() and sched_setparam() calls
  sched/core: Fix htmldocs warnings
  sched/deadline: No need to check p if dl_se is valid
  sched/deadline: Remove unused variables
  sched/deadline: Fix sparse static warnings
  m68k: Fix build warning in mac_via.h
  sched, thermal: Clean up preempt_enable_no_resched() abuse
  sched, net: Fixup busy_loop_us_clock()
  sched, net: Clean up preempt_enable_no_resched() abuse
  sched/preempt: Fix up missed PREEMPT_NEED_RESCHED folding
  sched/preempt, locking: Rework local_bh_{dis,en}able()
  sched/clock, x86: Avoid a runtime condition in native_sched_clock()
  sched/clock: Fix up clear_sched_clock_stable()
  sched/clock, x86: Use a static_key for sched_clock_stable
  sched/clock: Remove local_irq_disable() from the clocks
  sched/clock, x86: Rewrite cyc2ns() to avoid the need to disable IRQs
  ...
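For context on the new interface, here is a minimal userspace sketch
(illustrative, not part of this merge) that switches the calling task to
SCHED_DEADLINE. It assumes the x86-64 syscall number 314 for
sched_setattr() and mirrors the two-argument prototype and the
sched_attr layout added in the include/linux/{syscalls,sched}.h hunks
below; check your architecture's syscall table before relying on the
number.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SYS_sched_setattr
#define SYS_sched_setattr 314		/* x86-64 value; assumption */
#endif
#define SCHED_DEADLINE	  6

struct sched_attr {			/* mirrors the layout below */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* all three in nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size	    = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  = 10 * 1000 * 1000;	 /* 10ms budget...   */
	attr.sched_deadline = 30 * 1000 * 1000;	 /* ...due in 30ms... */
	attr.sched_period   = 100 * 1000 * 1000; /* ...every 100ms    */

	/* Privileged (root/CAP_SYS_NICE) for now, as noted above. */
	if (syscall(SYS_sched_setattr, 0, &attr) != 0) {
		perror("sched_setattr");
		return 1;
	}
	/* ... periodic work; overruns get throttled by the scheduler */
	return 0;
}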
Diffstat (limited to 'include')
-rw-r--r--include/linux/bottom_half.h32
-rw-r--r--include/linux/hardirq.h1
-rw-r--r--include/linux/init_task.h10
-rw-r--r--include/linux/preempt.h37
-rw-r--r--include/linux/preempt_mask.h16
-rw-r--r--include/linux/rtmutex.h18
-rw-r--r--include/linux/rwlock_api_smp.h12
-rw-r--r--include/linux/sched.h141
-rw-r--r--include/linux/sched/deadline.h24
-rw-r--r--include/linux/sched/rt.h5
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/spinlock_api_smp.h12
-rw-r--r--include/linux/spinlock_api_up.h16
-rw-r--r--include/linux/syscalls.h6
-rw-r--r--include/linux/uaccess.h5
-rw-r--r--include/net/busy_poll.h19
-rw-r--r--include/uapi/linux/sched.h6
17 files changed, 298 insertions, 63 deletions
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 27b1bcffe408..86c12c93e3cf 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -1,9 +1,35 @@
#ifndef _LINUX_BH_H
#define _LINUX_BH_H
-extern void local_bh_disable(void);
+#include <linux/preempt.h>
+#include <linux/preempt_mask.h>
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+#else
+static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+ preempt_count_add(cnt);
+ barrier();
+}
+#endif
+
+static inline void local_bh_disable(void)
+{
+ __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
+
extern void _local_bh_enable(void);
-extern void local_bh_enable(void);
-extern void local_bh_enable_ip(unsigned long ip);
+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+
+static inline void local_bh_enable_ip(unsigned long ip)
+{
+ __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
+}
+
+static inline void local_bh_enable(void)
+{
+ __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
#endif /* _LINUX_BH_H */
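The net effect of this rework: when !CONFIG_TRACE_IRQFLAGS,
local_bh_disable()/local_bh_enable() become plain preempt_count
arithmetic in SOFTIRQ_DISABLE_OFFSET (0x200) steps, so nesting falls
out of the counter naturally. A hedged sketch of the semantics:

	local_bh_disable();	/* preempt_count += 0x200		  */
	local_bh_disable();	/* nested: += 0x200 again		  */
	local_bh_enable();	/* -= 0x200; softirqs still disabled	  */
	local_bh_enable();	/* -= 0x200; pending softirqs may now run */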
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index d9cf963ac832..12d5f972f23f 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -5,6 +5,7 @@
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
+#include <asm/hardirq.h>
extern void synchronize_irq(unsigned int irq);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b0ed422e4e4a..f0e52383a001 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -11,6 +11,7 @@
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <linux/seqlock.h>
+#include <linux/rbtree.h>
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
@@ -154,6 +155,14 @@ extern struct task_group root_task_group;
#define INIT_TASK_COMM "swapper"
+#ifdef CONFIG_RT_MUTEXES
+# define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = RB_ROOT, \
+ .pi_waiters_leftmost = NULL,
+#else
+# define INIT_RT_MUTEXES(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -221,6 +230,7 @@ extern struct task_group root_task_group;
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
INIT_CPUSET_SEQ(tsk) \
+ INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
}
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index a3d9dc8c2c00..59749fc48328 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -64,7 +64,11 @@ do { \
} while (0)
#else
-#define preempt_enable() preempt_enable_no_resched()
+#define preempt_enable() \
+do { \
+ barrier(); \
+ preempt_count_dec(); \
+} while (0)
#define preempt_check_resched() do { } while (0)
#endif
@@ -93,7 +97,11 @@ do { \
__preempt_schedule_context(); \
} while (0)
#else
-#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#define preempt_enable_notrace() \
+do { \
+ barrier(); \
+ __preempt_count_dec(); \
+} while (0)
#endif
#else /* !CONFIG_PREEMPT_COUNT */
@@ -116,6 +124,31 @@ do { \
#endif /* CONFIG_PREEMPT_COUNT */
+#ifdef MODULE
+/*
+ * Modules have no business playing preemption tricks.
+ */
+#undef sched_preempt_enable_no_resched
+#undef preempt_enable_no_resched
+#undef preempt_enable_no_resched_notrace
+#undef preempt_check_resched
+#endif
+
+#ifdef CONFIG_PREEMPT
+#define preempt_set_need_resched() \
+do { \
+ set_preempt_need_resched(); \
+} while (0)
+#define preempt_fold_need_resched() \
+do { \
+ if (tif_need_resched()) \
+ set_preempt_need_resched(); \
+} while (0)
+#else
+#define preempt_set_need_resched() do { } while (0)
+#define preempt_fold_need_resched() do { } while (0)
+#endif
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
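preempt_fold_need_resched() ties into the PREEMPT_NEED_RESCHED folding
fixed up elsewhere in this series: the TIF_NEED_RESCHED state is
mirrored into the preempt count so the preempt_enable() fast path can
test a single word. A conceptual sketch (not the arch code; on x86 the
bit lives inverted in a per-cpu count):

#define PREEMPT_NEED_RESCHED	0x80000000	/* high bit of the count */

/*
 * Stored inverted: the bit is *cleared* when a resched is needed, so
 * the count reads 0 only when there are no preempt disables AND a
 * resched is pending; a single dec-and-test in preempt_enable() then
 * catches both conditions at once.
 */
static inline int preempt_dec_and_test_sketch(unsigned int *count)
{
	return --(*count) == 0;		/* 1 => call preempt_schedule() */
}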
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index d169820203dd..dbeec4d4a3be 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -2,7 +2,6 @@
#define LINUX_PREEMPT_MASK_H
#include <linux/preempt.h>
-#include <asm/hardirq.h>
/*
* We put the hardirq and softirq counter into the preemption
@@ -79,6 +78,21 @@
#endif
/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+ *
+ * which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
+ * softirqs, such that unlock sequences of:
+ *
+ * spin_unlock();
+ * local_bh_enable();
+ *
+ * work as expected.
+ */
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
+
+/*
* Are we running in atomic context? WARNING: this macro cannot
* always detect atomic context; in particular, it cannot know about
* held spinlocks in non-preemptible kernels. Thus it should not be
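Plugging in the constants from this header makes the new offset
concrete (values as of this tree, shown for illustration):

/*
 * SOFTIRQ_DISABLE_OFFSET = 2 * SOFTIRQ_OFFSET = 0x200
 * PREEMPT_CHECK_OFFSET   = 1
 * SOFTIRQ_LOCK_OFFSET    = 0x201
 *
 * spin_lock_bh() thus bumps preempt_count by 0x201 in one operation:
 * the 0x200 part disables softirqs and the +1 disables preemption,
 * while the unpaired unlock sequence spin_unlock(); local_bh_enable();
 * (-1, then -0x200) still sums back to zero.
 */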
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index de17134244f3..3aed8d737e1a 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -13,7 +13,7 @@
#define __LINUX_RT_MUTEX_H
#include <linux/linkage.h>
-#include <linux/plist.h>
+#include <linux/rbtree.h>
#include <linux/spinlock_types.h>
extern int max_lock_depth; /* for sysctl */
@@ -22,12 +22,14 @@ extern int max_lock_depth; /* for sysctl */
* The rt_mutex structure
*
* @wait_lock: spinlock to protect the structure
- * @wait_list: pilist head to enqueue waiters in priority order
+ * @waiters: rbtree root to enqueue waiters in priority order
+ * @waiters_leftmost: top waiter
* @owner: the mutex owner
*/
struct rt_mutex {
raw_spinlock_t wait_lock;
- struct plist_head wait_list;
+ struct rb_root waiters;
+ struct rb_node *waiters_leftmost;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
int save_state;
@@ -66,7 +68,7 @@ struct hrtimer_sleeper;
#define __RT_MUTEX_INITIALIZER(mutexname) \
{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
+ , .waiters = RB_ROOT \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
@@ -98,12 +100,4 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters), \
- INIT_RT_MUTEX_DEBUG(tsk)
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
#endif
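The switch away from plist is what enables deadline-based PI: a plist
indexes a small, bounded priority range, while deadlines are arbitrary
u64 values. An rbtree ordered by deadline, with a cached leftmost
pointer, keeps enqueue at O(log n) and top-waiter lookup at O(1). A
hedged sketch of the cached-leftmost insert pattern (the dl_waiter type
and names are hypothetical, not the kernel's enqueue code):

#include <linux/rbtree.h>
#include <linux/types.h>

struct dl_waiter {
	struct rb_node node;
	u64 deadline;			/* earlier deadline = higher prio */
};

static void dl_waiter_insert(struct rb_root *root,
			     struct rb_node **leftmost,
			     struct dl_waiter *w)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	int is_leftmost = 1;

	while (*link) {
		struct dl_waiter *e = rb_entry(*link, struct dl_waiter, node);

		parent = *link;
		if (w->deadline < e->deadline) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			is_leftmost = 0;	/* went right at least once */
		}
	}
	if (is_leftmost)
		*leftmost = &w->node;		/* new earliest deadline */
	rb_link_node(&w->node, parent, link);
	rb_insert_color(&w->node, root);
}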
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 9c9f0495d37c..5b9b84b20407 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -172,8 +172,7 @@ static inline void __raw_read_lock_irq(rwlock_t *lock)
static inline void __raw_read_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
@@ -200,8 +199,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
static inline void __raw_write_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
@@ -250,8 +248,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
@@ -275,8 +272,7 @@ static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 53f97eb8dbc7..ffccdad050b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -16,6 +16,7 @@ struct sched_param {
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
+#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
@@ -56,6 +57,70 @@ struct sched_param {
#include <asm/processor.h>
+#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
+
+/*
+ * Extended scheduling parameters data structure.
+ *
+ * This is needed because the original struct sched_param cannot be
+ * altered without introducing ABI issues with legacy applications
+ * (e.g., in sched_getparam()).
+ *
+ * However, the possibility of specifying more than just a priority for
+ * the tasks may be useful for a wide variety of application fields, e.g.,
+ * multimedia, streaming, automation and control, and many others.
+ *
+ * This variant (sched_attr) is meant to describe a so-called
+ * sporadic time-constrained task. In such a model, a task is specified by:
+ * - the activation period or minimum instance inter-arrival time;
+ * - the maximum (or average, depending on the actual scheduling
+ * discipline) computation time of all instances, a.k.a. runtime;
+ * - the deadline (relative to the actual activation time) of each
+ * instance.
+ * Very briefly, a periodic (sporadic) task asks for the execution of
+ * some specific computation --which is typically called an instance--
+ * (at most) every period. Moreover, each instance typically lasts no more
+ * than the runtime and must be completed by time instant t equal to
+ * the instance activation time + the deadline.
+ *
+ * This is reflected by the actual fields of the sched_attr structure:
+ *
+ * @size size of the structure, for fwd/bwd compat.
+ *
+ * @sched_policy task's scheduling policy
+ * @sched_flags for customizing the scheduler behaviour
+ * @sched_nice task's nice value (SCHED_NORMAL/BATCH)
+ * @sched_priority task's static priority (SCHED_FIFO/RR)
+ * @sched_deadline representative of the task's deadline
+ * @sched_runtime representative of the task's runtime
+ * @sched_period representative of the task's period
+ *
+ * Given this task model, there is a multiplicity of scheduling algorithms
+ * and policies that can be used to ensure all the tasks will meet their
+ * timing constraints.
+ *
+ * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
+ * only user of this new interface. More information about the algorithm is
+ * available in the scheduling class file or in Documentation/.
+ */
+struct sched_attr {
+ u32 size;
+
+ u32 sched_policy;
+ u64 sched_flags;
+
+ /* SCHED_NORMAL, SCHED_BATCH */
+ s32 sched_nice;
+
+ /* SCHED_FIFO, SCHED_RR */
+ u32 sched_priority;
+
+ /* SCHED_DEADLINE */
+ u64 sched_runtime;
+ u64 sched_deadline;
+ u64 sched_period;
+};
+
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
@@ -168,7 +233,6 @@ extern char ___assert_task_state[1 - 2*!!(
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
-#define task_is_dead(task) ((task)->exit_state != 0)
#define task_is_stopped_or_traced(task) \
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
@@ -1029,6 +1093,51 @@ struct sched_rt_entity {
#endif
};
+struct sched_dl_entity {
+ struct rb_node rb_node;
+
+ /*
+ * Original scheduling parameters. Copied here from sched_attr
+ * during sched_setattr(), they will remain the same until
+ * the next sched_setattr().
+ */
+ u64 dl_runtime; /* maximum runtime for each instance */
+ u64 dl_deadline; /* relative deadline of each instance */
+ u64 dl_period; /* separation of two instances (period) */
+ u64 dl_bw; /* dl_runtime / dl_deadline */
+
+ /*
+ * Actual scheduling parameters. Initialized with the values above,
+ * they are continuously updated during task execution. Note that
+ * the remaining runtime could be < 0 in case we are in overrun.
+ */
+ s64 runtime; /* remaining runtime for this instance */
+ u64 deadline; /* absolute deadline for this instance */
+ unsigned int flags; /* specifying the scheduler behaviour */
+
+ /*
+ * Some bool flags:
+ *
+ * @dl_throttled tells if we exhausted the runtime. If so, the
+ * task has to wait for a replenishment to be performed at the
+ * next firing of dl_timer.
+ *
+ * @dl_new tells if a new instance arrived. If so we must
+ * start executing it with full runtime and reset its absolute
+ * deadline;
+ *
+ * @dl_boosted tells if we are boosted due to DI. If so we are
+ * outside the bandwidth enforcement mechanism (but only until we
+ * exit the critical section).
+ */
+ int dl_throttled, dl_new, dl_boosted;
+
+ /*
+ * Bandwidth enforcement timer. Each -deadline task has its
+ * own bandwidth to be enforced, thus we need one timer per task.
+ */
+ struct hrtimer dl_timer;
+};
struct rcu_node;
@@ -1065,6 +1174,7 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
+ struct sched_dl_entity dl;
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
@@ -1098,6 +1208,7 @@ struct task_struct {
struct list_head tasks;
#ifdef CONFIG_SMP
struct plist_node pushable_tasks;
+ struct rb_node pushable_dl_tasks;
#endif
struct mm_struct *mm, *active_mm;
@@ -1249,9 +1360,12 @@ struct task_struct {
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
- struct plist_head pi_waiters;
+ struct rb_root pi_waiters;
+ struct rb_node *pi_waiters_leftmost;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
+ /* Top pi_waiters task */
+ struct task_struct *pi_top_task;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
@@ -1880,7 +1994,9 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
* but then during bootup it turns out that sched_clock()
* is reliable after all:
*/
-extern int sched_clock_stable;
+extern int sched_clock_stable(void);
+extern void set_sched_clock_stable(void);
+extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
@@ -1959,6 +2075,8 @@ extern int sched_setscheduler(struct task_struct *, int,
const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
+extern int sched_setattr(struct task_struct *,
+ const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
@@ -2038,7 +2156,7 @@ extern void wake_up_new_task(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
@@ -2627,6 +2745,21 @@ static inline bool __must_check current_clr_polling_and_test(void)
}
#endif
+static inline void current_clr_polling(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+ * Once the bit is cleared, we'll get IPIs with every new
+ * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+ * fold.
+ */
+ smp_mb(); /* paired with resched_task() */
+
+ preempt_fold_need_resched();
+}
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
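The sched_dl_entity fields above implement a CBS-style budget: runtime
is charged while the task runs, and exhaustion throttles the task until
dl_timer replenishes it at the next period. A hedged sketch of that
cycle (illustrative, not the kernel's update_curr_dl()):

static void dl_charge_sketch(struct sched_dl_entity *dl_se,
			     s64 delta_exec)
{
	dl_se->runtime -= delta_exec;	/* may go negative (overrun) */
	if (dl_se->runtime <= 0) {
		dl_se->dl_throttled = 1;	/* stop running now */
		/*
		 * dl_timer fires at the current deadline; its handler
		 * grants dl_runtime again and advances the absolute
		 * deadline by dl_period.
		 */
	}
}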
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
new file mode 100644
index 000000000000..9d303b8847df
--- /dev/null
+++ b/include/linux/sched/deadline.h
@@ -0,0 +1,24 @@
+#ifndef _SCHED_DEADLINE_H
+#define _SCHED_DEADLINE_H
+
+/*
+ * SCHED_DEADLINE tasks have negative priorities, reflecting
+ * the fact that any of them has higher priority than RT and
+ * NORMAL/BATCH tasks.
+ */
+
+#define MAX_DL_PRIO 0
+
+static inline int dl_prio(int prio)
+{
+ if (unlikely(prio < MAX_DL_PRIO))
+ return 1;
+ return 0;
+}
+
+static inline int dl_task(struct task_struct *p)
+{
+ return dl_prio(p->prio);
+}
+
+#endif /* _SCHED_DEADLINE_H */
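For reference, the resulting p->prio ordering (lower value = higher
priority), using the existing MAX_RT_PRIO/MAX_PRIO constants:

/*
 * deadline tasks:  prio < 0		(dl_prio() true)
 * RT tasks:	    0 <= prio < 100	(MAX_RT_PRIO)
 * normal tasks:    100 <= prio < 140	(MAX_PRIO)
 */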
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 440434df3627..34e4ebea8fce 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -35,6 +35,7 @@ static inline int rt_task(struct task_struct *p)
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
@@ -45,6 +46,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
+static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+{
+ return NULL;
+}
# define rt_mutex_adjust_pi(p) do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 41467f8ff8ec..31e0193cb0c5 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -48,7 +48,6 @@ extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index bdb9993f0fda..42dfab89e740 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -131,8 +131,7 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
@@ -174,20 +173,17 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
return 0;
}
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index af1f47229e70..d0d188861ad6 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -24,11 +24,14 @@
* flags straight, to suppress compiler warnings of unused lock
* variables, and to add the proper checker annotations:
*/
+#define ___LOCK(lock) \
+ do { __acquire(lock); (void)(lock); } while (0)
+
#define __LOCK(lock) \
- do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
+ do { preempt_disable(); ___LOCK(lock); } while (0)
#define __LOCK_BH(lock) \
- do { local_bh_disable(); __LOCK(lock); } while (0)
+ do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
#define __LOCK_IRQ(lock) \
do { local_irq_disable(); __LOCK(lock); } while (0)
@@ -36,12 +39,15 @@
#define __LOCK_IRQSAVE(lock, flags) \
do { local_irq_save(flags); __LOCK(lock); } while (0)
+#define ___UNLOCK(lock) \
+ do { __release(lock); (void)(lock); } while (0)
+
#define __UNLOCK(lock) \
- do { preempt_enable(); __release(lock); (void)(lock); } while (0)
+ do { preempt_enable(); ___UNLOCK(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); \
- __release(lock); (void)(lock); } while (0)
+ do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
+ ___UNLOCK(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 94273bbe6050..40ed9e9a77e5 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -38,6 +38,7 @@ struct rlimit;
struct rlimit64;
struct rusage;
struct sched_param;
+struct sched_attr;
struct sel_arg_struct;
struct semaphore;
struct sembuf;
@@ -279,9 +280,14 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
struct sched_param __user *param);
asmlinkage long sys_sched_setparam(pid_t pid,
struct sched_param __user *param);
+asmlinkage long sys_sched_setattr(pid_t pid,
+ struct sched_attr __user *attr);
asmlinkage long sys_sched_getscheduler(pid_t pid);
asmlinkage long sys_sched_getparam(pid_t pid,
struct sched_param __user *param);
+asmlinkage long sys_sched_getattr(pid_t pid,
+ struct sched_attr __user *attr,
+ unsigned int size);
asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr);
asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 9d8cf056e661..ecd3319dac33 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -25,13 +25,16 @@ static inline void pagefault_disable(void)
static inline void pagefault_enable(void)
{
+#ifndef CONFIG_PREEMPT
/*
* make sure to issue those last loads/stores before enabling
* the pagefault handler again.
*/
barrier();
preempt_count_dec();
- preempt_check_resched();
+#else
+ preempt_enable();
+#endif
}
#ifndef ARCH_HAS_NOCACHE_UACCESS
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 829627d7b846..1d67fb6b23a0 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -42,27 +42,10 @@ static inline bool net_busy_loop_on(void)
return sysctl_net_busy_poll;
}
-/* a wrapper to make debug_smp_processor_id() happy
- * we can use sched_clock() because we don't care much about precision
- * we only care that the average is bounded
- */
-#ifdef CONFIG_DEBUG_PREEMPT
-static inline u64 busy_loop_us_clock(void)
-{
- u64 rc;
-
- preempt_disable_notrace();
- rc = sched_clock();
- preempt_enable_no_resched_notrace();
-
- return rc >> 10;
-}
-#else /* CONFIG_DEBUG_PREEMPT */
static inline u64 busy_loop_us_clock(void)
{
- return sched_clock() >> 10;
+ return local_clock() >> 10;
}
-#endif /* CONFIG_DEBUG_PREEMPT */
static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
{
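A note on the replacement (an observation, not from the patch):
local_clock() returns nanoseconds, and ">> 10" divides by 1024 rather
than 1000, e.g. 1,000,000 ns >> 10 = 976 rather than 1000, so the
result runs about 2.3% low. That is fine here because busy polling only
needs a bounded average, not exact microseconds, and the shift avoids a
64-bit division.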
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 5a0f945927ac..34f9d7387d13 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -39,8 +39,14 @@
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
+#define SCHED_DEADLINE 6
+
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
+/*
+ * For the sched_{set,get}attr() calls
+ */
+#define SCHED_FLAG_RESET_ON_FORK 0x01
#endif /* _UAPI_LINUX_SCHED_H */