author      Linus Torvalds <torvalds@linux-foundation.org>   2014-08-04 16:23:30 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>   2014-08-04 16:23:30 -0700
commit      98959948a7ba33cf8c708626e0d2a1456397e1c6 (patch)
tree        8ba9b6c2679a06e89f23bdd7018e9bb0249e3bda /include
parent      ef35ad26f8ff44d2c93e29952cdb336bda729d9d (diff)
parent      cd3bd4e628a6d57d66afe77835fe8d93ae3e41f8 (diff)
download    linux-98959948a7ba33cf8c708626e0d2a1456397e1c6.tar.bz2
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Move the nohz kick code out of the scheduler tick to a dedicated IPI,
from Frederic Weisbecker.
This necessitated quite some background infrastructure rework,
including:
* Clean up some irq-work internals
* Implement remote irq-work
* Implement nohz kick on top of remote irq-work
* Move full dynticks timer enqueue notification to new kick
* Move multi-task notification to new kick
* Remove unnecessary barriers on multi-task notification
- Remove proliferation of wait_on_bit() action functions and allow
  wait_on_bit_action() functions to support a timeout. (Neil Brown)
  A usage sketch of the reworked API follows this list.
- Another round of sched/numa improvements, cleanups and fixes. (Rik
van Riel)
- Implement fast idling of CPUs when the system is partially loaded,
for better scalability. (Tim Chen)
- Restructure and fix the CPU hotplug handling code that may leave
cfs_rq and rt_rq's throttled when tasks are migrated away from a dead
cpu. (Kirill Tkhai)
- Robustify the sched topology setup code. (Peter Zijlstra)
- Improve sched_feat() handling wrt. static_keys (Jason Baron)
- Misc fixes.
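For illustration, a minimal sketch of the reworked wait_on_bit() API (kernel-style C; this is not code from the merge, and `my_flags`, `MY_BIT_BUSY` and the helper names are hypothetical). The common schedule()- and io_schedule()-based waits no longer take an action callback, while the waker side is unchanged:

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical flag word with a "busy" bit. */
static unsigned long my_flags;
#define MY_BIT_BUSY 0

static int wait_until_not_busy(void)
{
	int err;

	/*
	 * Before this series a caller had to pass its own action
	 * callback, e.g.:
	 *   wait_on_bit(&my_flags, MY_BIT_BUSY, my_bit_wait,
	 *               TASK_INTERRUPTIBLE);
	 * where my_bit_wait() simply called schedule().  The common
	 * cases now need no callback at all:
	 */
	err = wait_on_bit(&my_flags, MY_BIT_BUSY, TASK_INTERRUPTIBLE);
	if (err)
		return err;	/* non-zero: a signal woke us first */

	/* I/O flavour: sleeps in io_schedule() instead of schedule(). */
	return wait_on_bit_io(&my_flags, MY_BIT_BUSY, TASK_UNINTERRUPTIBLE);
}

/* The waker side is unchanged: clear the bit, then wake any waiters. */
static void mark_not_busy(void)
{
	clear_bit(MY_BIT_BUSY, &my_flags);
	smp_mb__after_atomic();	/* clear must be visible before the wakeup */
	wake_up_bit(&my_flags, MY_BIT_BUSY);
}

Callers needing finer control keep it via wait_on_bit_action(), whose action function now takes a struct wait_bit_key * instead of a void * (see the include/linux/wait.h diff below).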
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
sched/fair: Fix 'make xmldocs' warning caused by missing description
sched: Use macro for magic number of -1 for setparam
sched: Robustify topology setup
sched: Fix sched_setparam() policy == -1 logic
sched: Allow wait_on_bit_action() functions to support a timeout
sched: Remove proliferation of wait_on_bit() action functions
sched/numa: Revert "Use effective_load() to balance NUMA loads"
sched: Fix static_key race with sched_feat()
sched: Remove extra static_key*() function indirection
sched/rt: Fix replenish_dl_entity() comments to match the current upstream code
sched: Transform resched_task() into resched_curr()
sched/deadline: Kill task_struct->pi_top_task
sched: Rework check_for_tasks()
sched/rt: Enqueue just unthrottled rt_rq back on the stack in __disable_runtime()
sched/fair: Disable runtime_enabled on dying rq
sched/numa: Change scan period code to match intent
sched/numa: Rework best node setting in task_numa_migrate()
sched/numa: Examine a task move when examining a task swap
sched/numa: Simplify task_numa_compare()
sched/numa: Use effective_load() to balance NUMA loads
...
Diffstat (limited to 'include')
-rw-r--r--   include/linux/irq_work.h       5
-rw-r--r--   include/linux/sched.h          8
-rw-r--r--   include/linux/sunrpc/sched.h   2
-rw-r--r--   include/linux/tick.h           9
-rw-r--r--   include/linux/wait.h         125
-rw-r--r--   include/linux/writeback.h      3
6 files changed, 135 insertions, 17 deletions
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 19ae05d4b8ec..bf9422c3aefe 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -33,6 +33,11 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
 
 bool irq_work_queue(struct irq_work *work);
+
+#ifdef CONFIG_SMP
+bool irq_work_queue_on(struct irq_work *work, int cpu);
+#endif
+
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b39a671cfd59..42cac4dc2157 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1437,8 +1437,6 @@ struct task_struct {
 	struct rb_node *pi_waiters_leftmost;
 	/* Deadlock detection and priority inheritance handling */
 	struct rt_mutex_waiter *pi_blocked_on;
-	/* Top pi_waiters task */
-	struct task_struct *pi_top_task;
 #endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -2782,7 +2780,7 @@ static inline bool __must_check current_set_polling_and_test(void)
 
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
+	 * paired by resched_curr()
 	 */
 	smp_mb__after_atomic();
 
@@ -2800,7 +2798,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
 
 	/*
	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
+	 * paired by resched_curr()
 	 */
 	smp_mb__after_atomic();
 
@@ -2832,7 +2830,7 @@ static inline void current_clr_polling(void)
 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
 	 * fold.
 	 */
-	smp_mb(); /* paired with resched_task() */
+	smp_mb(); /* paired with resched_curr() */
 
 	preempt_fold_need_resched();
 }
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index ad7dbe2cfecd..1a8959944c5f 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -236,7 +236,7 @@ void *rpc_malloc(struct rpc_task *, size_t);
 void rpc_free(void *);
 int rpciod_up(void);
 void rpciod_down(void);
-int __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *));
+int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
 #ifdef RPC_DEBUG
 struct net;
 void rpc_show_tasks(struct net *);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 06cc093ab7ad..059052306831 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -183,7 +183,13 @@ static inline bool tick_nohz_full_cpu(int cpu)
 
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
-extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
+
+static inline void tick_nohz_full_kick(void)
+{
+	tick_nohz_full_kick_cpu(smp_processor_id());
+}
+
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
@@ -191,6 +197,7 @@ static inline void tick_nohz_init(void) { }
 static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick(void) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
diff --git a/include/linux/wait.h b/include/linux/wait.h
index bd68819f0815..6fb1ba5f9b2f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -25,6 +25,7 @@ struct wait_bit_key {
 	void *flags;
 	int bit_nr;
 #define WAIT_ATOMIC_T_BIT_NR -1
+	unsigned long private;
 };
 
 struct wait_bit_queue {
@@ -141,18 +142,19 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
 	list_del(&old->task_list);
 }
 
+typedef int wait_bit_action_f(struct wait_bit_key *);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_bit(wait_queue_head_t *, void *, int);
-int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
-int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
+int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
+int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
 void wake_up_bit(void *, int);
 void wake_up_atomic_t(atomic_t *);
-int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
-int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
+int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
+int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
 int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
 wait_queue_head_t *bit_waitqueue(void *, int);
 
@@ -854,11 +856,14 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 	(wait)->flags = 0;						\
 } while (0)
 
+
+extern int bit_wait(struct wait_bit_key *);
+extern int bit_wait_io(struct wait_bit_key *);
+
 /**
  * wait_on_bit - wait for a bit to be cleared
  * @word: the word being waited on, a kernel virtual address
  * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
  * @mode: the task state to sleep in
  *
  * There is a standard hashed waitqueue table for generic use. This
@@ -867,9 +872,62 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
  * call wait_on_bit() in threads waiting for the bit to clear.
  * One uses wait_on_bit() where one is waiting for the bit to clear,
  * but has no intention of setting it.
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit(void *word, int bit, unsigned mode)
+{
+	if (!test_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit(word, bit,
+				       bit_wait,
+				       mode);
+}
+
+/**
+ * wait_on_bit_io - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared. This is similar to wait_on_bit(), but calls
+ * io_schedule() instead of schedule() for the actual waiting.
+ *
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit_io(void *word, int bit, unsigned mode)
+{
+	if (!test_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit(word, bit,
+				       bit_wait_io,
+				       mode);
+}
+
+/**
+ * wait_on_bit_action - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared, and allow the waiting action to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
  */
 static inline int
-wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
+wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
 {
 	if (!test_bit(bit, word))
 		return 0;
@@ -880,7 +938,6 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  * @word: the word being waited on, a kernel virtual address
  * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
  * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
@@ -891,9 +948,61 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
 * wait_on_bit() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set. Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock(void *word, int bit, unsigned mode)
+{
+	if (!test_and_set_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
+}
+
+/**
+ * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to atomically set it. This is similar
+ * to wait_on_bit(), but calls io_schedule() instead of schedule()
+ * for the actual waiting.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set. Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock_io(void *word, int bit, unsigned mode)
+{
+	if (!test_and_set_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
+}
+
+/**
+ * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to set it, and allow the waiting action
+ * to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set. Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
  */
 static inline int
-wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
+wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
 {
 	if (!test_and_set_bit(bit, word))
 		return 0;
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 5777c13849ba..a219be961c0a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -90,7 +90,6 @@ struct writeback_control {
  * fs/fs-writeback.c
  */
 struct bdi_writeback;
-int inode_wait(void *);
 void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 			    enum wb_reason reason);
@@ -105,7 +104,7 @@ void inode_wait_for_writeback(struct inode *inode);
 static inline void wait_on_inode(struct inode *inode)
 {
 	might_sleep();
-	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
 }
 
 /*
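For illustration, a minimal sketch of the remote-queueing primitive declared above in include/linux/irq_work.h (not code from this merge; `my_work`, `my_func` and `kick_cpu()` are hypothetical, and irq_work_queue_on() assumes CONFIG_SMP). This is the mechanism the dedicated nohz kick IPI and tick_nohz_full_kick_cpu() are built on:

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Runs in irq_work (interrupt) context on the CPU the work was queued on. */
static void my_func(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static DEFINE_IRQ_WORK(my_work, my_func);

/* Make @cpu run my_func() from its irq_work IPI.  Assumes preemption is
 * disabled so smp_processor_id() is stable across the comparison. */
static void kick_cpu(int cpu)
{
	if (cpu == smp_processor_id())
		irq_work_queue(&my_work);	/* local fast path */
	else
		irq_work_queue_on(&my_work, cpu);
}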