author    | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2016-10-25 12:21:44 +0200
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2016-11-11 16:37:40 +0100
commit    | c360192bf4a8dc72f102dd6a4e1bf8bd0b404cfa (patch)
tree      | c54104bae43978dfe31e6fc2c9a4d996218379fd /arch
parent    | 1993dbc7e09a7ffdd98975589aa83387928e9fe7 (diff)
download  | linux-c360192bf4a8dc72f102dd6a4e1bf8bd0b404cfa.tar.bz2
s390/preempt: move preempt_count to the lowcore
Convert s390 to use a field in struct lowcore for the CPU
preemption count. Accessing a lowcore field is slightly cheaper than
accessing a thread_info variable, and it removes the dependency on a
task-related structure.
bloat-o-meter on the vmlinux image for the default configuration
(CONFIG_PREEMPT_NONE=y) reports a small reduction in text size:
add/remove: 0/0 grow/shrink: 18/578 up/down: 228/-5448 (-5220)
A larger improvement is achieved with the default configuration
but with CONFIG_PREEMPT=y and CONFIG_DEBUG_PREEMPT=n:
add/remove: 2/6 grow/shrink: 59/4477 up/down: 1618/-228762 (-227144)
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
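
[Editor's note] The access-cost argument above can be pictured with a toy C fragment. The struct and variable names below are invented for illustration and are not the kernel's definitions: a count kept in a per-CPU area that sits at a fixed address (as the s390 lowcore does) is reached with a single memory access at a constant location, while a count kept in thread_info first needs the pointer to the current task's thread_info to be loaded.

/*
 * Toy illustration only -- hypothetical types, not kernel code.
 */
struct toy_lowcore { unsigned int preempt_count; };
struct toy_thread_info { int preempt_count; };

static struct toy_lowcore per_cpu_area;		/* stands in for the prefix area at a fixed address */
static struct toy_thread_info *current_ti;	/* must be loaded before every access */

static inline void inc_via_lowcore(void)
{
	per_cpu_area.preempt_count++;		/* one load/store at a known location */
}

static inline void inc_via_thread_info(void)
{
	current_ti->preempt_count++;		/* pointer load first, then load/store */
}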
Diffstat (limited to 'arch')
-rw-r--r-- | arch/s390/include/asm/lowcore.h     |   3
-rw-r--r-- | arch/s390/include/asm/preempt.h     | 137
-rw-r--r-- | arch/s390/include/asm/thread_info.h |   2
-rw-r--r-- | arch/s390/kernel/asm-offsets.c      |   2
-rw-r--r-- | arch/s390/kernel/early.c            |   1
-rw-r--r-- | arch/s390/kernel/entry.S            |   2
-rw-r--r-- | arch/s390/kernel/setup.c            |   1
-rw-r--r-- | arch/s390/mm/fault.c                |   1
8 files changed, 144 insertions, 5 deletions
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78f423c..1df1e6215e85 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -126,7 +126,8 @@ struct lowcore {
 	__u64	percpu_offset;			/* 0x0378 */
 	__u64	vdso_per_cpu_data;		/* 0x0380 */
 	__u64	machine_flags;			/* 0x0388 */
-	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
+	__u32	preempt_count;			/* 0x0390 */
+	__u8	pad_0x0394[0x0398-0x0394];	/* 0x0394 */
 	__u64	gmap;				/* 0x0398 */
 	__u32	spinlock_lockval;		/* 0x03a0 */
 	__u32	fpu_flags;			/* 0x03a4 */
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
new file mode 100644
index 000000000000..b0776b2c8dcf
--- /dev/null
+++ b/arch/s390/include/asm/preempt.h
@@ -0,0 +1,137 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/current.h>
+#include <linux/thread_info.h>
+#include <asm/atomic_ops.h>
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
+static inline int preempt_count(void)
+{
+	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static inline void preempt_count_set(int pc)
+{
+	int old, new;
+
+	do {
+		old = READ_ONCE(S390_lowcore.preempt_count);
+		new = (old & PREEMPT_NEED_RESCHED) |
+			(pc & ~PREEMPT_NEED_RESCHED);
+	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
+				  old, new) != old);
+}
+
+#define init_task_preempt_count(p)	do { } while (0)
+
+#define init_idle_preempt_count(p, cpu)	do { \
+	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+static inline void __preempt_count_add(int val)
+{
+	if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
+		__atomic_add_const(val, &S390_lowcore.preempt_count);
+	else
+		__atomic_add(val, &S390_lowcore.preempt_count);
+}
+
+static inline void __preempt_count_sub(int val)
+{
+	__preempt_count_add(-val);
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
+			preempt_offset);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define PREEMPT_ENABLED	(0)
+
+static inline int preempt_count(void)
+{
+	return READ_ONCE(S390_lowcore.preempt_count);
+}
+
+static inline void preempt_count_set(int pc)
+{
+	S390_lowcore.preempt_count = pc;
+}
+
+#define init_task_preempt_count(p)	do { } while (0)
+
+#define init_idle_preempt_count(p, cpu)	do { \
+	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+	return false;
+}
+
+static inline void __preempt_count_add(int val)
+{
+	S390_lowcore.preempt_count += val;
+}
+
+static inline void __preempt_count_sub(int val)
+{
+	S390_lowcore.preempt_count -= val;
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+	return !--S390_lowcore.preempt_count && tif_need_resched();
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c0398c363..933794150e5b 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -34,7 +34,6 @@ struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	unsigned long		sys_call_table;	/* System call table address */
 	unsigned int		cpu;		/* current CPU */
-	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	unsigned int		system_call;
 	__u64			user_timer;
 	__u64			system_timer;
@@ -49,7 +48,6 @@ struct thread_info {
 	.task		= &tsk,			\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
 
 #define init_thread_info	(init_thread_union.thread_info)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ec16cec6bcd4..6be10e490982 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -43,7 +43,6 @@ int main(void)
 	OFFSET(__TI_flags, thread_info, flags);
 	OFFSET(__TI_sysc_table, thread_info, sys_call_table);
 	OFFSET(__TI_cpu, thread_info, cpu);
-	OFFSET(__TI_precount, thread_info, preempt_count);
 	OFFSET(__TI_user_timer, thread_info, user_timer);
 	OFFSET(__TI_system_timer, thread_info, system_timer);
 	OFFSET(__TI_last_break, thread_info, last_break);
@@ -175,6 +174,7 @@ int main(void)
 	OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
 	OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
 	OFFSET(__LC_GMAP, lowcore, gmap);
 	OFFSET(__LC_PASTE, lowcore, paste);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 39d03a7a3354..9cd85adaa7c1 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
 	psw.addr = (unsigned long) s390_base_pgm_handler;
 	S390_lowcore.program_new_psw = psw;
 	s390_base_pgm_handler_fn = early_pgm_check_handler;
+	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
 }
 
 static noinline __init void setup_facility_list(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 49a30737adde..3f4a1638a700 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -626,7 +626,7 @@ ENTRY(io_int_handler)
 	jo	.Lio_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
-	icm	%r0,15,__TI_precount(%r12)
+	icm	%r0,15,__LC_PREEMPT_COUNT
 	jnz	.Lio_restore		# preemption is disabled
 	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 	jno	.Lio_restore
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7f7ba5f23f13..268d6c1b651f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -333,6 +333,7 @@ static void __init setup_lowcore(void)
 	lc->thread_info = (unsigned long) &init_thread_union;
 	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
+	lc->preempt_count = S390_lowcore.preempt_count;
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
 	       MAX_FACILITY_BIT/8);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 661d9fe63c43..d1faae5cdd12 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -733,6 +733,7 @@ block:
 		 * return to userspace schedule() to block. */
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		set_tsk_need_resched(tsk);
+		set_preempt_need_resched();
 	}
 }
 out:
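
[Editor's note] On the CONFIG_HAVE_MARCH_Z196_FEATURES variant of asm/preempt.h above: the need-resched flag shares the word with the preemption count and is stored inverted (set_preempt_need_resched() clears the bit, clear_preempt_need_resched() sets it, test_preempt_need_resched() tests for the bit being clear), so __preempt_count_dec_and_test() only has to check whether the whole word dropped to zero. The stand-alone sketch below models that encoding with plain non-atomic C and assumes 0x80000000 for the flag (taken from the generic PREEMPT_NEED_RESCHED definition); it is an illustration only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define NEED_RESCHED_BIT	0x80000000u		/* assumed value, mirrors PREEMPT_NEED_RESCHED */
#define ENABLED			(0 + NEED_RESCHED_BIT)	/* count 0, bit set: nothing pending */

static unsigned int word = ENABLED;	/* stands in for lowcore.preempt_count */

/* Inverted encoding: clearing the bit requests a reschedule. */
static void set_need_resched(void)
{
	word &= ~NEED_RESCHED_BIT;
}

static bool dec_and_test(void)
{
	/*
	 * The kernel version compares the old value with 1; testing the
	 * new value against 0 is the same condition: the count reached
	 * zero *and* the (inverted) need-resched bit is clear.
	 */
	return --word == 0;
}

int main(void)
{
	word += 1;			/* preempt_disable() */
	set_need_resched();		/* scheduler marks a pending reschedule */
	printf("resched on preempt_enable: %d\n", dec_and_test());	/* prints 1 */

	word = ENABLED;
	word += 1;			/* preempt_disable(), nothing pending */
	printf("resched on preempt_enable: %d\n", dec_and_test());	/* prints 0 */
	return 0;
}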